repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---
hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/AMRMTokenSecretManagerState.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.recovery.records;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.AMRMTokenSecretManagerStateProto;
import org.apache.hadoop.yarn.server.api.records.MasterKey;
import org.apache.hadoop.yarn.server.resourcemanager.security.AMRMTokenSecretManager;
import org.apache.hadoop.yarn.util.Records;
/**
 * Contains all the state data that needs to be stored persistently
 * for {@link AMRMTokenSecretManager}.
*/
@Public
@Unstable
public abstract class AMRMTokenSecretManagerState {
public static AMRMTokenSecretManagerState newInstance(
MasterKey currentMasterKey, MasterKey nextMasterKey) {
AMRMTokenSecretManagerState data =
Records.newRecord(AMRMTokenSecretManagerState.class);
data.setCurrentMasterKey(currentMasterKey);
data.setNextMasterKey(nextMasterKey);
return data;
}
public static AMRMTokenSecretManagerState newInstance(
AMRMTokenSecretManagerState state) {
AMRMTokenSecretManagerState data =
Records.newRecord(AMRMTokenSecretManagerState.class);
data.setCurrentMasterKey(state.getCurrentMasterKey());
data.setNextMasterKey(state.getNextMasterKey());
return data;
}
  /**
   * The current master key of {@link AMRMTokenSecretManager}.
   */
@Public
@Unstable
public abstract MasterKey getCurrentMasterKey();
@Public
@Unstable
public abstract void setCurrentMasterKey(MasterKey currentMasterKey);
  /**
   * The next master key of {@link AMRMTokenSecretManager}.
   */
@Public
@Unstable
public abstract MasterKey getNextMasterKey();
@Public
@Unstable
public abstract void setNextMasterKey(MasterKey nextMasterKey);
public abstract AMRMTokenSecretManagerStateProto getProto();
}
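The record above is built through its static factories. Below is a minimal, hypothetical usage sketch (the example class name, the empty placeholder keys, and the same-package assumption are mine, not part of the Hadoop source); in the RM, the keys would come from AMRMTokenSecretManager's key rotation rather than being created empty:

import org.apache.hadoop.yarn.server.api.records.MasterKey;
import org.apache.hadoop.yarn.util.Records;

public class AMRMTokenStateExample {
  public static void main(String[] args) {
    // Placeholder keys created through the same record factory.
    MasterKey current = Records.newRecord(MasterKey.class);
    MasterKey next = Records.newRecord(MasterKey.class);
    // Build the persistent record...
    AMRMTokenSecretManagerState state =
        AMRMTokenSecretManagerState.newInstance(current, next);
    // ...and take a field-by-field copy, as a state store might before saving.
    AMRMTokenSecretManagerState copy =
        AMRMTokenSecretManagerState.newInstance(state);
    System.out.println(copy.getProto());
  }
}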
| 2,693 | 33.987013 | 109 | java |
hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/RMDelegationTokenIdentifierData.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.recovery.records;
import java.io.ByteArrayInputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.IOException;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.RMDelegationTokenIdentifierDataProto;
import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier;
import org.apache.hadoop.yarn.security.client.YARNDelegationTokenIdentifier;
public class RMDelegationTokenIdentifierData {
RMDelegationTokenIdentifierDataProto.Builder builder =
RMDelegationTokenIdentifierDataProto.newBuilder();
public RMDelegationTokenIdentifierData() {}
  public RMDelegationTokenIdentifierData(
      YARNDelegationTokenIdentifier identifier, long renewDate) {
    builder.setTokenIdentifier(identifier.getProto());
    builder.setRenewDate(renewDate);
  }
  public void readFields(DataInput in) throws IOException {
    // The proto parser needs an InputStream; callers (the RM state stores)
    // pass a DataInputStream here, so the narrowing cast holds in practice.
    builder.mergeFrom((DataInputStream) in);
  }
public byte[] toByteArray() throws IOException {
return builder.build().toByteArray();
}
public RMDelegationTokenIdentifier getTokenIdentifier() throws IOException {
ByteArrayInputStream in =
new ByteArrayInputStream(builder.getTokenIdentifier().toByteArray());
    RMDelegationTokenIdentifier identifier = new RMDelegationTokenIdentifier();
    identifier.readFields(new DataInputStream(in));
    return identifier;
}
public long getRenewDate() {
return builder.getRenewDate();
}
}
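A hedged sketch of the serialize/recover round trip this class supports. The example class name and the empty identifier are placeholders of mine, and the code assumes it sits in the same recovery.records package:

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier;

public class IdentifierDataRoundTrip {
  public static void main(String[] args) throws Exception {
    // An empty identifier stands in for a real delegation token identifier.
    RMDelegationTokenIdentifier id = new RMDelegationTokenIdentifier();
    long renewDate = System.currentTimeMillis();
    // Serialize: the identifier proto and renew date are packed together.
    RMDelegationTokenIdentifierData data =
        new RMDelegationTokenIdentifierData(id, renewDate);
    byte[] bytes = data.toByteArray();
    // Deserialize, as the RM state store does during recovery.
    RMDelegationTokenIdentifierData restored =
        new RMDelegationTokenIdentifierData();
    restored.readFields(new DataInputStream(new ByteArrayInputStream(bytes)));
    System.out.println(restored.getRenewDate() == renewDate); // true
  }
}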
| 2,310 | 36.274194 | 113 | java |
hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/Epoch.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.recovery.records;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.EpochProto;
import org.apache.hadoop.yarn.util.Records;
/**
 * The epoch information of the RM for work-preserving restart.
 * The epoch is incremented each time the RM restarts, and is used to assure
 * the uniqueness of <code>ContainerId</code>s across restarts.
 */
@Private
@Unstable
public abstract class Epoch {
public static Epoch newInstance(long sequenceNumber) {
Epoch epoch = Records.newRecord(Epoch.class);
epoch.setEpoch(sequenceNumber);
return epoch;
}
public abstract long getEpoch();
public abstract void setEpoch(long sequenceNumber);
public abstract EpochProto getProto();
  @Override
  public String toString() {
return String.valueOf(getEpoch());
}
@Override
public int hashCode() {
return (int) (getEpoch() ^ (getEpoch() >>> 32));
}
@Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    if (obj == null || getClass() != obj.getClass()) {
      return false;
    }
    Epoch other = (Epoch) obj;
    return this.getEpoch() == other.getEpoch();
  }
}
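A small usage sketch (hypothetical example class, same-package assumption) showing that two Epoch records carrying the same sequence number are interchangeable, which is exactly what the value-based equals/hashCode above guarantee:

public class EpochExample {
  public static void main(String[] args) {
    // Both records are built through the record factory behind newInstance.
    Epoch first = Epoch.newInstance(42L);
    Epoch second = Epoch.newInstance(42L);
    System.out.println(first.equals(second)); // true
    System.out.println(first);                // prints "42"
  }
}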
| 2,179 | 29.277778 | 87 | java |
hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/ApplicationStateDataPBImpl.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationSubmissionContextPBImpl;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.ApplicationStateDataProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.ApplicationStateDataProtoOrBuilder;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.RMAppStateProto;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationStateData;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
import com.google.protobuf.TextFormat;
public class ApplicationStateDataPBImpl extends ApplicationStateData {
ApplicationStateDataProto proto =
ApplicationStateDataProto.getDefaultInstance();
ApplicationStateDataProto.Builder builder = null;
boolean viaProto = false;
private ApplicationSubmissionContext applicationSubmissionContext = null;
public ApplicationStateDataPBImpl() {
builder = ApplicationStateDataProto.newBuilder();
}
public ApplicationStateDataPBImpl(
ApplicationStateDataProto proto) {
this.proto = proto;
viaProto = true;
}
@Override
public ApplicationStateDataProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void mergeLocalToBuilder() {
if (this.applicationSubmissionContext != null) {
builder.setApplicationSubmissionContext(
((ApplicationSubmissionContextPBImpl)applicationSubmissionContext)
.getProto());
}
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = ApplicationStateDataProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public long getSubmitTime() {
ApplicationStateDataProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasSubmitTime()) {
return -1;
}
return (p.getSubmitTime());
}
@Override
public void setSubmitTime(long submitTime) {
maybeInitBuilder();
builder.setSubmitTime(submitTime);
}
@Override
public long getStartTime() {
ApplicationStateDataProtoOrBuilder p = viaProto ? proto : builder;
return p.getStartTime();
}
@Override
public void setStartTime(long startTime) {
maybeInitBuilder();
builder.setStartTime(startTime);
}
@Override
public String getUser() {
ApplicationStateDataProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasUser()) {
return null;
}
return (p.getUser());
}
@Override
public void setUser(String user) {
maybeInitBuilder();
builder.setUser(user);
}
@Override
public ApplicationSubmissionContext getApplicationSubmissionContext() {
ApplicationStateDataProtoOrBuilder p = viaProto ? proto : builder;
if(applicationSubmissionContext != null) {
return applicationSubmissionContext;
}
if (!p.hasApplicationSubmissionContext()) {
return null;
}
applicationSubmissionContext =
new ApplicationSubmissionContextPBImpl(
p.getApplicationSubmissionContext());
return applicationSubmissionContext;
}
@Override
public void setApplicationSubmissionContext(
ApplicationSubmissionContext context) {
maybeInitBuilder();
if (context == null) {
builder.clearApplicationSubmissionContext();
}
this.applicationSubmissionContext = context;
}
@Override
public RMAppState getState() {
ApplicationStateDataProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasApplicationState()) {
return null;
}
return convertFromProtoFormat(p.getApplicationState());
}
@Override
public void setState(RMAppState finalState) {
maybeInitBuilder();
if (finalState == null) {
builder.clearApplicationState();
return;
}
builder.setApplicationState(convertToProtoFormat(finalState));
}
@Override
public String getDiagnostics() {
ApplicationStateDataProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasDiagnostics()) {
return null;
}
return p.getDiagnostics();
}
@Override
public void setDiagnostics(String diagnostics) {
maybeInitBuilder();
if (diagnostics == null) {
builder.clearDiagnostics();
return;
}
builder.setDiagnostics(diagnostics);
}
@Override
public long getFinishTime() {
ApplicationStateDataProtoOrBuilder p = viaProto ? proto : builder;
return p.getFinishTime();
}
@Override
public void setFinishTime(long finishTime) {
maybeInitBuilder();
builder.setFinishTime(finishTime);
}
@Override
public int hashCode() {
return getProto().hashCode();
}
@Override
public boolean equals(Object other) {
if (other == null)
return false;
if (other.getClass().isAssignableFrom(this.getClass())) {
return this.getProto().equals(this.getClass().cast(other).getProto());
}
return false;
}
@Override
public String toString() {
return TextFormat.shortDebugString(getProto());
}
private static String RM_APP_PREFIX = "RMAPP_";
public static RMAppStateProto convertToProtoFormat(RMAppState e) {
return RMAppStateProto.valueOf(RM_APP_PREFIX + e.name());
}
public static RMAppState convertFromProtoFormat(RMAppStateProto e) {
return RMAppState.valueOf(e.name().replace(RM_APP_PREFIX, ""));
}
}
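The PBImpl above follows the usual YARN viaProto pattern: mutations go through the builder, and getProto() merges local fields and freezes the record. A minimal round-trip sketch (example class and field values are hypothetical, not from the Hadoop source):

import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.ApplicationStateDataProto;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.ApplicationStateDataPBImpl;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;

public class StateDataRoundTrip {
  public static void main(String[] args) {
    // Mutations go through the builder (viaProto == false)...
    ApplicationStateDataPBImpl state = new ApplicationStateDataPBImpl();
    state.setUser("alice");          // hypothetical user
    state.setSubmitTime(1000L);      // hypothetical timestamp
    state.setState(RMAppState.RUNNING);
    // ...getProto() merges local fields and flips viaProto to true.
    ApplicationStateDataProto proto = state.getProto();
    // A store can later rebuild an equivalent record from the raw proto.
    ApplicationStateDataPBImpl restored = new ApplicationStateDataPBImpl(proto);
    System.out.println(restored.getUser() + " @ " + restored.getSubmitTime());
  }
}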
| 6,514 | 27.827434 | 111 | java |
hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/AMRMTokenSecretManagerStatePBImpl.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb;
import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.MasterKeyProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.AMRMTokenSecretManagerStateProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.AMRMTokenSecretManagerStateProtoOrBuilder;
import org.apache.hadoop.yarn.server.api.records.MasterKey;
import org.apache.hadoop.yarn.server.api.records.impl.pb.MasterKeyPBImpl;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.AMRMTokenSecretManagerState;
public class AMRMTokenSecretManagerStatePBImpl
    extends AMRMTokenSecretManagerState {
AMRMTokenSecretManagerStateProto proto =
AMRMTokenSecretManagerStateProto.getDefaultInstance();
AMRMTokenSecretManagerStateProto.Builder builder = null;
boolean viaProto = false;
private MasterKey currentMasterKey = null;
private MasterKey nextMasterKey = null;
public AMRMTokenSecretManagerStatePBImpl() {
builder = AMRMTokenSecretManagerStateProto.newBuilder();
}
public AMRMTokenSecretManagerStatePBImpl(AMRMTokenSecretManagerStateProto proto) {
this.proto = proto;
viaProto = true;
}
  @Override
  public AMRMTokenSecretManagerStateProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void mergeLocalToBuilder() {
if (this.currentMasterKey != null) {
builder.setCurrentMasterKey(convertToProtoFormat(this.currentMasterKey));
}
if (this.nextMasterKey != null) {
builder.setNextMasterKey(convertToProtoFormat(this.nextMasterKey));
}
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = AMRMTokenSecretManagerStateProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public MasterKey getCurrentMasterKey() {
AMRMTokenSecretManagerStateProtoOrBuilder p = viaProto ? proto : builder;
if (this.currentMasterKey != null) {
return this.currentMasterKey;
}
if (!p.hasCurrentMasterKey()) {
return null;
}
this.currentMasterKey = convertFromProtoFormat(p.getCurrentMasterKey());
return this.currentMasterKey;
}
@Override
public void setCurrentMasterKey(MasterKey currentMasterKey) {
maybeInitBuilder();
    if (currentMasterKey == null) {
      builder.clearCurrentMasterKey();
    }
    this.currentMasterKey = currentMasterKey;
}
@Override
public MasterKey getNextMasterKey() {
AMRMTokenSecretManagerStateProtoOrBuilder p = viaProto ? proto : builder;
if (this.nextMasterKey != null) {
return this.nextMasterKey;
}
if (!p.hasNextMasterKey()) {
return null;
}
this.nextMasterKey = convertFromProtoFormat(p.getNextMasterKey());
return this.nextMasterKey;
}
@Override
public void setNextMasterKey(MasterKey nextMasterKey) {
maybeInitBuilder();
    if (nextMasterKey == null) {
      builder.clearNextMasterKey();
    }
    this.nextMasterKey = nextMasterKey;
}
private MasterKeyProto convertToProtoFormat(MasterKey t) {
return ((MasterKeyPBImpl) t).getProto();
}
private MasterKeyPBImpl convertFromProtoFormat(MasterKeyProto p) {
return new MasterKeyPBImpl(p);
}
}
| 4,257 | 32.527559 | 118 | java |
hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/EpochPBImpl.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.EpochProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.EpochProtoOrBuilder;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.Epoch;
public class EpochPBImpl extends Epoch {
EpochProto proto = EpochProto.getDefaultInstance();
EpochProto.Builder builder = null;
boolean viaProto = false;
public EpochPBImpl() {
builder = EpochProto.newBuilder();
}
public EpochPBImpl(EpochProto proto) {
this.proto = proto;
viaProto = true;
}
  @Override
  public EpochProto getProto() {
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = EpochProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public long getEpoch() {
EpochProtoOrBuilder p = viaProto ? proto : builder;
return p.getEpoch();
}
@Override
public void setEpoch(long sequentialNumber) {
maybeInitBuilder();
builder.setEpoch(sequentialNumber);
}
}
| 2,011 | 28.588235 | 96 | java |
hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/ApplicationAttemptStateDataPBImpl.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb;
import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.DataInputByteBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationAttemptIdPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ContainerPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils;
import org.apache.hadoop.yarn.proto.YarnProtos.FinalApplicationStatusProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.ApplicationAttemptStateDataProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.ApplicationAttemptStateDataProtoOrBuilder;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.RMAppAttemptStateProto;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationAttemptStateData;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
import com.google.protobuf.TextFormat;
public class ApplicationAttemptStateDataPBImpl extends
ApplicationAttemptStateData {
  private static final Log LOG =
LogFactory.getLog(ApplicationAttemptStateDataPBImpl.class);
ApplicationAttemptStateDataProto proto =
ApplicationAttemptStateDataProto.getDefaultInstance();
ApplicationAttemptStateDataProto.Builder builder = null;
boolean viaProto = false;
private ApplicationAttemptId attemptId = null;
private Container masterContainer = null;
private ByteBuffer appAttemptTokens = null;
public ApplicationAttemptStateDataPBImpl() {
builder = ApplicationAttemptStateDataProto.newBuilder();
}
public ApplicationAttemptStateDataPBImpl(
ApplicationAttemptStateDataProto proto) {
this.proto = proto;
viaProto = true;
}
@Override
public ApplicationAttemptStateDataProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void mergeLocalToBuilder() {
if (this.attemptId != null) {
builder.setAttemptId(((ApplicationAttemptIdPBImpl)attemptId).getProto());
}
if(this.masterContainer != null) {
builder.setMasterContainer(((ContainerPBImpl)masterContainer).getProto());
}
if(this.appAttemptTokens != null) {
builder.setAppAttemptTokens(ProtoUtils.convertToProtoFormat(
this.appAttemptTokens));
}
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = ApplicationAttemptStateDataProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public ApplicationAttemptId getAttemptId() {
ApplicationAttemptStateDataProtoOrBuilder p = viaProto ? proto : builder;
if(attemptId != null) {
return attemptId;
}
if (!p.hasAttemptId()) {
return null;
}
attemptId = new ApplicationAttemptIdPBImpl(p.getAttemptId());
return attemptId;
}
@Override
public void setAttemptId(ApplicationAttemptId attemptId) {
maybeInitBuilder();
if (attemptId == null) {
builder.clearAttemptId();
}
this.attemptId = attemptId;
}
@Override
public Container getMasterContainer() {
ApplicationAttemptStateDataProtoOrBuilder p = viaProto ? proto : builder;
if(masterContainer != null) {
return masterContainer;
}
if (!p.hasMasterContainer()) {
return null;
}
masterContainer = new ContainerPBImpl(p.getMasterContainer());
return masterContainer;
}
@Override
public void setMasterContainer(Container container) {
maybeInitBuilder();
if (container == null) {
builder.clearMasterContainer();
}
this.masterContainer = container;
}
@Override
public Credentials getAppAttemptTokens() {
ApplicationAttemptStateDataProtoOrBuilder p = viaProto ? proto : builder;
if(appAttemptTokens != null) {
return convertCredentialsFromByteBuffer(appAttemptTokens);
}
if(!p.hasAppAttemptTokens()) {
return null;
}
this.appAttemptTokens = ProtoUtils.convertFromProtoFormat(
p.getAppAttemptTokens());
return convertCredentialsFromByteBuffer(appAttemptTokens);
}
@Override
public void setAppAttemptTokens(Credentials attemptTokens) {
maybeInitBuilder();
if(attemptTokens == null) {
builder.clearAppAttemptTokens();
return;
}
this.appAttemptTokens = convertCredentialsToByteBuffer(attemptTokens);
}
@Override
public RMAppAttemptState getState() {
ApplicationAttemptStateDataProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasAppAttemptState()) {
return null;
}
return convertFromProtoFormat(p.getAppAttemptState());
}
@Override
public void setState(RMAppAttemptState state) {
maybeInitBuilder();
if (state == null) {
builder.clearAppAttemptState();
return;
}
builder.setAppAttemptState(convertToProtoFormat(state));
}
@Override
public String getFinalTrackingUrl() {
ApplicationAttemptStateDataProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasFinalTrackingUrl()) {
return null;
}
return p.getFinalTrackingUrl();
}
@Override
public void setFinalTrackingUrl(String url) {
maybeInitBuilder();
if (url == null) {
builder.clearFinalTrackingUrl();
return;
}
builder.setFinalTrackingUrl(url);
}
@Override
public String getDiagnostics() {
ApplicationAttemptStateDataProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasDiagnostics()) {
return null;
}
return p.getDiagnostics();
}
@Override
public void setDiagnostics(String diagnostics) {
maybeInitBuilder();
if (diagnostics == null) {
builder.clearDiagnostics();
return;
}
builder.setDiagnostics(diagnostics);
}
@Override
public long getStartTime() {
ApplicationAttemptStateDataProtoOrBuilder p = viaProto ? proto : builder;
return p.getStartTime();
}
@Override
public void setStartTime(long startTime) {
maybeInitBuilder();
builder.setStartTime(startTime);
}
@Override
public long getMemorySeconds() {
ApplicationAttemptStateDataProtoOrBuilder p = viaProto ? proto : builder;
return p.getMemorySeconds();
}
@Override
public long getVcoreSeconds() {
ApplicationAttemptStateDataProtoOrBuilder p = viaProto ? proto : builder;
return p.getVcoreSeconds();
}
@Override
public void setMemorySeconds(long memorySeconds) {
maybeInitBuilder();
builder.setMemorySeconds(memorySeconds);
}
@Override
public void setVcoreSeconds(long vcoreSeconds) {
maybeInitBuilder();
builder.setVcoreSeconds(vcoreSeconds);
}
@Override
public FinalApplicationStatus getFinalApplicationStatus() {
ApplicationAttemptStateDataProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasFinalApplicationStatus()) {
return null;
}
return convertFromProtoFormat(p.getFinalApplicationStatus());
}
@Override
public void setFinalApplicationStatus(FinalApplicationStatus finishState) {
maybeInitBuilder();
if (finishState == null) {
builder.clearFinalApplicationStatus();
return;
}
builder.setFinalApplicationStatus(convertToProtoFormat(finishState));
}
@Override
public int hashCode() {
return getProto().hashCode();
}
@Override
public int getAMContainerExitStatus() {
ApplicationAttemptStateDataProtoOrBuilder p = viaProto ? proto : builder;
return p.getAmContainerExitStatus();
}
@Override
public void setAMContainerExitStatus(int exitStatus) {
maybeInitBuilder();
builder.setAmContainerExitStatus(exitStatus);
}
@Override
public boolean equals(Object other) {
if (other == null)
return false;
if (other.getClass().isAssignableFrom(this.getClass())) {
return this.getProto().equals(this.getClass().cast(other).getProto());
}
return false;
}
@Override
public String toString() {
return TextFormat.shortDebugString(getProto());
}
private static String RM_APP_ATTEMPT_PREFIX = "RMATTEMPT_";
public static RMAppAttemptStateProto convertToProtoFormat(RMAppAttemptState e) {
return RMAppAttemptStateProto.valueOf(RM_APP_ATTEMPT_PREFIX + e.name());
}
public static RMAppAttemptState convertFromProtoFormat(RMAppAttemptStateProto e) {
return RMAppAttemptState.valueOf(e.name().replace(RM_APP_ATTEMPT_PREFIX, ""));
}
private FinalApplicationStatusProto convertToProtoFormat(FinalApplicationStatus s) {
return ProtoUtils.convertToProtoFormat(s);
}
private FinalApplicationStatus convertFromProtoFormat(FinalApplicationStatusProto s) {
return ProtoUtils.convertFromProtoFormat(s);
}
@Override
public long getFinishTime() {
ApplicationAttemptStateDataProtoOrBuilder p = viaProto ? proto : builder;
return p.getFinishTime();
}
@Override
public void setFinishTime(long finishTime) {
maybeInitBuilder();
builder.setFinishTime(finishTime);
}
private static ByteBuffer convertCredentialsToByteBuffer(
Credentials credentials) {
ByteBuffer appAttemptTokens = null;
DataOutputBuffer dob = new DataOutputBuffer();
try {
if (credentials != null) {
credentials.writeTokenStorageToStream(dob);
appAttemptTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
}
return appAttemptTokens;
} catch (IOException e) {
LOG.error("Failed to convert Credentials to ByteBuffer.");
assert false;
return null;
} finally {
IOUtils.closeStream(dob);
}
}
private static Credentials convertCredentialsFromByteBuffer(
ByteBuffer appAttemptTokens) {
DataInputByteBuffer dibb = new DataInputByteBuffer();
try {
Credentials credentials = null;
if (appAttemptTokens != null) {
credentials = new Credentials();
appAttemptTokens.rewind();
dibb.reset(appAttemptTokens);
credentials.readTokenStorageStream(dibb);
}
return credentials;
} catch (IOException e) {
LOG.error("Failed to convert Credentials from ByteBuffer.");
assert false;
return null;
} finally {
IOUtils.closeStream(dibb);
}
}
}
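The two private helpers above wrap a Credentials-to-ByteBuffer round trip. A standalone sketch of the same pattern (the example class name is mine); note that only dob.getLength() bytes are wrapped, not the whole backing array, and that the buffer is rewound before reading in case it was already consumed:

import java.nio.ByteBuffer;
import org.apache.hadoop.io.DataInputByteBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.security.Credentials;

public class CredentialsBufferExample {
  public static void main(String[] args) throws Exception {
    Credentials creds = new Credentials();
    // Write token storage into a growable buffer, then wrap only the
    // bytes actually used.
    DataOutputBuffer dob = new DataOutputBuffer();
    creds.writeTokenStorageToStream(dob);
    ByteBuffer buf = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
    // Read it back the same way getAppAttemptTokens() does.
    buf.rewind();
    DataInputByteBuffer dibb = new DataInputByteBuffer();
    dibb.reset(buf);
    Credentials restored = new Credentials();
    restored.readTokenStorageStream(dibb);
    System.out.println(restored.numberOfTokens());
  }
}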
| 11,596 | 29.200521 | 118 | java |
hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/WritingContainerFinishEvent.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.ahs;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerFinishData;
public class WritingContainerFinishEvent extends WritingApplicationHistoryEvent {
private ContainerId containerId;
private ContainerFinishData containerFinish;
public WritingContainerFinishEvent(ContainerId containerId,
ContainerFinishData containerFinish) {
super(WritingHistoryEventType.CONTAINER_FINISH);
this.containerId = containerId;
this.containerFinish = containerFinish;
}
@Override
public int hashCode() {
return containerId.getApplicationAttemptId().getApplicationId().hashCode();
}
public ContainerId getContainerId() {
return containerId;
}
public ContainerFinishData getContainerFinishData() {
return containerFinish;
}
}
| 1,710 | 33.22 | 91 | java |
hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/RMApplicationHistoryWriter.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.ahs;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.CompositeService;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.event.Event;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryStore;
import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryWriter;
import org.apache.hadoop.yarn.server.applicationhistoryservice.FileSystemApplicationHistoryStore;
import org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptFinishData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptStartData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationFinishData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationStartData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerFinishData;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerStartData;
import org.apache.hadoop.yarn.server.resourcemanager.RMServerUtils;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import com.google.common.annotations.VisibleForTesting;
/**
 * <p>
 * {@link ResourceManager} uses this class to write the information of
 * {@link RMApp}, {@link RMAppAttempt} and {@link RMContainer}. These APIs are
 * non-blocking: they simply schedule a history-writing event. A self-contained
 * dispatcher pool handles the events in separate threads and extracts the
 * fields that need to be persisted. The extracted information is then
 * persisted via the configured {@link ApplicationHistoryStore} implementation.
 * </p>
 */
@Private
@Unstable
public class RMApplicationHistoryWriter extends CompositeService {
public static final Log LOG = LogFactory
.getLog(RMApplicationHistoryWriter.class);
private Dispatcher dispatcher;
@VisibleForTesting
ApplicationHistoryWriter writer;
@VisibleForTesting
boolean historyServiceEnabled;
public RMApplicationHistoryWriter() {
super(RMApplicationHistoryWriter.class.getName());
}
@Override
protected synchronized void serviceInit(Configuration conf) throws Exception {
historyServiceEnabled =
conf.getBoolean(YarnConfiguration.APPLICATION_HISTORY_ENABLED,
YarnConfiguration.DEFAULT_APPLICATION_HISTORY_ENABLED);
if (conf.get(YarnConfiguration.APPLICATION_HISTORY_STORE) == null ||
conf.get(YarnConfiguration.APPLICATION_HISTORY_STORE).length() == 0 ||
conf.get(YarnConfiguration.APPLICATION_HISTORY_STORE).equals(
NullApplicationHistoryStore.class.getName())) {
historyServiceEnabled = false;
}
    // Only create the services when the history service is enabled and is
    // not using the null store, to avoid wasting system resources.
if (historyServiceEnabled) {
writer = createApplicationHistoryStore(conf);
addIfService(writer);
dispatcher = createDispatcher(conf);
dispatcher.register(WritingHistoryEventType.class,
new ForwardingEventHandler());
addIfService(dispatcher);
}
super.serviceInit(conf);
}
  protected Dispatcher createDispatcher(Configuration conf) {
    MultiThreadedDispatcher dispatcher = new MultiThreadedDispatcher(
        conf.getInt(
            YarnConfiguration.RM_HISTORY_WRITER_MULTI_THREADED_DISPATCHER_POOL_SIZE,
            YarnConfiguration.DEFAULT_RM_HISTORY_WRITER_MULTI_THREADED_DISPATCHER_POOL_SIZE));
    dispatcher.setDrainEventsOnStop();
    return dispatcher;
  }
protected ApplicationHistoryStore createApplicationHistoryStore(
Configuration conf) {
try {
Class<? extends ApplicationHistoryStore> storeClass =
conf.getClass(YarnConfiguration.APPLICATION_HISTORY_STORE,
NullApplicationHistoryStore.class,
ApplicationHistoryStore.class);
return storeClass.newInstance();
} catch (Exception e) {
String msg =
"Could not instantiate ApplicationHistoryWriter: "
+ conf.get(YarnConfiguration.APPLICATION_HISTORY_STORE,
NullApplicationHistoryStore.class.getName());
LOG.error(msg, e);
throw new YarnRuntimeException(msg, e);
}
}
protected void handleWritingApplicationHistoryEvent(
WritingApplicationHistoryEvent event) {
switch (event.getType()) {
case APP_START:
WritingApplicationStartEvent wasEvent =
(WritingApplicationStartEvent) event;
try {
writer.applicationStarted(wasEvent.getApplicationStartData());
LOG.info("Stored the start data of application "
+ wasEvent.getApplicationId());
} catch (IOException e) {
LOG.error("Error when storing the start data of application "
+ wasEvent.getApplicationId());
}
break;
case APP_FINISH:
WritingApplicationFinishEvent wafEvent =
(WritingApplicationFinishEvent) event;
try {
writer.applicationFinished(wafEvent.getApplicationFinishData());
LOG.info("Stored the finish data of application "
+ wafEvent.getApplicationId());
} catch (IOException e) {
LOG.error("Error when storing the finish data of application "
+ wafEvent.getApplicationId());
}
break;
case APP_ATTEMPT_START:
WritingApplicationAttemptStartEvent waasEvent =
(WritingApplicationAttemptStartEvent) event;
try {
writer.applicationAttemptStarted(waasEvent
.getApplicationAttemptStartData());
LOG.info("Stored the start data of application attempt "
+ waasEvent.getApplicationAttemptId());
} catch (IOException e) {
LOG.error("Error when storing the start data of application attempt "
+ waasEvent.getApplicationAttemptId());
}
break;
case APP_ATTEMPT_FINISH:
WritingApplicationAttemptFinishEvent waafEvent =
(WritingApplicationAttemptFinishEvent) event;
try {
writer.applicationAttemptFinished(waafEvent
.getApplicationAttemptFinishData());
LOG.info("Stored the finish data of application attempt "
+ waafEvent.getApplicationAttemptId());
} catch (IOException e) {
        LOG.error("Error when storing the finish data of application attempt "
            + waafEvent.getApplicationAttemptId(), e);
}
break;
case CONTAINER_START:
WritingContainerStartEvent wcsEvent =
(WritingContainerStartEvent) event;
try {
writer.containerStarted(wcsEvent.getContainerStartData());
LOG.info("Stored the start data of container "
+ wcsEvent.getContainerId());
} catch (IOException e) {
LOG.error("Error when storing the start data of container "
+ wcsEvent.getContainerId());
}
break;
case CONTAINER_FINISH:
WritingContainerFinishEvent wcfEvent =
(WritingContainerFinishEvent) event;
try {
writer.containerFinished(wcfEvent.getContainerFinishData());
LOG.info("Stored the finish data of container "
+ wcfEvent.getContainerId());
} catch (IOException e) {
LOG.error("Error when storing the finish data of container "
+ wcfEvent.getContainerId());
}
break;
default:
LOG.error("Unknown WritingApplicationHistoryEvent type: "
+ event.getType());
}
}
@SuppressWarnings("unchecked")
public void applicationStarted(RMApp app) {
if (historyServiceEnabled) {
dispatcher.getEventHandler().handle(
new WritingApplicationStartEvent(app.getApplicationId(),
ApplicationStartData.newInstance(app.getApplicationId(), app.getName(),
app.getApplicationType(), app.getQueue(), app.getUser(),
app.getSubmitTime(), app.getStartTime())));
}
}
@SuppressWarnings("unchecked")
public void applicationFinished(RMApp app, RMAppState finalState) {
if (historyServiceEnabled) {
dispatcher.getEventHandler().handle(
new WritingApplicationFinishEvent(app.getApplicationId(),
ApplicationFinishData.newInstance(app.getApplicationId(),
app.getFinishTime(), app.getDiagnostics().toString(),
app.getFinalApplicationStatus(),
RMServerUtils.createApplicationState(finalState))));
}
}
@SuppressWarnings("unchecked")
public void applicationAttemptStarted(RMAppAttempt appAttempt) {
if (historyServiceEnabled) {
dispatcher.getEventHandler().handle(
new WritingApplicationAttemptStartEvent(appAttempt.getAppAttemptId(),
ApplicationAttemptStartData.newInstance(appAttempt.getAppAttemptId(),
appAttempt.getHost(), appAttempt.getRpcPort(), appAttempt
.getMasterContainer().getId())));
}
}
@SuppressWarnings("unchecked")
public void applicationAttemptFinished(RMAppAttempt appAttempt,
RMAppAttemptState finalState) {
if (historyServiceEnabled) {
dispatcher.getEventHandler().handle(
new WritingApplicationAttemptFinishEvent(appAttempt.getAppAttemptId(),
ApplicationAttemptFinishData.newInstance(
appAttempt.getAppAttemptId(), appAttempt.getDiagnostics()
.toString(), appAttempt.getTrackingUrl(), appAttempt
.getFinalApplicationStatus(),
RMServerUtils.createApplicationAttemptState(finalState))));
}
}
@SuppressWarnings("unchecked")
public void containerStarted(RMContainer container) {
if (historyServiceEnabled) {
dispatcher.getEventHandler().handle(
new WritingContainerStartEvent(container.getContainerId(),
ContainerStartData.newInstance(container.getContainerId(),
container.getAllocatedResource(), container.getAllocatedNode(),
container.getAllocatedPriority(), container.getCreationTime())));
}
}
@SuppressWarnings("unchecked")
public void containerFinished(RMContainer container) {
if (historyServiceEnabled) {
dispatcher.getEventHandler().handle(
new WritingContainerFinishEvent(container.getContainerId(),
ContainerFinishData.newInstance(container.getContainerId(),
container.getFinishTime(), container.getDiagnosticsInfo(),
container.getContainerExitStatus(),
container.getContainerState())));
}
}
/**
   * EventHandler implementation that forwards events to the history writer.
   * Using it, the writer avoids having to expose a public handle method.
*/
private final class ForwardingEventHandler implements
EventHandler<WritingApplicationHistoryEvent> {
@Override
public void handle(WritingApplicationHistoryEvent event) {
handleWritingApplicationHistoryEvent(event);
}
}
@SuppressWarnings({ "rawtypes", "unchecked" })
protected static class MultiThreadedDispatcher extends CompositeService
implements Dispatcher {
private List<AsyncDispatcher> dispatchers =
new ArrayList<AsyncDispatcher>();
public MultiThreadedDispatcher(int num) {
super(MultiThreadedDispatcher.class.getName());
for (int i = 0; i < num; ++i) {
AsyncDispatcher dispatcher = createDispatcher();
dispatchers.add(dispatcher);
addIfService(dispatcher);
}
}
@Override
public EventHandler getEventHandler() {
      return new CompositeEventHandler();
}
@Override
public void register(Class<? extends Enum> eventType, EventHandler handler) {
for (AsyncDispatcher dispatcher : dispatchers) {
dispatcher.register(eventType, handler);
}
}
public void setDrainEventsOnStop() {
for (AsyncDispatcher dispatcher : dispatchers) {
dispatcher.setDrainEventsOnStop();
}
}
private class CompositEventHandler implements EventHandler<Event> {
@Override
public void handle(Event event) {
        // Use the hashCode (of the ApplicationId) to pick the child
        // dispatcher, so that all the writing events of one application are
        // handled by the same thread and their scheduled order is preserved.
int index = (event.hashCode() & Integer.MAX_VALUE) % dispatchers.size();
dispatchers.get(index).getEventHandler().handle(event);
}
}
protected AsyncDispatcher createDispatcher() {
return new AsyncDispatcher();
}
}
}
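The routing in CompositeEventHandler deserves a note: masking the hash with Integer.MAX_VALUE clears the sign bit, which is safer than Math.abs() because Math.abs(Integer.MIN_VALUE) is still negative. A self-contained sketch of the same index computation (the example class is mine, not part of the Hadoop source):

public class HashRoutingExample {
  // Mirrors the routing in MultiThreadedDispatcher.CompositeEventHandler:
  // clearing the sign bit keeps the modulus non-negative for any hash.
  static int route(int hash, int poolSize) {
    return (hash & Integer.MAX_VALUE) % poolSize;
  }

  public static void main(String[] args) {
    System.out.println(route("app_1_0001".hashCode(), 8)); // stable index in [0, 8)
    System.out.println(route(Integer.MIN_VALUE, 8));       // 0, not an exception
  }
}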
| 14,583 | 39.065934 | 100 | java |
hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/WritingHistoryEventType.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.ahs;
public enum WritingHistoryEventType {
APP_START, APP_FINISH, APP_ATTEMPT_START, APP_ATTEMPT_FINISH,
CONTAINER_START, CONTAINER_FINISH
}
| 1,007 | 41 | 75 | java |
hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/WritingApplicationAttemptFinishEvent.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.ahs;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptFinishData;
public class WritingApplicationAttemptFinishEvent extends
WritingApplicationHistoryEvent {
private ApplicationAttemptId appAttemptId;
private ApplicationAttemptFinishData appAttemptFinish;
public WritingApplicationAttemptFinishEvent(
ApplicationAttemptId appAttemptId,
ApplicationAttemptFinishData appAttemptFinish) {
super(WritingHistoryEventType.APP_ATTEMPT_FINISH);
this.appAttemptId = appAttemptId;
this.appAttemptFinish = appAttemptFinish;
}
@Override
public int hashCode() {
return appAttemptId.getApplicationId().hashCode();
}
public ApplicationAttemptId getApplicationAttemptId() {
return appAttemptId;
}
public ApplicationAttemptFinishData getApplicationAttemptFinishData() {
return appAttemptFinish;
}
}
| 1,816 | 33.942308 | 100 | java |
hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/WritingApplicationStartEvent.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.ahs;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationStartData;
public class WritingApplicationStartEvent extends
WritingApplicationHistoryEvent {
private ApplicationId appId;
private ApplicationStartData appStart;
public WritingApplicationStartEvent(ApplicationId appId,
ApplicationStartData appStart) {
super(WritingHistoryEventType.APP_START);
this.appId = appId;
this.appStart = appStart;
}
@Override
public int hashCode() {
return appId.hashCode();
}
public ApplicationId getApplicationId() {
return appId;
}
public ApplicationStartData getApplicationStartData() {
return appStart;
}
}
| 1,608 | 30.54902 | 92 | java |
hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/WritingApplicationFinishEvent.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.ahs;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationFinishData;
public class WritingApplicationFinishEvent extends
WritingApplicationHistoryEvent {
private ApplicationId appId;
private ApplicationFinishData appFinish;
public WritingApplicationFinishEvent(ApplicationId appId,
ApplicationFinishData appFinish) {
super(WritingHistoryEventType.APP_FINISH);
this.appId = appId;
this.appFinish = appFinish;
}
@Override
public int hashCode() {
return appId.hashCode();
}
public ApplicationId getApplicationId() {
return appId;
}
public ApplicationFinishData getApplicationFinishData() {
return appFinish;
}
}
| 1,621 | 30.803922 | 93 | java |
hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/WritingApplicationAttemptStartEvent.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.ahs;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptStartData;
public class WritingApplicationAttemptStartEvent extends
WritingApplicationHistoryEvent {
private ApplicationAttemptId appAttemptId;
private ApplicationAttemptStartData appAttemptStart;
public WritingApplicationAttemptStartEvent(ApplicationAttemptId appAttemptId,
ApplicationAttemptStartData appAttemptStart) {
super(WritingHistoryEventType.APP_ATTEMPT_START);
this.appAttemptId = appAttemptId;
this.appAttemptStart = appAttemptStart;
}
@Override
public int hashCode() {
return appAttemptId.getApplicationId().hashCode();
}
public ApplicationAttemptId getApplicationAttemptId() {
return appAttemptId;
}
public ApplicationAttemptStartData getApplicationAttemptStartData() {
return appAttemptStart;
}
}
| 1,796 | 34.235294 | 99 | java |
hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/WritingContainerStartEvent.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.ahs;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerStartData;
public class WritingContainerStartEvent extends WritingApplicationHistoryEvent {
private ContainerId containerId;
private ContainerStartData containerStart;
public WritingContainerStartEvent(ContainerId containerId,
ContainerStartData containerStart) {
super(WritingHistoryEventType.CONTAINER_START);
this.containerId = containerId;
this.containerStart = containerStart;
}
@Override
public int hashCode() {
return containerId.getApplicationAttemptId().getApplicationId().hashCode();
}
public ContainerId getContainerId() {
return containerId;
}
public ContainerStartData getContainerStartData() {
return containerStart;
}
}
| 1,697 | 32.96 | 90 | java |
hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/WritingApplicationHistoryEvent.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.ahs;
import org.apache.hadoop.yarn.event.AbstractEvent;
public class WritingApplicationHistoryEvent extends
AbstractEvent<WritingHistoryEventType> {
public WritingApplicationHistoryEvent(WritingHistoryEventType type) {
super(type);
}
}
| 1,113 | 36.133333 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceType.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.resource;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
@Private
@Evolving
public enum ResourceType {
MEMORY, CPU
}
| 1,056 | 35.448276 | 74 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceWeights.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.resource;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.util.StringUtils;
@Private
@Evolving
public class ResourceWeights {
public static final ResourceWeights NEUTRAL = new ResourceWeights(1.0f);
private float[] weights = new float[ResourceType.values().length];
public ResourceWeights(float memoryWeight, float cpuWeight) {
weights[ResourceType.MEMORY.ordinal()] = memoryWeight;
weights[ResourceType.CPU.ordinal()] = cpuWeight;
}
public ResourceWeights(float weight) {
setWeight(weight);
}
public ResourceWeights() { }
public void setWeight(float weight) {
for (int i = 0; i < weights.length; i++) {
weights[i] = weight;
}
}
public void setWeight(ResourceType resourceType, float weight) {
weights[resourceType.ordinal()] = weight;
}
public float getWeight(ResourceType resourceType) {
return weights[resourceType.ordinal()];
}
  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder();
sb.append("<");
for (int i = 0; i < ResourceType.values().length; i++) {
if (i != 0) {
sb.append(", ");
}
ResourceType resourceType = ResourceType.values()[i];
sb.append(StringUtils.toLowerCase(resourceType.name()));
sb.append(StringUtils.format(" weight=%.1f", getWeight(resourceType)));
}
sb.append(">");
return sb.toString();
}
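  // Illustrative usage sketch (editorial addition, not part of the original
  // class):
  //
  //   ResourceWeights w = new ResourceWeights(2.0f, 1.0f);
  //   float memWeight = w.getWeight(ResourceType.MEMORY); // 2.0f
  //   w.setWeight(ResourceType.CPU, 0.5f);
  //   System.out.println(w); // prints <memory weight=2.0, cpu weight=0.5>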
}
| 2,345 | 31.583333 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/Priority.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.resource;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
public class Priority {
public static org.apache.hadoop.yarn.api.records.Priority create(int prio) {
    org.apache.hadoop.yarn.api.records.Priority priority =
        RecordFactoryProvider.getRecordFactory(null).newRecordInstance(
            org.apache.hadoop.yarn.api.records.Priority.class);
priority.setPriority(prio);
return priority;
}
public static class Comparator
implements java.util.Comparator<org.apache.hadoop.yarn.api.records.Priority> {
@Override
    public int compare(org.apache.hadoop.yarn.api.records.Priority o1,
        org.apache.hadoop.yarn.api.records.Priority o2) {
      // Integer.compare avoids the overflow that plain subtraction could hit
      // with extreme priority values.
      return Integer.compare(o1.getPriority(), o2.getPriority());
}
}
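  // Illustrative usage sketch (editorial addition): sorting with the
  // comparator above orders lower numeric priorities first.
  //
  //   java.util.List<org.apache.hadoop.yarn.api.records.Priority> prios =
  //       new java.util.ArrayList<>();
  //   prios.add(Priority.create(5));
  //   prios.add(Priority.create(1));
  //   java.util.Collections.sort(prios, new Priority.Comparator());
  //   // prios.get(0).getPriority() == 1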
}
| 1,584 | 38.625 | 173 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.rmnode;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.TreeSet;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceOption;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
import org.apache.hadoop.yarn.server.api.protocolrecords.LogAggregationReport;
import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
import org.apache.hadoop.yarn.server.api.records.NodeHealthStatus;
import org.apache.hadoop.yarn.server.resourcemanager.ClusterMetrics;
import org.apache.hadoop.yarn.server.resourcemanager.NodesListManagerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.NodesListManagerEventType;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppRunningOnNodeEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeResourceUpdateSchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
import org.apache.hadoop.yarn.server.utils.BuilderUtils.ContainerIdComparator;
import org.apache.hadoop.yarn.state.InvalidStateTransitionException;
import org.apache.hadoop.yarn.state.MultipleArcTransition;
import org.apache.hadoop.yarn.state.SingleArcTransition;
import org.apache.hadoop.yarn.state.StateMachine;
import org.apache.hadoop.yarn.state.StateMachineFactory;
import com.google.common.annotations.VisibleForTesting;
/**
* This class is used to keep track of all the applications/containers
* running on a node.
*
*/
@Private
@Unstable
@SuppressWarnings("unchecked")
public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
private static final Log LOG = LogFactory.getLog(RMNodeImpl.class);
private static final RecordFactory recordFactory = RecordFactoryProvider
.getRecordFactory(null);
private final ReadLock readLock;
private final WriteLock writeLock;
private final ConcurrentLinkedQueue<UpdatedContainerInfo> nodeUpdateQueue;
private volatile boolean nextHeartBeat = true;
private final NodeId nodeId;
private final RMContext context;
private final String hostName;
private final int commandPort;
private int httpPort;
private final String nodeAddress; // The containerManager address
private String httpAddress;
private volatile Resource totalCapability;
private final Node node;
private String healthReport;
private long lastHealthReportTime;
private String nodeManagerVersion;
private final ContainerAllocationExpirer containerAllocationExpirer;
/* set of containers that have just launched */
private final Set<ContainerId> launchedContainers =
new HashSet<ContainerId>();
/* set of containers that need to be cleaned */
private final Set<ContainerId> containersToClean = new TreeSet<ContainerId>(
new ContainerIdComparator());
/*
* set of containers to notify NM to remove them from its context. Currently,
* this includes containers that were notified to AM about their completion
*/
private final Set<ContainerId> containersToBeRemovedFromNM =
new HashSet<ContainerId>();
/* the list of applications that have finished and need to be purged */
private final List<ApplicationId> finishedApplications =
new ArrayList<ApplicationId>();
/* the list of applications that are running on this node */
private final List<ApplicationId> runningApplications =
new ArrayList<ApplicationId>();
private NodeHeartbeatResponse latestNodeHeartBeatResponse = recordFactory
.newRecordInstance(NodeHeartbeatResponse.class);
private static final StateMachineFactory<RMNodeImpl,
NodeState,
RMNodeEventType,
RMNodeEvent> stateMachineFactory
= new StateMachineFactory<RMNodeImpl,
NodeState,
RMNodeEventType,
RMNodeEvent>(NodeState.NEW)
//Transitions from NEW state
.addTransition(NodeState.NEW, NodeState.RUNNING,
RMNodeEventType.STARTED, new AddNodeTransition())
.addTransition(NodeState.NEW, NodeState.NEW,
RMNodeEventType.RESOURCE_UPDATE,
new UpdateNodeResourceWhenUnusableTransition())
//Transitions from RUNNING state
.addTransition(NodeState.RUNNING,
EnumSet.of(NodeState.RUNNING, NodeState.UNHEALTHY),
RMNodeEventType.STATUS_UPDATE, new StatusUpdateWhenHealthyTransition())
.addTransition(NodeState.RUNNING, NodeState.DECOMMISSIONED,
RMNodeEventType.DECOMMISSION,
new DeactivateNodeTransition(NodeState.DECOMMISSIONED))
.addTransition(NodeState.RUNNING, NodeState.LOST,
RMNodeEventType.EXPIRE,
new DeactivateNodeTransition(NodeState.LOST))
.addTransition(NodeState.RUNNING, NodeState.REBOOTED,
RMNodeEventType.REBOOTING,
new DeactivateNodeTransition(NodeState.REBOOTED))
.addTransition(NodeState.RUNNING, NodeState.RUNNING,
RMNodeEventType.CLEANUP_APP, new CleanUpAppTransition())
.addTransition(NodeState.RUNNING, NodeState.RUNNING,
RMNodeEventType.CLEANUP_CONTAINER, new CleanUpContainerTransition())
.addTransition(NodeState.RUNNING, NodeState.RUNNING,
RMNodeEventType.FINISHED_CONTAINERS_PULLED_BY_AM,
new AddContainersToBeRemovedFromNMTransition())
.addTransition(NodeState.RUNNING, NodeState.RUNNING,
RMNodeEventType.RECONNECTED, new ReconnectNodeTransition())
.addTransition(NodeState.RUNNING, NodeState.RUNNING,
RMNodeEventType.RESOURCE_UPDATE, new UpdateNodeResourceWhenRunningTransition())
.addTransition(NodeState.RUNNING, NodeState.SHUTDOWN,
RMNodeEventType.SHUTDOWN,
new DeactivateNodeTransition(NodeState.SHUTDOWN))
//Transitions from REBOOTED state
.addTransition(NodeState.REBOOTED, NodeState.REBOOTED,
RMNodeEventType.RESOURCE_UPDATE,
new UpdateNodeResourceWhenUnusableTransition())
//Transitions from DECOMMISSIONED state
.addTransition(NodeState.DECOMMISSIONED, NodeState.DECOMMISSIONED,
RMNodeEventType.RESOURCE_UPDATE,
new UpdateNodeResourceWhenUnusableTransition())
.addTransition(NodeState.DECOMMISSIONED, NodeState.DECOMMISSIONED,
RMNodeEventType.FINISHED_CONTAINERS_PULLED_BY_AM,
new AddContainersToBeRemovedFromNMTransition())
//Transitions from LOST state
.addTransition(NodeState.LOST, NodeState.LOST,
RMNodeEventType.RESOURCE_UPDATE,
new UpdateNodeResourceWhenUnusableTransition())
.addTransition(NodeState.LOST, NodeState.LOST,
RMNodeEventType.FINISHED_CONTAINERS_PULLED_BY_AM,
new AddContainersToBeRemovedFromNMTransition())
//Transitions from UNHEALTHY state
.addTransition(NodeState.UNHEALTHY,
EnumSet.of(NodeState.UNHEALTHY, NodeState.RUNNING),
RMNodeEventType.STATUS_UPDATE,
new StatusUpdateWhenUnHealthyTransition())
.addTransition(NodeState.UNHEALTHY, NodeState.DECOMMISSIONED,
RMNodeEventType.DECOMMISSION,
new DeactivateNodeTransition(NodeState.DECOMMISSIONED))
.addTransition(NodeState.UNHEALTHY, NodeState.LOST,
RMNodeEventType.EXPIRE,
new DeactivateNodeTransition(NodeState.LOST))
.addTransition(NodeState.UNHEALTHY, NodeState.REBOOTED,
RMNodeEventType.REBOOTING,
new DeactivateNodeTransition(NodeState.REBOOTED))
.addTransition(NodeState.UNHEALTHY, NodeState.UNHEALTHY,
RMNodeEventType.RECONNECTED, new ReconnectNodeTransition())
.addTransition(NodeState.UNHEALTHY, NodeState.UNHEALTHY,
RMNodeEventType.CLEANUP_APP, new CleanUpAppTransition())
.addTransition(NodeState.UNHEALTHY, NodeState.UNHEALTHY,
RMNodeEventType.CLEANUP_CONTAINER, new CleanUpContainerTransition())
.addTransition(NodeState.UNHEALTHY, NodeState.UNHEALTHY,
RMNodeEventType.RESOURCE_UPDATE, new UpdateNodeResourceWhenUnusableTransition())
.addTransition(NodeState.UNHEALTHY, NodeState.UNHEALTHY,
RMNodeEventType.FINISHED_CONTAINERS_PULLED_BY_AM,
new AddContainersToBeRemovedFromNMTransition())
.addTransition(NodeState.UNHEALTHY, NodeState.SHUTDOWN,
RMNodeEventType.SHUTDOWN,
new DeactivateNodeTransition(NodeState.SHUTDOWN))
//Transitions from SHUTDOWN state
.addTransition(NodeState.SHUTDOWN, NodeState.SHUTDOWN,
RMNodeEventType.RESOURCE_UPDATE,
new UpdateNodeResourceWhenUnusableTransition())
.addTransition(NodeState.SHUTDOWN, NodeState.SHUTDOWN,
RMNodeEventType.FINISHED_CONTAINERS_PULLED_BY_AM,
new AddContainersToBeRemovedFromNMTransition())
// create the topology tables
.installTopology();
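  // Note (editorial): the factory above is a declarative transition table.
  // Each addTransition(fromState, toState(s), eventType, transition) entry
  // registers the handler invoked when eventType arrives in fromState, and
  // installTopology() pre-computes the table once, shared by all RMNodeImpl
  // instances. An event arriving in a state with no matching entry raises an
  // InvalidStateTransitionException (caught in handle() below).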
private final StateMachine<NodeState, RMNodeEventType,
RMNodeEvent> stateMachine;
public RMNodeImpl(NodeId nodeId, RMContext context, String hostName,
int cmPort, int httpPort, Node node, Resource capability, String nodeManagerVersion) {
this.nodeId = nodeId;
this.context = context;
this.hostName = hostName;
this.commandPort = cmPort;
this.httpPort = httpPort;
this.totalCapability = capability;
this.nodeAddress = hostName + ":" + cmPort;
this.httpAddress = hostName + ":" + httpPort;
this.node = node;
this.healthReport = "Healthy";
this.lastHealthReportTime = System.currentTimeMillis();
this.nodeManagerVersion = nodeManagerVersion;
this.latestNodeHeartBeatResponse.setResponseId(0);
ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
this.readLock = lock.readLock();
this.writeLock = lock.writeLock();
this.stateMachine = stateMachineFactory.make(this);
this.nodeUpdateQueue = new ConcurrentLinkedQueue<UpdatedContainerInfo>();
this.containerAllocationExpirer = context.getContainerAllocationExpirer();
}
@Override
public String toString() {
return this.nodeId.toString();
}
@Override
public String getHostName() {
return hostName;
}
@Override
public int getCommandPort() {
return commandPort;
}
@Override
public int getHttpPort() {
return httpPort;
}
@Override
public NodeId getNodeID() {
return this.nodeId;
}
@Override
public String getNodeAddress() {
return this.nodeAddress;
}
@Override
public String getHttpAddress() {
return this.httpAddress;
}
@Override
public Resource getTotalCapability() {
return this.totalCapability;
}
@Override
public String getRackName() {
return node.getNetworkLocation();
}
@Override
public Node getNode() {
return this.node;
}
@Override
public String getHealthReport() {
this.readLock.lock();
try {
return this.healthReport;
} finally {
this.readLock.unlock();
}
}
public void setHealthReport(String healthReport) {
this.writeLock.lock();
try {
this.healthReport = healthReport;
} finally {
this.writeLock.unlock();
}
}
public void setLastHealthReportTime(long lastHealthReportTime) {
this.writeLock.lock();
try {
this.lastHealthReportTime = lastHealthReportTime;
} finally {
this.writeLock.unlock();
}
}
@Override
public long getLastHealthReportTime() {
this.readLock.lock();
try {
return this.lastHealthReportTime;
} finally {
this.readLock.unlock();
}
}
@Override
public String getNodeManagerVersion() {
return nodeManagerVersion;
}
@Override
public NodeState getState() {
this.readLock.lock();
try {
return this.stateMachine.getCurrentState();
} finally {
this.readLock.unlock();
}
}
@Override
public List<ApplicationId> getAppsToCleanup() {
this.readLock.lock();
try {
return new ArrayList<ApplicationId>(this.finishedApplications);
} finally {
this.readLock.unlock();
}
}
@Override
public List<ApplicationId> getRunningApps() {
this.readLock.lock();
try {
return new ArrayList<ApplicationId>(this.runningApplications);
} finally {
this.readLock.unlock();
}
}
@Override
public List<ContainerId> getContainersToCleanUp() {
this.readLock.lock();
try {
return new ArrayList<ContainerId>(this.containersToClean);
} finally {
this.readLock.unlock();
}
  }
@Override
public void updateNodeHeartbeatResponseForCleanup(NodeHeartbeatResponse response) {
this.writeLock.lock();
try {
response.addAllContainersToCleanup(
new ArrayList<ContainerId>(this.containersToClean));
response.addAllApplicationsToCleanup(this.finishedApplications);
response.addContainersToBeRemovedFromNM(
new ArrayList<ContainerId>(this.containersToBeRemovedFromNM));
this.containersToClean.clear();
this.finishedApplications.clear();
this.containersToBeRemovedFromNM.clear();
} finally {
this.writeLock.unlock();
}
  }
@Override
public NodeHeartbeatResponse getLastNodeHeartBeatResponse() {
this.readLock.lock();
try {
return this.latestNodeHeartBeatResponse;
} finally {
this.readLock.unlock();
}
}
  @Override
  public void handle(RMNodeEvent event) {
    LOG.debug("Processing " + event.getNodeId() + " of type " + event.getType());
    writeLock.lock();
    try {
      NodeState oldState = getState();
try {
stateMachine.doTransition(event.getType(), event);
} catch (InvalidStateTransitionException e) {
LOG.error("Can't handle this event at current state", e);
LOG.error("Invalid event " + event.getType() +
" on Node " + this.nodeId);
}
if (oldState != getState()) {
LOG.info(nodeId + " Node Transitioned from " + oldState + " to "
+ getState());
}
}
finally {
writeLock.unlock();
}
}
private void updateMetricsForRejoinedNode(NodeState previousNodeState) {
ClusterMetrics metrics = ClusterMetrics.getMetrics();
metrics.incrNumActiveNodes();
switch (previousNodeState) {
case LOST:
metrics.decrNumLostNMs();
break;
case REBOOTED:
metrics.decrNumRebootedNMs();
break;
case DECOMMISSIONED:
metrics.decrDecommisionedNMs();
break;
case UNHEALTHY:
metrics.decrNumUnhealthyNMs();
break;
case SHUTDOWN:
metrics.decrNumShutdownNMs();
break;
default:
LOG.debug("Unexpected previous node state");
}
}
private void updateMetricsForDeactivatedNode(NodeState initialState,
NodeState finalState) {
ClusterMetrics metrics = ClusterMetrics.getMetrics();
switch (initialState) {
case RUNNING:
metrics.decrNumActiveNodes();
break;
case UNHEALTHY:
metrics.decrNumUnhealthyNMs();
break;
default:
LOG.debug("Unexpected inital state");
}
switch (finalState) {
case DECOMMISSIONED:
metrics.incrDecommisionedNMs();
break;
case LOST:
metrics.incrNumLostNMs();
break;
case REBOOTED:
metrics.incrNumRebootedNMs();
break;
case UNHEALTHY:
metrics.incrNumUnhealthyNMs();
break;
case SHUTDOWN:
metrics.incrNumShutdownNMs();
break;
default:
LOG.debug("Unexpected final state");
}
}
private static void handleRunningAppOnNode(RMNodeImpl rmNode,
RMContext context, ApplicationId appId, NodeId nodeId) {
RMApp app = context.getRMApps().get(appId);
    // If we failed to get the app by appId, something may have gone wrong;
    // just add the app to the finishedApplications list so that it can be
    // cleaned up on the NM.
if (null == app) {
LOG.warn("Cannot get RMApp by appId=" + appId
+ ", just added it to finishedApplications list for cleanup");
rmNode.finishedApplications.add(appId);
rmNode.runningApplications.remove(appId);
return;
}
// Add running applications back due to Node add or Node reconnection.
rmNode.runningApplications.add(appId);
context.getDispatcher().getEventHandler()
.handle(new RMAppRunningOnNodeEvent(appId, nodeId));
}
private static void updateNodeResourceFromEvent(RMNodeImpl rmNode,
RMNodeResourceUpdateEvent event){
ResourceOption resourceOption = event.getResourceOption();
// Set resource on RMNode
rmNode.totalCapability = resourceOption.getResource();
}
public static class AddNodeTransition implements
SingleArcTransition<RMNodeImpl, RMNodeEvent> {
@Override
public void transition(RMNodeImpl rmNode, RMNodeEvent event) {
// Inform the scheduler
RMNodeStartedEvent startEvent = (RMNodeStartedEvent) event;
List<NMContainerStatus> containers = null;
NodeId nodeId = rmNode.nodeId;
if (rmNode.context.getInactiveRMNodes().containsKey(nodeId)) {
// Old node rejoining
        RMNode previousRMNode = rmNode.context.getInactiveRMNodes().get(nodeId);
        rmNode.context.getInactiveRMNodes().remove(nodeId);
        rmNode.updateMetricsForRejoinedNode(previousRMNode.getState());
} else {
// Increment activeNodes explicitly because this is a new node.
ClusterMetrics.getMetrics().incrNumActiveNodes();
containers = startEvent.getNMContainerStatuses();
if (containers != null && !containers.isEmpty()) {
for (NMContainerStatus container : containers) {
if (container.getContainerState() == ContainerState.RUNNING) {
rmNode.launchedContainers.add(container.getContainerId());
}
}
}
}
if (null != startEvent.getRunningApplications()) {
for (ApplicationId appId : startEvent.getRunningApplications()) {
handleRunningAppOnNode(rmNode, rmNode.context, appId, rmNode.nodeId);
}
}
rmNode.context.getDispatcher().getEventHandler()
.handle(new NodeAddedSchedulerEvent(rmNode, containers));
rmNode.context.getDispatcher().getEventHandler().handle(
new NodesListManagerEvent(
NodesListManagerEventType.NODE_USABLE, rmNode));
}
}
public static class ReconnectNodeTransition implements
SingleArcTransition<RMNodeImpl, RMNodeEvent> {
@Override
public void transition(RMNodeImpl rmNode, RMNodeEvent event) {
RMNodeReconnectEvent reconnectEvent = (RMNodeReconnectEvent) event;
RMNode newNode = reconnectEvent.getReconnectedNode();
rmNode.nodeManagerVersion = newNode.getNodeManagerVersion();
List<ApplicationId> runningApps = reconnectEvent.getRunningApplications();
      boolean noRunningApps =
          (runningApps == null) || runningApps.isEmpty();
// No application running on the node, so send node-removal event with
// cleaning up old container info.
if (noRunningApps) {
rmNode.nodeUpdateQueue.clear();
rmNode.context.getDispatcher().getEventHandler().handle(
new NodeRemovedSchedulerEvent(rmNode));
if (rmNode.getHttpPort() == newNode.getHttpPort()) {
// Reset heartbeat ID since node just restarted.
rmNode.getLastNodeHeartBeatResponse().setResponseId(0);
if (!rmNode.getTotalCapability().equals(
newNode.getTotalCapability())) {
rmNode.totalCapability = newNode.getTotalCapability();
}
if (rmNode.getState().equals(NodeState.RUNNING)) {
// Only add old node if old state is RUNNING
rmNode.context.getDispatcher().getEventHandler().handle(
new NodeAddedSchedulerEvent(rmNode));
}
} else {
// Reconnected node differs, so replace old node and start new node
switch (rmNode.getState()) {
case RUNNING:
ClusterMetrics.getMetrics().decrNumActiveNodes();
break;
case UNHEALTHY:
ClusterMetrics.getMetrics().decrNumUnhealthyNMs();
break;
default:
LOG.debug("Unexpected Rmnode state");
}
rmNode.context.getRMNodes().put(newNode.getNodeID(), newNode);
rmNode.context.getDispatcher().getEventHandler().handle(
new RMNodeStartedEvent(newNode.getNodeID(), null, null));
}
} else {
rmNode.httpPort = newNode.getHttpPort();
rmNode.httpAddress = newNode.getHttpAddress();
boolean isCapabilityChanged = false;
if (!rmNode.getTotalCapability().equals(
newNode.getTotalCapability())) {
rmNode.totalCapability = newNode.getTotalCapability();
isCapabilityChanged = true;
}
handleNMContainerStatus(reconnectEvent.getNMContainerStatuses(), rmNode);
// Reset heartbeat ID since node just restarted.
rmNode.getLastNodeHeartBeatResponse().setResponseId(0);
for (ApplicationId appId : reconnectEvent.getRunningApplications()) {
handleRunningAppOnNode(rmNode, rmNode.context, appId, rmNode.nodeId);
}
if (isCapabilityChanged
&& rmNode.getState().equals(NodeState.RUNNING)) {
// Update scheduler node's capacity for reconnect node.
rmNode.context
.getDispatcher()
.getEventHandler()
.handle(
new NodeResourceUpdateSchedulerEvent(rmNode, ResourceOption
.newInstance(newNode.getTotalCapability(), -1)));
}
}
}
private void handleNMContainerStatus(
List<NMContainerStatus> nmContainerStatuses, RMNodeImpl rmnode) {
List<ContainerStatus> containerStatuses =
new ArrayList<ContainerStatus>();
for (NMContainerStatus nmContainerStatus : nmContainerStatuses) {
containerStatuses.add(createContainerStatus(nmContainerStatus));
}
rmnode.handleContainerStatus(containerStatuses);
}
private ContainerStatus createContainerStatus(
NMContainerStatus remoteContainer) {
ContainerStatus cStatus =
ContainerStatus.newInstance(remoteContainer.getContainerId(),
remoteContainer.getContainerState(),
remoteContainer.getDiagnostics(),
remoteContainer.getContainerExitStatus());
return cStatus;
}
}
public static class UpdateNodeResourceWhenRunningTransition
implements SingleArcTransition<RMNodeImpl, RMNodeEvent> {
@Override
public void transition(RMNodeImpl rmNode, RMNodeEvent event) {
RMNodeResourceUpdateEvent updateEvent = (RMNodeResourceUpdateEvent)event;
updateNodeResourceFromEvent(rmNode, updateEvent);
// Notify new resourceOption to scheduler
rmNode.context.getDispatcher().getEventHandler().handle(
new NodeResourceUpdateSchedulerEvent(rmNode, updateEvent.getResourceOption()));
}
}
public static class UpdateNodeResourceWhenUnusableTransition
implements SingleArcTransition<RMNodeImpl, RMNodeEvent> {
@Override
public void transition(RMNodeImpl rmNode, RMNodeEvent event) {
// The node is not usable, only log a warn message
LOG.warn("Try to update resource on a "+ rmNode.getState().toString() +
" node: "+rmNode.toString());
updateNodeResourceFromEvent(rmNode, (RMNodeResourceUpdateEvent)event);
      // No need to notify the scheduler since the SchedulerNode is not
      // functioning now; it can sync later from the RMNode.
}
}
public static class CleanUpAppTransition
implements SingleArcTransition<RMNodeImpl, RMNodeEvent> {
@Override
public void transition(RMNodeImpl rmNode, RMNodeEvent event) {
ApplicationId appId = ((RMNodeCleanAppEvent) event).getAppId();
rmNode.finishedApplications.add(appId);
rmNode.runningApplications.remove(appId);
}
}
public static class CleanUpContainerTransition implements
SingleArcTransition<RMNodeImpl, RMNodeEvent> {
@Override
public void transition(RMNodeImpl rmNode, RMNodeEvent event) {
rmNode.containersToClean.add(((
RMNodeCleanContainerEvent) event).getContainerId());
}
}
public static class AddContainersToBeRemovedFromNMTransition implements
SingleArcTransition<RMNodeImpl, RMNodeEvent> {
@Override
public void transition(RMNodeImpl rmNode, RMNodeEvent event) {
rmNode.containersToBeRemovedFromNM.addAll(((
RMNodeFinishedContainersPulledByAMEvent) event).getContainers());
}
}
public static class DeactivateNodeTransition
implements SingleArcTransition<RMNodeImpl, RMNodeEvent> {
private final NodeState finalState;
public DeactivateNodeTransition(NodeState finalState) {
this.finalState = finalState;
}
@Override
public void transition(RMNodeImpl rmNode, RMNodeEvent event) {
// Inform the scheduler
rmNode.nodeUpdateQueue.clear();
      // If the current state is NodeState.UNHEALTHY, the node has already
      // been removed from the scheduler.
NodeState initialState = rmNode.getState();
if (!initialState.equals(NodeState.UNHEALTHY)) {
rmNode.context.getDispatcher().getEventHandler()
.handle(new NodeRemovedSchedulerEvent(rmNode));
}
rmNode.context.getDispatcher().getEventHandler().handle(
new NodesListManagerEvent(
NodesListManagerEventType.NODE_UNUSABLE, rmNode));
// Deactivate the node
rmNode.context.getRMNodes().remove(rmNode.nodeId);
LOG.info("Deactivating Node " + rmNode.nodeId + " as it is now "
+ finalState);
rmNode.context.getInactiveRMNodes().put(rmNode.nodeId, rmNode);
//Update the metrics
rmNode.updateMetricsForDeactivatedNode(initialState, finalState);
}
}
public static class StatusUpdateWhenHealthyTransition implements
MultipleArcTransition<RMNodeImpl, RMNodeEvent, NodeState> {
@Override
public NodeState transition(RMNodeImpl rmNode, RMNodeEvent event) {
RMNodeStatusEvent statusEvent = (RMNodeStatusEvent) event;
// Switch the last heartbeatresponse.
rmNode.latestNodeHeartBeatResponse = statusEvent.getLatestResponse();
NodeHealthStatus remoteNodeHealthStatus =
statusEvent.getNodeHealthStatus();
rmNode.setHealthReport(remoteNodeHealthStatus.getHealthReport());
rmNode.setLastHealthReportTime(
remoteNodeHealthStatus.getLastHealthReportTime());
if (!remoteNodeHealthStatus.getIsNodeHealthy()) {
LOG.info("Node " + rmNode.nodeId + " reported UNHEALTHY with details: "
+ remoteNodeHealthStatus.getHealthReport());
rmNode.nodeUpdateQueue.clear();
// Inform the scheduler
rmNode.context.getDispatcher().getEventHandler().handle(
new NodeRemovedSchedulerEvent(rmNode));
rmNode.context.getDispatcher().getEventHandler().handle(
new NodesListManagerEvent(
NodesListManagerEventType.NODE_UNUSABLE, rmNode));
// Update metrics
rmNode.updateMetricsForDeactivatedNode(rmNode.getState(),
NodeState.UNHEALTHY);
return NodeState.UNHEALTHY;
}
rmNode.handleContainerStatus(statusEvent.getContainers());
List<LogAggregationReport> logAggregationReportsForApps =
statusEvent.getLogAggregationReportsForApps();
if (logAggregationReportsForApps != null
&& !logAggregationReportsForApps.isEmpty()) {
rmNode.handleLogAggregationStatus(logAggregationReportsForApps);
}
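      // Throttle scheduler notifications: a new NodeUpdateSchedulerEvent is
      // enqueued only after the scheduler has drained the previous batch
      // (nextHeartBeat is re-armed in pullContainerUpdates()).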
      if (rmNode.nextHeartBeat) {
rmNode.nextHeartBeat = false;
rmNode.context.getDispatcher().getEventHandler().handle(
new NodeUpdateSchedulerEvent(rmNode));
}
// Update DTRenewer in secure mode to keep these apps alive. Today this is
// needed for log-aggregation to finish long after the apps are gone.
if (UserGroupInformation.isSecurityEnabled()) {
rmNode.context.getDelegationTokenRenewer().updateKeepAliveApplications(
statusEvent.getKeepAliveAppIds());
}
return NodeState.RUNNING;
}
}
public static class StatusUpdateWhenUnHealthyTransition implements
MultipleArcTransition<RMNodeImpl, RMNodeEvent, NodeState> {
@Override
public NodeState transition(RMNodeImpl rmNode, RMNodeEvent event) {
RMNodeStatusEvent statusEvent = (RMNodeStatusEvent) event;
// Switch the last heartbeatresponse.
rmNode.latestNodeHeartBeatResponse = statusEvent.getLatestResponse();
NodeHealthStatus remoteNodeHealthStatus = statusEvent.getNodeHealthStatus();
rmNode.setHealthReport(remoteNodeHealthStatus.getHealthReport());
rmNode.setLastHealthReportTime(
remoteNodeHealthStatus.getLastHealthReportTime());
if (remoteNodeHealthStatus.getIsNodeHealthy()) {
rmNode.context.getDispatcher().getEventHandler().handle(
new NodeAddedSchedulerEvent(rmNode));
rmNode.context.getDispatcher().getEventHandler().handle(
new NodesListManagerEvent(
NodesListManagerEventType.NODE_USABLE, rmNode));
        // TODO: Consider updating metrics before notifying, so that
        // notifiers see up-to-date metadata when they query it upon
        // notification.
// Update metrics
rmNode.updateMetricsForRejoinedNode(NodeState.UNHEALTHY);
return NodeState.RUNNING;
}
return NodeState.UNHEALTHY;
}
}
@Override
public List<UpdatedContainerInfo> pullContainerUpdates() {
List<UpdatedContainerInfo> latestContainerInfoList =
new ArrayList<UpdatedContainerInfo>();
UpdatedContainerInfo containerInfo;
while ((containerInfo = nodeUpdateQueue.poll()) != null) {
latestContainerInfoList.add(containerInfo);
}
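    // Re-arm the throttle so the next healthy heartbeat enqueues a fresh
    // NodeUpdateSchedulerEvent (see StatusUpdateWhenHealthyTransition).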
this.nextHeartBeat = true;
return latestContainerInfoList;
}
@VisibleForTesting
public void setNextHeartBeat(boolean nextHeartBeat) {
this.nextHeartBeat = nextHeartBeat;
}
@VisibleForTesting
public int getQueueSize() {
return nodeUpdateQueue.size();
}
// For test only.
@VisibleForTesting
public Set<ContainerId> getLaunchedContainers() {
return this.launchedContainers;
}
@Override
  public Set<String> getNodeLabels() {
    RMNodeLabelsManager nlm = context.getNodeLabelManager();
    // Look up the labels once to avoid a redundant call.
    Set<String> labels = (nlm == null) ? null : nlm.getLabelsOnNode(nodeId);
    return (labels == null)
        ? CommonNodeLabelsManager.EMPTY_STRING_SET : labels;
  }
private void handleContainerStatus(List<ContainerStatus> containerStatuses) {
// Filter the map to only obtain just launched containers and finished
// containers.
List<ContainerStatus> newlyLaunchedContainers =
new ArrayList<ContainerStatus>();
List<ContainerStatus> completedContainers =
new ArrayList<ContainerStatus>();
for (ContainerStatus remoteContainer : containerStatuses) {
ContainerId containerId = remoteContainer.getContainerId();
// Don't bother with containers already scheduled for cleanup, or for
      // applications already killed. The scheduler doesn't need to know any
      // more about this container.
if (containersToClean.contains(containerId)) {
LOG.info("Container " + containerId + " already scheduled for "
+ "cleanup, no further processing");
continue;
}
ApplicationId containerAppId =
containerId.getApplicationAttemptId().getApplicationId();
if (finishedApplications.contains(containerAppId)) {
LOG.info("Container " + containerId
+ " belongs to an application that is already killed,"
+ " no further processing");
continue;
} else if (!runningApplications.contains(containerAppId)) {
if (LOG.isDebugEnabled()) {
LOG.debug("Container " + containerId
+ " is the first container get launched for application "
+ containerAppId);
}
runningApplications.add(containerAppId);
}
// Process running containers
if (remoteContainer.getState() == ContainerState.RUNNING) {
if (!launchedContainers.contains(containerId)) {
// Just launched container. RM knows about it the first time.
launchedContainers.add(containerId);
newlyLaunchedContainers.add(remoteContainer);
// Unregister from containerAllocationExpirer.
containerAllocationExpirer.unregister(containerId);
}
} else {
// A finished container
launchedContainers.remove(containerId);
completedContainers.add(remoteContainer);
// Unregister from containerAllocationExpirer.
containerAllocationExpirer.unregister(containerId);
}
}
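    // Publish the launched/completed delta for the scheduler, which drains
    // the queue via pullContainerUpdates() on its next node update.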
    if (!newlyLaunchedContainers.isEmpty() || !completedContainers.isEmpty()) {
nodeUpdateQueue.add(new UpdatedContainerInfo(newlyLaunchedContainers,
completedContainers));
}
}
private void handleLogAggregationStatus(
List<LogAggregationReport> logAggregationReportsForApps) {
for (LogAggregationReport report : logAggregationReportsForApps) {
RMApp rmApp = this.context.getRMApps().get(report.getApplicationId());
if (rmApp != null) {
((RMAppImpl)rmApp).aggregateLogReport(this.nodeId, report);
}
}
}
}
| 36,302 | 35.743927 | 102 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeStartedEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.rmnode;
import java.util.List;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus;
public class RMNodeStartedEvent extends RMNodeEvent {
private List<NMContainerStatus> containerStatuses;
private List<ApplicationId> runningApplications;
public RMNodeStartedEvent(NodeId nodeId,
List<NMContainerStatus> containerReports,
List<ApplicationId> runningApplications) {
super(nodeId, RMNodeEventType.STARTED);
this.containerStatuses = containerReports;
this.runningApplications = runningApplications;
}
public List<NMContainerStatus> getNMContainerStatuses() {
return this.containerStatuses;
}
public List<ApplicationId> getRunningApplications() {
return runningApplications;
}
}
| 1,723 | 34.916667 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeEventType.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.rmnode;
public enum RMNodeEventType {
STARTED,
// Source: AdminService
DECOMMISSION,
DECOMMISSION_WITH_TIMEOUT,
RECOMMISSION,
// Source: AdminService, ResourceTrackerService
RESOURCE_UPDATE,
// ResourceTrackerService
STATUS_UPDATE,
REBOOTING,
RECONNECTED,
SHUTDOWN,
// Source: Application
CLEANUP_APP,
// Source: Container
CONTAINER_ALLOCATED,
CLEANUP_CONTAINER,
// Source: RMAppAttempt
FINISHED_CONTAINERS_PULLED_BY_AM,
// Source: NMLivelinessMonitor
EXPIRE
}
| 1,380 | 25.557692 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/UpdatedContainerInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.rmnode;
import java.util.List;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
public class UpdatedContainerInfo {
private List<ContainerStatus> newlyLaunchedContainers;
private List<ContainerStatus> completedContainers;
public UpdatedContainerInfo() {
}
  public UpdatedContainerInfo(List<ContainerStatus> newlyLaunchedContainers,
      List<ContainerStatus> completedContainers) {
this.newlyLaunchedContainers = newlyLaunchedContainers;
this.completedContainers = completedContainers;
}
public List<ContainerStatus> getNewlyLaunchedContainers() {
return this.newlyLaunchedContainers;
}
public List<ContainerStatus> getCompletedContainers() {
return this.completedContainers;
}
}
| 1,599 | 33.782609 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeResourceUpdateEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.rmnode;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.ResourceOption;
public class RMNodeResourceUpdateEvent extends RMNodeEvent {
private final ResourceOption resourceOption;
public RMNodeResourceUpdateEvent(NodeId nodeId, ResourceOption resourceOption) {
super(nodeId, RMNodeEventType.RESOURCE_UPDATE);
this.resourceOption = resourceOption;
}
public ResourceOption getResourceOption() {
return resourceOption;
}
}
| 1,354 | 34.657895 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeStatusEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.rmnode;
import java.util.List;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.server.api.protocolrecords.LogAggregationReport;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
import org.apache.hadoop.yarn.server.api.records.NodeHealthStatus;
public class RMNodeStatusEvent extends RMNodeEvent {
private final NodeHealthStatus nodeHealthStatus;
private final List<ContainerStatus> containersCollection;
private final NodeHeartbeatResponse latestResponse;
private final List<ApplicationId> keepAliveAppIds;
private List<LogAggregationReport> logAggregationReportsForApps;
public RMNodeStatusEvent(NodeId nodeId, NodeHealthStatus nodeHealthStatus,
List<ContainerStatus> collection, List<ApplicationId> keepAliveAppIds,
NodeHeartbeatResponse latestResponse) {
super(nodeId, RMNodeEventType.STATUS_UPDATE);
this.nodeHealthStatus = nodeHealthStatus;
this.containersCollection = collection;
this.keepAliveAppIds = keepAliveAppIds;
this.latestResponse = latestResponse;
this.logAggregationReportsForApps = null;
}
public RMNodeStatusEvent(NodeId nodeId, NodeHealthStatus nodeHealthStatus,
List<ContainerStatus> collection, List<ApplicationId> keepAliveAppIds,
NodeHeartbeatResponse latestResponse,
List<LogAggregationReport> logAggregationReportsForApps) {
super(nodeId, RMNodeEventType.STATUS_UPDATE);
this.nodeHealthStatus = nodeHealthStatus;
this.containersCollection = collection;
this.keepAliveAppIds = keepAliveAppIds;
this.latestResponse = latestResponse;
this.logAggregationReportsForApps = logAggregationReportsForApps;
}
public NodeHealthStatus getNodeHealthStatus() {
return this.nodeHealthStatus;
}
public List<ContainerStatus> getContainers() {
return this.containersCollection;
}
public NodeHeartbeatResponse getLatestResponse() {
return this.latestResponse;
}
public List<ApplicationId> getKeepAliveAppIds() {
return this.keepAliveAppIds;
}
public List<LogAggregationReport> getLogAggregationReportsForApps() {
return this.logAggregationReportsForApps;
}
public void setLogAggregationReportsForApps(
List<LogAggregationReport> logAggregationReportsForApps) {
this.logAggregationReportsForApps = logAggregationReportsForApps;
}
}
| 3,341 | 38.785714 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeReconnectEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.rmnode;
import java.util.List;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus;
public class RMNodeReconnectEvent extends RMNodeEvent {
private RMNode reconnectedNode;
private List<ApplicationId> runningApplications;
private List<NMContainerStatus> containerStatuses;
public RMNodeReconnectEvent(NodeId nodeId, RMNode newNode,
List<ApplicationId> runningApps, List<NMContainerStatus> containerReports) {
super(nodeId, RMNodeEventType.RECONNECTED);
reconnectedNode = newNode;
runningApplications = runningApps;
containerStatuses = containerReports;
}
public RMNode getReconnectedNode() {
return reconnectedNode;
}
public List<ApplicationId> getRunningApplications() {
return runningApplications;
}
public List<NMContainerStatus> getNMContainerStatuses() {
return containerStatuses;
}
}
| 1,828 | 34.173077 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeCleanAppEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.rmnode;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.NodeId;
public class RMNodeCleanAppEvent extends RMNodeEvent {
private ApplicationId appId;
public RMNodeCleanAppEvent(NodeId nodeId, ApplicationId appId) {
super(nodeId, RMNodeEventType.CLEANUP_APP);
this.appId = appId;
}
public ApplicationId getAppId() {
return this.appId;
}
}
| 1,276 | 33.513514 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeFinishedContainersPulledByAMEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.rmnode;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
import java.util.List;
// Sent after an implicit ack from the AM that the container-completion
// notification was delivered successfully.
public class RMNodeFinishedContainersPulledByAMEvent extends RMNodeEvent {
private List<ContainerId> containers;
public RMNodeFinishedContainersPulledByAMEvent(NodeId nodeId,
List<ContainerId> containers) {
super(nodeId, RMNodeEventType.FINISHED_CONTAINERS_PULLED_BY_AM);
this.containers = containers;
}
public List<ContainerId> getContainers() {
return this.containers;
}
}
| 1,522 | 35.261905 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeCleanContainerEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.rmnode;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
public class RMNodeCleanContainerEvent extends RMNodeEvent {
private ContainerId contId;
public RMNodeCleanContainerEvent(NodeId nodeId, ContainerId contId) {
super(nodeId, RMNodeEventType.CLEANUP_CONTAINER);
this.contId = contId;
}
public ContainerId getContainerId() {
return this.contId;
}
}
| 1,297 | 34.081081 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.rmnode;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.event.AbstractEvent;
public class RMNodeEvent extends AbstractEvent<RMNodeEventType> {
private final NodeId nodeId;
public RMNodeEvent(NodeId nodeId, RMNodeEventType type) {
super(type);
this.nodeId = nodeId;
}
public NodeId getNodeId() {
return this.nodeId;
}
}
| 1,240 | 32.540541 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.rmnode;
import java.util.List;
import java.util.Set;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
/**
 * A node manager's information on available resources
 * and other static information.
*
*/
public interface RMNode {
/** negative value means no timeout */
public static final int OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT = -1;
/**
   * the node id of this node.
* @return the node id of this node.
*/
public NodeId getNodeID();
/**
* the hostname of this node
* @return hostname of this node
*/
public String getHostName();
/**
* the command port for this node
* @return command port for this node
*/
public int getCommandPort();
/**
* the http port for this node
* @return http port for this node
*/
public int getHttpPort();
/**
* the ContainerManager address for this node.
* @return the ContainerManager address for this node.
*/
public String getNodeAddress();
/**
* the http-Address for this node.
* @return the http-url address for this node
*/
public String getHttpAddress();
/**
* the latest health report received from this node.
* @return the latest health report received from this node.
*/
public String getHealthReport();
/**
* the time of the latest health report received from this node.
* @return the time of the latest health report received from this node.
*/
public long getLastHealthReportTime();
/**
   * the node manager version of the node received as part of the
   * registration with the resource manager.
   * @return the node manager version of the node.
*/
public String getNodeManagerVersion();
/**
* the total available resource.
* @return the total available resource.
*/
public Resource getTotalCapability();
/**
* The rack name for this node manager.
* @return the rack name.
*/
public String getRackName();
/**
* the {@link Node} information for this node.
* @return {@link Node} information for this node.
*/
public Node getNode();
public NodeState getState();
public List<ContainerId> getContainersToCleanUp();
public List<ApplicationId> getAppsToCleanup();
List<ApplicationId> getRunningApps();
/**
* Update a {@link NodeHeartbeatResponse} with the list of containers and
* applications to clean up for this node.
* @param response the {@link NodeHeartbeatResponse} to update
*/
public void updateNodeHeartbeatResponseForCleanup(NodeHeartbeatResponse response);
public NodeHeartbeatResponse getLastNodeHeartBeatResponse();
/**
* Get and clear the list of containerUpdates accumulated across NM
* heartbeats.
*
* @return containerUpdates accumulated across NM heartbeats.
*/
public List<UpdatedContainerInfo> pullContainerUpdates();
/**
* Get set of labels in this node
*
* @return labels in this node
*/
public Set<String> getNodeLabels();
}
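// Hedged consumer sketch (assumption, not original source): a scheduler's
// node-update handling would typically drain the accumulated updates once
// per heartbeat, e.g.:
//
//   for (UpdatedContainerInfo info : rmNode.pullContainerUpdates()) {
//     // process newly launched and completed containers from this heartbeat
//   }
//
// Since pullContainerUpdates() both gets and clears the list, each update is
// observed exactly once.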
| 4,093 | 26.662162 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestWebAppProxyServlet.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.webproxy;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.HttpCookie;
import java.net.HttpURLConnection;
import java.net.URI;
import java.net.URL;
import java.util.Enumeration;
import java.util.List;
import java.util.Map;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.http.HttpServer2;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.service.CompositeService;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationReportPBImpl;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.servlet.Context;
import org.mortbay.jetty.servlet.ServletHolder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Test the WebAppProxyServlet and WebAppProxy. A simple embedded web
 * server serves as the back end.
*/
public class TestWebAppProxyServlet {
private static final Logger LOG = LoggerFactory.getLogger(
TestWebAppProxyServlet.class);
private static Server server;
private static int originalPort = 0;
private static int numberOfHeaders = 0;
private static final String UNKNOWN_HEADER = "Unknown-Header";
private static boolean hasUnknownHeader = false;
/**
   * Simple HTTP server. The server should answer requests with status 200.
*/
@BeforeClass
public static void start() throws Exception {
server = new Server(0);
Context context = new Context();
context.setContextPath("/foo");
server.setHandler(context);
context.addServlet(new ServletHolder(TestServlet.class), "/bar");
server.getConnectors()[0].setHost("localhost");
server.start();
originalPort = server.getConnectors()[0].getLocalPort();
LOG.info("Running embedded servlet container at: http://localhost:"
+ originalPort);
// This property needs to be set otherwise CORS Headers will be dropped
// by HttpUrlConnection
System.setProperty("sun.net.http.allowRestrictedHeaders", "true");
}
@SuppressWarnings("serial")
public static class TestServlet extends HttpServlet {
@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp)
throws ServletException, IOException {
int numHeaders = 0;
hasUnknownHeader = false;
@SuppressWarnings("unchecked")
Enumeration<String> names = req.getHeaderNames();
while(names.hasMoreElements()) {
String headerName = names.nextElement();
if (headerName.equals(UNKNOWN_HEADER)) {
hasUnknownHeader = true;
}
++numHeaders;
}
numberOfHeaders = numHeaders;
resp.setStatus(HttpServletResponse.SC_OK);
}
@Override
protected void doPost(HttpServletRequest req, HttpServletResponse resp)
throws ServletException, IOException {
InputStream is = req.getInputStream();
OutputStream os = resp.getOutputStream();
int c = is.read();
while (c > -1) {
os.write(c);
c = is.read();
}
is.close();
os.close();
resp.setStatus(HttpServletResponse.SC_OK);
}
}
@Test(timeout=5000)
public void testWebAppProxyServlet() throws Exception {
Configuration configuration = new Configuration();
configuration.set(YarnConfiguration.PROXY_ADDRESS, "localhost:9090");
// overriding num of web server threads, see HttpServer.HTTP_MAXTHREADS
configuration.setInt("hadoop.http.max.threads", 5);
WebAppProxyServerForTest proxy = new WebAppProxyServerForTest();
proxy.init(configuration);
proxy.start();
int proxyPort = proxy.proxy.proxyServer.getConnectorAddress(0).getPort();
AppReportFetcherForTest appReportFetcher = proxy.proxy.appReportFetcher;
    // wrong URLs
    try {
      // wrong URL without an app ID
URL emptyUrl = new URL("http://localhost:" + proxyPort + "/proxy");
HttpURLConnection emptyProxyConn = (HttpURLConnection) emptyUrl
.openConnection();
      emptyProxyConn.connect();
assertEquals(HttpURLConnection.HTTP_NOT_FOUND, emptyProxyConn.getResponseCode());
      // wrong URL: malformed app ID
URL wrongUrl = new URL("http://localhost:" + proxyPort + "/proxy/app");
HttpURLConnection proxyConn = (HttpURLConnection) wrongUrl
.openConnection();
proxyConn.connect();
assertEquals(HttpURLConnection.HTTP_INTERNAL_ERROR,
proxyConn.getResponseCode());
      // set a valid application ID in the URL
URL url = new URL("http://localhost:" + proxyPort + "/proxy/application_00_0");
proxyConn = (HttpURLConnection) url.openConnection();
// set cookie
proxyConn.setRequestProperty("Cookie", "checked_application_0_0000=true");
proxyConn.connect();
assertEquals(HttpURLConnection.HTTP_OK, proxyConn.getResponseCode());
assertTrue(isResponseCookiePresent(
proxyConn, "checked_application_0_0000", "true"));
      // cannot find application, case 1: report is null
appReportFetcher.answer = 1;
proxyConn = (HttpURLConnection) url.openConnection();
proxyConn.setRequestProperty("Cookie", "checked_application_0_0000=true");
proxyConn.connect();
assertEquals(HttpURLConnection.HTTP_NOT_FOUND,
proxyConn.getResponseCode());
assertFalse(isResponseCookiePresent(
proxyConn, "checked_application_0_0000", "true"));
      // cannot find application, case 2: ApplicationNotFoundException
appReportFetcher.answer = 4;
proxyConn = (HttpURLConnection) url.openConnection();
proxyConn.setRequestProperty("Cookie", "checked_application_0_0000=true");
proxyConn.connect();
assertEquals(HttpURLConnection.HTTP_NOT_FOUND,
proxyConn.getResponseCode());
assertFalse(isResponseCookiePresent(
proxyConn, "checked_application_0_0000", "true"));
// wrong user
appReportFetcher.answer = 2;
proxyConn = (HttpURLConnection) url.openConnection();
proxyConn.connect();
assertEquals(HttpURLConnection.HTTP_OK, proxyConn.getResponseCode());
String s = readInputStream(proxyConn.getInputStream());
assertTrue(s
.contains("to continue to an Application Master web interface owned by"));
assertTrue(s.contains("WARNING: The following page may not be safe!"));
      // case where the application is not in RUNNING state
appReportFetcher.answer = 3;
proxyConn = (HttpURLConnection) url.openConnection();
proxyConn.setRequestProperty("Cookie", "checked_application_0_0000=true");
proxyConn.connect();
assertEquals(HttpURLConnection.HTTP_OK, proxyConn.getResponseCode());
// test user-provided path and query parameter can be appended to the
// original tracking url
appReportFetcher.answer = 5;
URL clientUrl = new URL("http://localhost:" + proxyPort
+ "/proxy/application_00_0/test/tez?x=y&h=p");
proxyConn = (HttpURLConnection) clientUrl.openConnection();
proxyConn.connect();
      LOG.info("{}", proxyConn.getURL());
LOG.info("ProxyConn.getHeaderField(): " + proxyConn.getHeaderField(ProxyUtils.LOCATION));
assertEquals("http://localhost:" + originalPort
+ "/foo/bar/test/tez?a=b&x=y&h=p#main", proxyConn.getURL().toString());
} finally {
proxy.close();
}
}
@Test(timeout=5000)
public void testWebAppProxyPassThroughHeaders() throws Exception {
Configuration configuration = new Configuration();
configuration.set(YarnConfiguration.PROXY_ADDRESS, "localhost:9091");
configuration.setInt("hadoop.http.max.threads", 5);
WebAppProxyServerForTest proxy = new WebAppProxyServerForTest();
proxy.init(configuration);
proxy.start();
int proxyPort = proxy.proxy.proxyServer.getConnectorAddress(0).getPort();
try {
URL url = new URL("http://localhost:" + proxyPort + "/proxy/application_00_1");
HttpURLConnection proxyConn = (HttpURLConnection) url.openConnection();
// set headers
proxyConn.addRequestProperty("Origin", "http://www.someurl.com");
proxyConn.addRequestProperty("Access-Control-Request-Method", "GET");
proxyConn.addRequestProperty(
"Access-Control-Request-Headers", "Authorization");
proxyConn.addRequestProperty(UNKNOWN_HEADER, "unknown");
      // Verify that the four headers mentioned above have been added
      assertEquals(4, proxyConn.getRequestProperties().size());
proxyConn.connect();
assertEquals(HttpURLConnection.HTTP_OK, proxyConn.getResponseCode());
      // Verify that the number of headers received by the end server is 8.
      // The eight headers are Accept, Host, Connection, User-Agent, Cookie,
      // Origin, Access-Control-Request-Method and
      // Access-Control-Request-Headers. Note that Unknown-Header is dropped
      // by the proxy as it is not in the list of allowed headers.
      assertEquals(8, numberOfHeaders);
assertFalse(hasUnknownHeader);
} finally {
proxy.close();
}
}
/**
* Test main method of WebAppProxyServer
*/
@Test(timeout=5000)
public void testWebAppProxyServerMainMethod() throws Exception {
WebAppProxyServer mainServer = null;
Configuration conf = new YarnConfiguration();
conf.set(YarnConfiguration.PROXY_ADDRESS, "localhost:9099");
try {
mainServer = WebAppProxyServer.startServer(conf);
int counter = 20;
URL wrongUrl = new URL("http://localhost:9099/proxy/app");
HttpURLConnection proxyConn = null;
while (counter > 0) {
counter--;
try {
proxyConn = (HttpURLConnection) wrongUrl.openConnection();
proxyConn.connect();
proxyConn.getResponseCode();
// server started ok
counter = 0;
} catch (Exception e) {
Thread.sleep(100);
}
}
assertNotNull(proxyConn);
// wrong application Id
assertEquals(HttpURLConnection.HTTP_INTERNAL_ERROR,
proxyConn.getResponseCode());
} finally {
if (mainServer != null) {
mainServer.stop();
}
}
}
private String readInputStream(InputStream input) throws Exception {
ByteArrayOutputStream data = new ByteArrayOutputStream();
byte[] buffer = new byte[512];
int read;
while ((read = input.read(buffer)) >= 0) {
data.write(buffer, 0, read);
}
return new String(data.toByteArray(), "UTF-8");
}
private boolean isResponseCookiePresent(HttpURLConnection proxyConn,
String expectedName, String expectedValue) {
Map<String, List<String>> headerFields = proxyConn.getHeaderFields();
List<String> cookiesHeader = headerFields.get("Set-Cookie");
if (cookiesHeader != null) {
for (String cookie : cookiesHeader) {
HttpCookie c = HttpCookie.parse(cookie).get(0);
if (c.getName().equals(expectedName)
&& c.getValue().equals(expectedValue)) {
return true;
}
}
}
return false;
}
@AfterClass
public static void stop() throws Exception {
    try {
      server.stop();
    } catch (Exception e) {
      // ignore: best-effort shutdown in test teardown
    }
    try {
      server.destroy();
    } catch (Exception e) {
      // ignore: best-effort cleanup in test teardown
    }
}
private class WebAppProxyServerForTest extends CompositeService {
private WebAppProxyForTest proxy = null;
public WebAppProxyServerForTest() {
super(WebAppProxyServer.class.getName());
}
@Override
public synchronized void serviceInit(Configuration conf) throws Exception {
proxy = new WebAppProxyForTest();
addService(proxy);
super.serviceInit(conf);
}
}
private class WebAppProxyForTest extends WebAppProxy {
HttpServer2 proxyServer;
AppReportFetcherForTest appReportFetcher;
@Override
protected void serviceStart() throws Exception {
Configuration conf = getConfig();
String bindAddress = conf.get(YarnConfiguration.PROXY_ADDRESS);
bindAddress = StringUtils.split(bindAddress, ':')[0];
AccessControlList acl = new AccessControlList(
conf.get(YarnConfiguration.YARN_ADMIN_ACL,
YarnConfiguration.DEFAULT_YARN_ADMIN_ACL));
proxyServer = new HttpServer2.Builder()
.setName("proxy")
.addEndpoint(
URI.create(WebAppUtils.getHttpSchemePrefix(conf) + bindAddress
+ ":0")).setFindPort(true)
.setConf(conf)
.setACL(acl)
.build();
proxyServer.addServlet(ProxyUriUtils.PROXY_SERVLET_NAME,
ProxyUriUtils.PROXY_PATH_SPEC, WebAppProxyServlet.class);
appReportFetcher = new AppReportFetcherForTest(conf);
      proxyServer.setAttribute(FETCHER_ATTRIBUTE, appReportFetcher);
proxyServer.setAttribute(IS_SECURITY_ENABLED_ATTRIBUTE, Boolean.TRUE);
String proxy = WebAppUtils.getProxyHostAndPort(conf);
String[] proxyParts = proxy.split(":");
String proxyHost = proxyParts[0];
proxyServer.setAttribute(PROXY_HOST_ATTRIBUTE, proxyHost);
proxyServer.start();
LOG.info("Proxy server is started at port {}",
proxyServer.getConnectorAddress(0).getPort());
}
}
private class AppReportFetcherForTest extends AppReportFetcher {
int answer = 0;
public AppReportFetcherForTest(Configuration conf) {
super(conf);
}
public ApplicationReport getApplicationReport(ApplicationId appId)
throws YarnException {
if (answer == 0) {
return getDefaultApplicationReport(appId);
} else if (answer == 1) {
return null;
} else if (answer == 2) {
ApplicationReport result = getDefaultApplicationReport(appId);
result.setUser("user");
return result;
} else if (answer == 3) {
ApplicationReport result = getDefaultApplicationReport(appId);
result.setYarnApplicationState(YarnApplicationState.KILLED);
return result;
} else if (answer == 4) {
throw new ApplicationNotFoundException("Application is not found");
} else if (answer == 5) {
// test user-provided path and query parameter can be appended to the
// original tracking url
ApplicationReport result = getDefaultApplicationReport(appId);
result.setOriginalTrackingUrl("localhost:" + originalPort
+ "/foo/bar?a=b#main");
result.setYarnApplicationState(YarnApplicationState.FINISHED);
return result;
}
return null;
}
private ApplicationReport getDefaultApplicationReport(ApplicationId appId) {
ApplicationReport result = new ApplicationReportPBImpl();
result.setApplicationId(appId);
result.setOriginalTrackingUrl("localhost:" + originalPort + "/foo/bar");
result.setYarnApplicationState(YarnApplicationState.RUNNING);
result.setUser(CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_STATIC_USER);
return result;
}
}
}
| 16,640 | 36.228188 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestWebAppProxyServer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.webproxy;
import static org.junit.Assert.assertEquals;
import org.apache.hadoop.service.Service;
import org.apache.hadoop.service.Service.STATE;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import java.net.InetSocketAddress;
public class TestWebAppProxyServer {
private WebAppProxyServer webAppProxy = null;
private final String proxyAddress = "0.0.0.0:8888";
@Before
public void setUp() throws Exception {
YarnConfiguration conf = new YarnConfiguration();
conf.set(YarnConfiguration.PROXY_ADDRESS, proxyAddress);
webAppProxy = new WebAppProxyServer();
webAppProxy.init(conf);
}
@After
public void tearDown() throws Exception {
webAppProxy.stop();
}
@Test
public void testStart() {
assertEquals(STATE.INITED, webAppProxy.getServiceState());
webAppProxy.start();
for (Service service : webAppProxy.getServices()) {
if (service instanceof WebAppProxy) {
        assertEquals(proxyAddress, ((WebAppProxy) service).getBindAddress());
}
}
assertEquals(STATE.STARTED, webAppProxy.getServiceState());
}
@Test
public void testBindAddress() {
YarnConfiguration conf = new YarnConfiguration();
InetSocketAddress defaultBindAddress = WebAppProxyServer.getBindAddress(conf);
Assert.assertEquals("Web Proxy default bind address port is incorrect",
YarnConfiguration.DEFAULT_PROXY_PORT,
defaultBindAddress.getPort());
}
}
| 2,426 | 32.246575 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestProxyUriUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.webproxy;
import static org.junit.Assert.*;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.List;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.apache.hadoop.yarn.util.TrackingUriPlugin;
import org.junit.Test;
import com.google.common.collect.Lists;
public class TestProxyUriUtils {
@Test
public void testGetPathApplicationId() {
assertEquals("/proxy/application_100_0001",
ProxyUriUtils.getPath(BuilderUtils.newApplicationId(100l, 1)));
assertEquals("/proxy/application_6384623_0005",
ProxyUriUtils.getPath(BuilderUtils.newApplicationId(6384623l, 5)));
}
@Test(expected = IllegalArgumentException.class)
public void testGetPathApplicationIdBad() {
ProxyUriUtils.getPath(null);
}
@Test
public void testGetPathApplicationIdString() {
assertEquals("/proxy/application_6384623_0005",
ProxyUriUtils.getPath(BuilderUtils.newApplicationId(6384623l, 5), null));
assertEquals("/proxy/application_6384623_0005/static/app",
ProxyUriUtils.getPath(BuilderUtils.newApplicationId(6384623l, 5), "/static/app"));
assertEquals("/proxy/application_6384623_0005/",
ProxyUriUtils.getPath(BuilderUtils.newApplicationId(6384623l, 5), "/"));
assertEquals("/proxy/application_6384623_0005/some/path",
ProxyUriUtils.getPath(BuilderUtils.newApplicationId(6384623l, 5), "some/path"));
}
@Test
public void testGetPathAndQuery() {
assertEquals("/proxy/application_6384623_0005/static/app?foo=bar",
ProxyUriUtils.getPathAndQuery(BuilderUtils.newApplicationId(6384623l, 5), "/static/app",
"?foo=bar", false));
assertEquals("/proxy/application_6384623_0005/static/app?foo=bar&bad=good&proxyapproved=true",
ProxyUriUtils.getPathAndQuery(BuilderUtils.newApplicationId(6384623l, 5), "/static/app",
"foo=bar&bad=good", true));
}
@Test
public void testGetProxyUri() throws Exception {
URI originalUri = new URI("http://host.com/static/foo?bar=bar");
URI proxyUri = new URI("http://proxy.net:8080/");
ApplicationId id = BuilderUtils.newApplicationId(6384623l, 5);
URI expected = new URI("http://proxy.net:8080/proxy/application_6384623_0005/static/foo?bar=bar");
URI result = ProxyUriUtils.getProxyUri(originalUri, proxyUri, id);
assertEquals(expected, result);
}
@Test
public void testGetProxyUriNull() throws Exception {
URI originalUri = null;
URI proxyUri = new URI("http://proxy.net:8080/");
ApplicationId id = BuilderUtils.newApplicationId(6384623l, 5);
URI expected = new URI("http://proxy.net:8080/proxy/application_6384623_0005/");
URI result = ProxyUriUtils.getProxyUri(originalUri, proxyUri, id);
assertEquals(expected, result);
}
@Test
public void testGetProxyUriFromPluginsReturnsNullIfNoPlugins()
throws URISyntaxException {
ApplicationId id = BuilderUtils.newApplicationId(6384623l, 5);
List<TrackingUriPlugin> list =
Lists.newArrayListWithExpectedSize(0);
assertNull(ProxyUriUtils.getUriFromTrackingPlugins(id, list));
}
@Test
public void testGetProxyUriFromPluginsReturnsValidUriWhenAble()
throws URISyntaxException {
ApplicationId id = BuilderUtils.newApplicationId(6384623l, 5);
List<TrackingUriPlugin> list =
Lists.newArrayListWithExpectedSize(2);
// Insert a plugin that returns null.
list.add(new TrackingUriPlugin() {
public URI getTrackingUri(ApplicationId id) throws URISyntaxException {
return null;
}
});
// Insert a plugin that returns a valid URI.
list.add(new TrackingUriPlugin() {
public URI getTrackingUri(ApplicationId id) throws URISyntaxException {
return new URI("http://history.server.net/");
}
});
URI result = ProxyUriUtils.getUriFromTrackingPlugins(id, list);
assertNotNull(result);
}
}
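// Note on the last test above: a plugin that returns null is skipped by
// getUriFromTrackingPlugins, and a later plugin in the list can still supply
// a valid tracking URI.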
| 4,793 | 37.66129 | 102 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestAppReportFetcher.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.webproxy;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
import org.apache.hadoop.yarn.api.ApplicationHistoryProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.junit.After;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.Mockito;
public class TestAppReportFetcher {
static ApplicationHistoryProtocol historyManager;
static Configuration conf = new Configuration();
private static ApplicationClientProtocol appManager;
private static AppReportFetcher fetcher;
private final String appNotFoundExceptionMsg = "APP NOT FOUND";
@After
public void cleanUp() {
historyManager = null;
appManager = null;
fetcher = null;
}
public void testHelper(boolean isAHSEnabled)
throws YarnException, IOException {
conf.setBoolean(YarnConfiguration.APPLICATION_HISTORY_ENABLED,
isAHSEnabled);
appManager = Mockito.mock(ApplicationClientProtocol.class);
Mockito.when(appManager
.getApplicationReport(Mockito.any(GetApplicationReportRequest.class)))
.thenThrow(new ApplicationNotFoundException(appNotFoundExceptionMsg));
fetcher = new AppReportFetcherForTest(conf, appManager);
ApplicationId appId = ApplicationId.newInstance(0,0);
fetcher.getApplicationReport(appId);
}
@Test
public void testFetchReportAHSEnabled() throws YarnException, IOException {
testHelper(true);
Mockito.verify(historyManager, Mockito.times(1))
.getApplicationReport(Mockito.any(GetApplicationReportRequest.class));
Mockito.verify(appManager, Mockito.times(1))
.getApplicationReport(Mockito.any(GetApplicationReportRequest.class));
}
@Test
public void testFetchReportAHSDisabled() throws YarnException, IOException {
try {
testHelper(false);
} catch (ApplicationNotFoundException e) {
      Assert.assertEquals(appNotFoundExceptionMsg, e.getMessage());
      /* The RM will not know of the app and the Application History Service
       * is disabled, so we will not try to get the report from AHS and the
       * RM will throw ApplicationNotFoundException.
       */
}
Mockito.verify(appManager, Mockito.times(1))
.getApplicationReport(Mockito.any(GetApplicationReportRequest.class));
if (historyManager != null) {
Assert.fail("HistoryManager should be null as AHS is disabled");
}
}
static class AppReportFetcherForTest extends AppReportFetcher {
public AppReportFetcherForTest(Configuration conf,
ApplicationClientProtocol acp) {
super(conf, acp);
}
@Override
protected ApplicationHistoryProtocol getAHSProxy(Configuration conf)
throws IOException
{
GetApplicationReportResponse resp = Mockito.
mock(GetApplicationReportResponse.class);
historyManager = Mockito.mock(ApplicationHistoryProtocol.class);
try {
Mockito.when(historyManager.getApplicationReport(Mockito
.any(GetApplicationReportRequest.class))).thenReturn(resp);
} catch (YarnException e) {
        // stubbing a mock should not throw; surface any unexpected failure
e.printStackTrace();
}
return historyManager;
}
}
}
| 4,448 | 36.70339 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestAmFilterInitializer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.webproxy.amfilter;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.FilterContainer;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
import org.junit.Test;
public class TestAmFilterInitializer extends TestCase {
@Override
protected void setUp() throws Exception {
super.setUp();
NetUtils.addStaticResolution("host1", "172.0.0.1");
NetUtils.addStaticResolution("host2", "172.0.0.1");
NetUtils.addStaticResolution("host3", "172.0.0.1");
NetUtils.addStaticResolution("host4", "172.0.0.1");
NetUtils.addStaticResolution("host5", "172.0.0.1");
NetUtils.addStaticResolution("host6", "172.0.0.1");
}
@Test
public void testInitFilter() {
// Check PROXY_ADDRESS
MockFilterContainer con = new MockFilterContainer();
Configuration conf = new Configuration(false);
conf.set(YarnConfiguration.PROXY_ADDRESS, "host1:1000");
AmFilterInitializer afi = new MockAmFilterInitializer();
assertNull(con.givenParameters);
afi.initFilter(con, conf);
assertEquals(2, con.givenParameters.size());
assertEquals("host1", con.givenParameters.get(AmIpFilter.PROXY_HOSTS));
assertEquals("http://host1:1000/foo",
con.givenParameters.get(AmIpFilter.PROXY_URI_BASES));
// Check a single RM_WEBAPP_ADDRESS
con = new MockFilterContainer();
conf = new Configuration(false);
conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS, "host2:2000");
afi = new MockAmFilterInitializer();
assertNull(con.givenParameters);
afi.initFilter(con, conf);
assertEquals(2, con.givenParameters.size());
assertEquals("host2", con.givenParameters.get(AmIpFilter.PROXY_HOSTS));
assertEquals("http://host2:2000/foo",
con.givenParameters.get(AmIpFilter.PROXY_URI_BASES));
// Check multiple RM_WEBAPP_ADDRESSes (RM HA)
con = new MockFilterContainer();
conf = new Configuration(false);
conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
conf.set(YarnConfiguration.RM_HA_IDS, "rm1,rm2,rm3");
conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm1", "host2:2000");
conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm2", "host3:3000");
conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm3", "host4:4000");
afi = new MockAmFilterInitializer();
assertNull(con.givenParameters);
afi.initFilter(con, conf);
assertEquals(2, con.givenParameters.size());
String[] proxyHosts = con.givenParameters.get(AmIpFilter.PROXY_HOSTS)
.split(AmIpFilter.PROXY_HOSTS_DELIMITER);
assertEquals(3, proxyHosts.length);
Arrays.sort(proxyHosts);
assertEquals("host2", proxyHosts[0]);
assertEquals("host3", proxyHosts[1]);
assertEquals("host4", proxyHosts[2]);
String[] proxyBases = con.givenParameters.get(AmIpFilter.PROXY_URI_BASES)
.split(AmIpFilter.PROXY_URI_BASES_DELIMITER);
assertEquals(3, proxyBases.length);
Arrays.sort(proxyBases);
assertEquals("http://host2:2000/foo", proxyBases[0]);
assertEquals("http://host3:3000/foo", proxyBases[1]);
assertEquals("http://host4:4000/foo", proxyBases[2]);
// Check multiple RM_WEBAPP_ADDRESSes (RM HA) with HTTPS
con = new MockFilterContainer();
conf = new Configuration(false);
conf.set(YarnConfiguration.YARN_HTTP_POLICY_KEY,
HttpConfig.Policy.HTTPS_ONLY.toString());
conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
conf.set(YarnConfiguration.RM_HA_IDS, "rm1,rm2");
conf.set(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS + ".rm1", "host5:5000");
conf.set(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS + ".rm2", "host6:6000");
afi = new MockAmFilterInitializer();
assertNull(con.givenParameters);
afi.initFilter(con, conf);
assertEquals(2, con.givenParameters.size());
proxyHosts = con.givenParameters.get(AmIpFilter.PROXY_HOSTS)
.split(AmIpFilter.PROXY_HOSTS_DELIMITER);
assertEquals(2, proxyHosts.length);
Arrays.sort(proxyHosts);
assertEquals("host5", proxyHosts[0]);
assertEquals("host6", proxyHosts[1]);
proxyBases = con.givenParameters.get(AmIpFilter.PROXY_URI_BASES)
.split(AmIpFilter.PROXY_URI_BASES_DELIMITER);
assertEquals(2, proxyBases.length);
Arrays.sort(proxyBases);
assertEquals("https://host5:5000/foo", proxyBases[0]);
assertEquals("https://host6:6000/foo", proxyBases[1]);
}
@Test
public void testGetProxyHostsAndPortsForAmFilter() {
// Check no configs given
Configuration conf = new Configuration(false);
List<String> proxyHosts =
WebAppUtils.getProxyHostsAndPortsForAmFilter(conf);
assertEquals(1, proxyHosts.size());
assertEquals(WebAppUtils.getResolvedRMWebAppURLWithoutScheme(conf),
proxyHosts.get(0));
// Check conf in which only RM hostname is set
conf = new Configuration(false);
conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS,
"${yarn.resourcemanager.hostname}:8088"); // default in yarn-default.xml
conf.set(YarnConfiguration.RM_HOSTNAME, "host1");
proxyHosts = WebAppUtils.getProxyHostsAndPortsForAmFilter(conf);
assertEquals(1, proxyHosts.size());
assertEquals("host1:8088", proxyHosts.get(0));
// Check PROXY_ADDRESS has priority
conf = new Configuration(false);
conf.set(YarnConfiguration.PROXY_ADDRESS, "host1:1000");
conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
conf.set(YarnConfiguration.RM_HA_IDS, "rm1,rm2,rm3");
conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm1", "host2:2000");
conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm2", "host3:3000");
conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm3", "host4:4000");
proxyHosts = WebAppUtils.getProxyHostsAndPortsForAmFilter(conf);
assertEquals(1, proxyHosts.size());
assertEquals("host1:1000", proxyHosts.get(0));
// Check getting a single RM_WEBAPP_ADDRESS
conf = new Configuration(false);
conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS, "host2:2000");
proxyHosts = WebAppUtils.getProxyHostsAndPortsForAmFilter(conf);
assertEquals(1, proxyHosts.size());
Collections.sort(proxyHosts);
assertEquals("host2:2000", proxyHosts.get(0));
// Check getting multiple RM_WEBAPP_ADDRESSes (RM HA)
conf = new Configuration(false);
conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
conf.set(YarnConfiguration.RM_HA_IDS, "rm1,rm2,rm3");
conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm1", "host2:2000");
conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm2", "host3:3000");
conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm3", "host4:4000");
conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm4", "dummy");
conf.set(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS + ".rm1", "host5:5000");
conf.set(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS + ".rm2", "host6:6000");
proxyHosts = WebAppUtils.getProxyHostsAndPortsForAmFilter(conf);
assertEquals(3, proxyHosts.size());
Collections.sort(proxyHosts);
assertEquals("host2:2000", proxyHosts.get(0));
assertEquals("host3:3000", proxyHosts.get(1));
assertEquals("host4:4000", proxyHosts.get(2));
// Check getting multiple RM_WEBAPP_ADDRESSes (RM HA) with HTTPS
conf = new Configuration(false);
conf.set(YarnConfiguration.YARN_HTTP_POLICY_KEY,
HttpConfig.Policy.HTTPS_ONLY.toString());
conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
conf.set(YarnConfiguration.RM_HA_IDS, "rm1,rm2,rm3,dummy");
conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm1", "host2:2000");
conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm2", "host3:3000");
conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm3", "host4:4000");
conf.set(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS + ".rm1", "host5:5000");
conf.set(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS + ".rm2", "host6:6000");
proxyHosts = WebAppUtils.getProxyHostsAndPortsForAmFilter(conf);
assertEquals(2, proxyHosts.size());
Collections.sort(proxyHosts);
assertEquals("host5:5000", proxyHosts.get(0));
assertEquals("host6:6000", proxyHosts.get(1));
// Check config without explicit RM_WEBAPP_ADDRESS settings (RM HA)
conf = new Configuration(false);
conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
conf.set(YarnConfiguration.RM_HA_IDS, "rm1,rm2,rm3");
conf.set(YarnConfiguration.RM_HOSTNAME + ".rm1", "host2");
conf.set(YarnConfiguration.RM_HOSTNAME + ".rm2", "host3");
conf.set(YarnConfiguration.RM_HOSTNAME + ".rm3", "host4");
conf.set(YarnConfiguration.RM_HOSTNAME + ".rm4", "dummy");
proxyHosts = WebAppUtils.getProxyHostsAndPortsForAmFilter(conf);
assertEquals(3, proxyHosts.size());
Collections.sort(proxyHosts);
assertEquals("host2:" + YarnConfiguration.DEFAULT_RM_WEBAPP_PORT,
proxyHosts.get(0));
assertEquals("host3:" + YarnConfiguration.DEFAULT_RM_WEBAPP_PORT,
proxyHosts.get(1));
assertEquals("host4:" + YarnConfiguration.DEFAULT_RM_WEBAPP_PORT,
proxyHosts.get(2));
// Check config without explicit RM_WEBAPP_HTTPS_ADDRESS settings (RM HA)
conf = new Configuration(false);
conf.set(YarnConfiguration.YARN_HTTP_POLICY_KEY,
HttpConfig.Policy.HTTPS_ONLY.toString());
conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
conf.set(YarnConfiguration.RM_HA_IDS, "rm1,rm2,rm3");
conf.set(YarnConfiguration.RM_HOSTNAME + ".rm1", "host2");
conf.set(YarnConfiguration.RM_HOSTNAME + ".rm2", "host3");
conf.set(YarnConfiguration.RM_HOSTNAME + ".rm3", "host4");
conf.set(YarnConfiguration.RM_HOSTNAME + ".rm4", "dummy");
proxyHosts = WebAppUtils.getProxyHostsAndPortsForAmFilter(conf);
assertEquals(3, proxyHosts.size());
Collections.sort(proxyHosts);
assertEquals("host2:" + YarnConfiguration.DEFAULT_RM_WEBAPP_HTTPS_PORT,
proxyHosts.get(0));
assertEquals("host3:" + YarnConfiguration.DEFAULT_RM_WEBAPP_HTTPS_PORT,
proxyHosts.get(1));
assertEquals("host4:" + YarnConfiguration.DEFAULT_RM_WEBAPP_HTTPS_PORT,
proxyHosts.get(2));
}
class MockAmFilterInitializer extends AmFilterInitializer {
@Override
protected String getApplicationWebProxyBase() {
return "/foo";
}
}
class MockFilterContainer implements FilterContainer {
Map<String, String> givenParameters;
@Override
public void addFilter(String name, String classname, Map<String,
String> parameters) {
givenParameters = parameters;
}
@Override
public void addGlobalFilter(String name, String classname,
Map<String, String> parameters) {
}
}
}
| 11,689 | 43.618321 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestAmFilter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.webproxy.amfilter;
import java.io.IOException;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.util.*;
import java.util.concurrent.atomic.AtomicBoolean;
import javax.servlet.*;
import javax.servlet.http.Cookie;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import static org.junit.Assert.*;
import org.apache.hadoop.yarn.server.webproxy.ProxyUtils;
import org.apache.hadoop.yarn.server.webproxy.WebAppProxyServlet;
import org.glassfish.grizzly.servlet.HttpServletResponseImpl;
import org.junit.Test;
import org.mockito.Mockito;
/**
 * Test AmIpFilter. Requests from hosts that are not declared as proxies
 * should be redirected through the proxy. Other requests can be filtered
 * with (or without) a user name.
*
*/
public class TestAmFilter {
private String proxyHost = "localhost";
private String proxyUri = "http://bogus";
private String doFilterRequest;
private AmIpServletRequestWrapper servletWrapper;
private class TestAmIpFilter extends AmIpFilter {
private Set<String> proxyAddresses = null;
protected Set<String> getProxyAddresses() {
if (proxyAddresses == null) {
proxyAddresses = new HashSet<String>();
}
proxyAddresses.add(proxyHost);
return proxyAddresses;
}
}
private static class DummyFilterConfig implements FilterConfig {
final Map<String, String> map;
DummyFilterConfig(Map<String, String> map) {
this.map = map;
}
@Override
public String getFilterName() {
return "dummy";
}
@Override
public String getInitParameter(String arg0) {
return map.get(arg0);
}
@Override
public Enumeration<String> getInitParameterNames() {
return Collections.enumeration(map.keySet());
}
@Override
public ServletContext getServletContext() {
return null;
}
}
@Test(timeout = 5000)
@SuppressWarnings("deprecation")
public void filterNullCookies() throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
Mockito.when(request.getCookies()).thenReturn(null);
Mockito.when(request.getRemoteAddr()).thenReturn(proxyHost);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
final AtomicBoolean invoked = new AtomicBoolean();
FilterChain chain = new FilterChain() {
@Override
public void doFilter(ServletRequest servletRequest,
ServletResponse servletResponse) throws IOException, ServletException {
invoked.set(true);
}
};
Map<String, String> params = new HashMap<String, String>();
params.put(AmIpFilter.PROXY_HOST, proxyHost);
params.put(AmIpFilter.PROXY_URI_BASE, proxyUri);
FilterConfig conf = new DummyFilterConfig(params);
Filter filter = new TestAmIpFilter();
filter.init(conf);
filter.doFilter(request, response, chain);
assertTrue(invoked.get());
filter.destroy();
}
/**
* Test AmIpFilter
*/
@Test(timeout = 1000)
@SuppressWarnings("deprecation")
public void testFilter() throws Exception {
Map<String, String> params = new HashMap<String, String>();
params.put(AmIpFilter.PROXY_HOST, proxyHost);
params.put(AmIpFilter.PROXY_URI_BASE, proxyUri);
FilterConfig config = new DummyFilterConfig(params);
// dummy filter
FilterChain chain = new FilterChain() {
@Override
public void doFilter(ServletRequest servletRequest,
ServletResponse servletResponse) throws IOException, ServletException {
doFilterRequest = servletRequest.getClass().getName();
if (servletRequest instanceof AmIpServletRequestWrapper) {
servletWrapper = (AmIpServletRequestWrapper) servletRequest;
}
}
};
AmIpFilter testFilter = new AmIpFilter();
testFilter.init(config);
HttpServletResponseForTest response = new HttpServletResponseForTest();
    // the request must implement HttpServletRequest
ServletRequest failRequest = Mockito.mock(ServletRequest.class);
try {
testFilter.doFilter(failRequest, response, chain);
fail();
} catch (ServletException e) {
assertEquals(ProxyUtils.E_HTTP_HTTPS_ONLY, e.getMessage());
}
// request with HttpServletRequest
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
Mockito.when(request.getRemoteAddr()).thenReturn("redirect");
Mockito.when(request.getRequestURI()).thenReturn("/redirect");
testFilter.doFilter(request, response, chain);
// address "redirect" is not in host list
assertEquals(302, response.status);
String redirect = response.getHeader(ProxyUtils.LOCATION);
assertEquals("http://bogus/redirect", redirect);
// "127.0.0.1" contains in host list. Without cookie
Mockito.when(request.getRemoteAddr()).thenReturn("127.0.0.1");
testFilter.doFilter(request, response, chain);
assertTrue(doFilterRequest
.contains("javax.servlet.http.HttpServletRequest"));
// cookie added
Cookie[] cookies = new Cookie[1];
cookies[0] = new Cookie(WebAppProxyServlet.PROXY_USER_COOKIE_NAME, "user");
Mockito.when(request.getCookies()).thenReturn(cookies);
testFilter.doFilter(request, response, chain);
assertEquals(
"org.apache.hadoop.yarn.server.webproxy.amfilter.AmIpServletRequestWrapper",
doFilterRequest);
// request contains principal from cookie
assertEquals("user", servletWrapper.getUserPrincipal().getName());
assertEquals("user", servletWrapper.getRemoteUser());
assertFalse(servletWrapper.isUserInRole(""));
}
private class HttpServletResponseForTest extends HttpServletResponseImpl {
String redirectLocation = "";
int status;
private String contentType;
private final Map<String, String> headers = new HashMap<>(1);
private StringWriter body;
public String getRedirect() {
return redirectLocation;
}
@Override
public void sendRedirect(String location) throws IOException {
redirectLocation = location;
}
@Override
public String encodeRedirectURL(String url) {
return url;
}
@Override
public void setStatus(int status) {
this.status = status;
}
@Override
public void setContentType(String type) {
this.contentType = type;
}
@Override
public void setHeader(String name, String value) {
headers.put(name, value);
}
public String getHeader(String name) {
return headers.get(name);
}
@Override
public PrintWriter getWriter() throws IOException {
body = new StringWriter();
return new PrintWriter(body);
}
}
}
| 7,509 | 30.033058 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.webproxy;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.ObjectInputStream;
import java.io.OutputStream;
import java.io.PrintWriter;
import java.net.InetAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URLEncoder;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.Enumeration;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import javax.servlet.ServletException;
import javax.servlet.http.Cookie;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.core.UriBuilder;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.util.Apps;
import org.apache.hadoop.yarn.util.StringHelper;
import org.apache.hadoop.yarn.util.TrackingUriPlugin;
import org.apache.hadoop.yarn.webapp.MimeType;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
import org.apache.http.Header;
import org.apache.http.HttpResponse;
import org.apache.http.NameValuePair;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPut;
import org.apache.http.client.methods.HttpRequestBase;
import org.apache.http.client.params.ClientPNames;
import org.apache.http.client.params.CookiePolicy;
import org.apache.http.client.utils.URLEncodedUtils;
import org.apache.http.conn.params.ConnRoutePNames;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.DefaultHttpClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class WebAppProxyServlet extends HttpServlet {
private static final long serialVersionUID = 1L;
private static final Logger LOG = LoggerFactory.getLogger(
WebAppProxyServlet.class);
private static final Set<String> passThroughHeaders =
new HashSet<>(Arrays.asList(
"User-Agent",
"Accept",
"Accept-Encoding",
"Accept-Language",
"Accept-Charset",
"Content-Type",
"Origin",
"Access-Control-Request-Method",
"Access-Control-Request-Headers"));
public static final String PROXY_USER_COOKIE_NAME = "proxy-user";
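  // Request headers outside the passThroughHeaders whitelist above (e.g. an
  // arbitrary Unknown-Header) are dropped by the proxy and never reach the
  // backing AM web server.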
private transient List<TrackingUriPlugin> trackingUriPlugins;
private final String rmAppPageUrlBase;
private transient YarnConfiguration conf;
/**
* HTTP methods.
*/
private enum HTTP { GET, POST, HEAD, PUT, DELETE };
/**
* Empty Hamlet class.
*/
private static class _ implements Hamlet._ {
//Empty
}
private static class Page extends Hamlet {
Page(PrintWriter out) {
super(out, 0, false);
}
public HTML<WebAppProxyServlet._> html() {
return new HTML<>("html", null, EnumSet.of(EOpt.ENDTAG));
}
}
/**
* Default constructor
*/
public WebAppProxyServlet() {
super();
conf = new YarnConfiguration();
this.trackingUriPlugins =
conf.getInstances(YarnConfiguration.YARN_TRACKING_URL_GENERATOR,
TrackingUriPlugin.class);
this.rmAppPageUrlBase = StringHelper.pjoin(
WebAppUtils.getResolvedRMWebAppURLWithScheme(conf), "cluster", "app");
}
/**
* Output 404 with appropriate message.
* @param resp the http response.
* @param message the message to include on the page.
* @throws IOException on any error.
*/
private static void notFound(HttpServletResponse resp, String message)
throws IOException {
ProxyUtils.notFound(resp, message);
}
/**
* Warn the user that the link may not be safe!
* @param resp the http response
* @param link the link to point to
* @param user the user that owns the link.
* @throws IOException on any error.
*/
private static void warnUserPage(HttpServletResponse resp, String link,
String user, ApplicationId id) throws IOException {
//Set the cookie when we warn which overrides the query parameter
//This is so that if a user passes in the approved query parameter without
//having first visited this page then this page will still be displayed
resp.addCookie(makeCheckCookie(id, false));
resp.setContentType(MimeType.HTML);
Page p = new Page(resp.getWriter());
p.html().
h1("WARNING: The following page may not be safe!").
h3().
_("click ").a(link, "here").
_(" to continue to an Application Master web interface owned by ", user).
_().
_();
}
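  // Illustration of the interplay above: a request carrying
  // ?proxyapproved=true but no checked_<appId> cookie still lands on this
  // warning page first; the "false" cookie set here records that the user
  // has been warned, so a subsequent approval is honored by methodAction().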
/**
* Download link and have it be the response.
* @param req the http request
* @param resp the http response
* @param link the link to download
* @param c the cookie to set if any
* @param proxyHost the proxy host
* @param method the http method
* @throws IOException on any error.
*/
private static void proxyLink(final HttpServletRequest req,
final HttpServletResponse resp, final URI link, final Cookie c,
final String proxyHost, final HTTP method) throws IOException {
DefaultHttpClient client = new DefaultHttpClient();
client
.getParams()
.setParameter(ClientPNames.COOKIE_POLICY,
CookiePolicy.BROWSER_COMPATIBILITY)
.setBooleanParameter(ClientPNames.ALLOW_CIRCULAR_REDIRECTS, true);
// Make sure we send the request from the proxy address in the config
// since that is what the AM filter checks against. IP aliasing or
// similar could cause issues otherwise.
InetAddress localAddress = InetAddress.getByName(proxyHost);
if (LOG.isDebugEnabled()) {
LOG.debug("local InetAddress for proxy host: {}", localAddress);
}
client.getParams()
.setParameter(ConnRoutePNames.LOCAL_ADDRESS, localAddress);
HttpRequestBase base = null;
if (method.equals(HTTP.GET)) {
base = new HttpGet(link);
} else if (method.equals(HTTP.PUT)) {
base = new HttpPut(link);
StringBuilder sb = new StringBuilder();
BufferedReader reader =
new BufferedReader(
new InputStreamReader(req.getInputStream(), "UTF-8"));
String line;
while ((line = reader.readLine()) != null) {
sb.append(line);
}
((HttpPut) base).setEntity(new StringEntity(sb.toString()));
} else {
resp.setStatus(HttpServletResponse.SC_METHOD_NOT_ALLOWED);
return;
}
@SuppressWarnings("unchecked")
Enumeration<String> names = req.getHeaderNames();
while(names.hasMoreElements()) {
String name = names.nextElement();
if(passThroughHeaders.contains(name)) {
String value = req.getHeader(name);
if (LOG.isDebugEnabled()) {
LOG.debug("REQ HEADER: {} : {}", name, value);
}
base.setHeader(name, value);
}
}
String user = req.getRemoteUser();
if (user != null && !user.isEmpty()) {
base.setHeader("Cookie",
PROXY_USER_COOKIE_NAME + "=" + URLEncoder.encode(user, "ASCII"));
}
OutputStream out = resp.getOutputStream();
try {
HttpResponse httpResp = client.execute(base);
resp.setStatus(httpResp.getStatusLine().getStatusCode());
for (Header header : httpResp.getAllHeaders()) {
resp.setHeader(header.getName(), header.getValue());
}
if (c != null) {
resp.addCookie(c);
}
InputStream in = httpResp.getEntity().getContent();
if (in != null) {
IOUtils.copyBytes(in, out, 4096, true);
}
} finally {
base.releaseConnection();
}
}
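  // Note: only GET and PUT are proxied above; any other verb short-circuits
  // with 405 Method Not Allowed. The response status, headers, the optional
  // check cookie, and the entity body are all relayed back to the client.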
private static String getCheckCookieName(ApplicationId id){
return "checked_"+id;
}
private static Cookie makeCheckCookie(ApplicationId id, boolean isSet) {
Cookie c = new Cookie(getCheckCookieName(id),String.valueOf(isSet));
c.setPath(ProxyUriUtils.getPath(id));
c.setMaxAge(60 * 60 * 2); //2 hours in seconds
return c;
}
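  // For example (illustrative): for application_100_0001 with isSet=true this
  // produces a cookie checked_application_100_0001=true scoped to the path
  // /proxy/application_100_0001 that expires after two hours.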
private boolean isSecurityEnabled() {
Boolean b = (Boolean) getServletContext()
.getAttribute(WebAppProxy.IS_SECURITY_ENABLED_ATTRIBUTE);
return b != null ? b : false;
}
private ApplicationReport getApplicationReport(ApplicationId id)
throws IOException, YarnException {
return ((AppReportFetcher) getServletContext()
.getAttribute(WebAppProxy.FETCHER_ATTRIBUTE)).getApplicationReport(id);
}
private String getProxyHost() throws IOException {
return ((String) getServletContext()
.getAttribute(WebAppProxy.PROXY_HOST_ATTRIBUTE));
}
@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp)
throws ServletException, IOException {
methodAction(req, resp, HTTP.GET);
}
@Override
protected final void doPut(final HttpServletRequest req,
final HttpServletResponse resp) throws ServletException, IOException {
methodAction(req, resp, HTTP.PUT);
}
/**
   * Perform the action for the given HTTP method.
* @param req the HttpServletRequest
* @param resp the HttpServletResponse
* @param method the HTTP method
* @throws ServletException
* @throws IOException
*/
private void methodAction(final HttpServletRequest req,
final HttpServletResponse resp,
final HTTP method) throws ServletException, IOException {
try {
String userApprovedParamS =
req.getParameter(ProxyUriUtils.PROXY_APPROVAL_PARAM);
boolean userWasWarned = false;
boolean userApproved = Boolean.valueOf(userApprovedParamS);
boolean securityEnabled = isSecurityEnabled();
final String remoteUser = req.getRemoteUser();
final String pathInfo = req.getPathInfo();
String[] parts = null;
if (pathInfo != null) {
parts = pathInfo.split("/", 3);
}
if(parts == null || parts.length < 2) {
LOG.warn("{} gave an invalid proxy path {}", remoteUser, pathInfo);
notFound(resp, "Your path appears to be formatted incorrectly.");
return;
}
//parts[0] is empty because path info always starts with a /
String appId = parts[1];
String rest = parts.length > 2 ? parts[2] : "";
ApplicationId id = Apps.toAppID(appId);
if(id == null) {
LOG.warn("{} attempting to access {} that is invalid",
remoteUser, appId);
notFound(resp, appId + " appears to be formatted incorrectly.");
return;
}
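      // In secure mode, remember whether this user has already been warned
      // about (and has approved) access to this application; approval is
      // tracked via the per-application cookie created by makeCheckCookie().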
if(securityEnabled) {
String cookieName = getCheckCookieName(id);
Cookie[] cookies = req.getCookies();
if (cookies != null) {
for (Cookie c : cookies) {
if (cookieName.equals(c.getName())) {
userWasWarned = true;
userApproved = userApproved || Boolean.valueOf(c.getValue());
break;
}
}
}
}
boolean checkUser = securityEnabled && (!userWasWarned || !userApproved);
ApplicationReport applicationReport;
try {
applicationReport = getApplicationReport(id);
} catch (ApplicationNotFoundException e) {
applicationReport = null;
}
if(applicationReport == null) {
LOG.warn("{} attempting to access {} that was not found",
remoteUser, id);
URI toFetch =
ProxyUriUtils
.getUriFromTrackingPlugins(id, this.trackingUriPlugins);
if (toFetch != null) {
ProxyUtils.sendRedirect(req, resp, toFetch.toString());
return;
}
notFound(resp, "Application " + appId + " could not be found, " +
"please try the history server");
return;
}
String original = applicationReport.getOriginalTrackingUrl();
URI trackingUri;
// fallback to ResourceManager's app page if no tracking URI provided
if(original == null || original.equals("N/A")) {
ProxyUtils.sendRedirect(req, resp,
StringHelper.pjoin(rmAppPageUrlBase, id.toString()));
return;
} else {
if (ProxyUriUtils.getSchemeFromUrl(original).isEmpty()) {
trackingUri = ProxyUriUtils.getUriFromAMUrl(
WebAppUtils.getHttpSchemePrefix(conf), original);
} else {
trackingUri = new URI(original);
}
}
String runningUser = applicationReport.getUser();
if(checkUser && !runningUser.equals(remoteUser)) {
LOG.info("Asking {} if they want to connect to the "
+ "app master GUI of {} owned by {}",
remoteUser, appId, runningUser);
warnUserPage(resp, ProxyUriUtils.getPathAndQuery(id, rest,
req.getQueryString(), true), runningUser, id);
return;
}
// Append the user-provided path and query parameter to the original
// tracking url.
List<NameValuePair> queryPairs =
URLEncodedUtils.parse(req.getQueryString(), null);
UriBuilder builder = UriBuilder.fromUri(trackingUri);
for (NameValuePair pair : queryPairs) {
builder.queryParam(pair.getName(), pair.getValue());
}
URI toFetch = builder.path(rest).build();
LOG.info("{} is accessing unchecked {}"
+ " which is the app master GUI of {} owned by {}",
remoteUser, toFetch, appId, runningUser);
switch (applicationReport.getYarnApplicationState()) {
case KILLED:
case FINISHED:
case FAILED:
ProxyUtils.sendRedirect(req, resp, toFetch.toString());
return;
default:
// fall out of the switch
}
Cookie c = null;
if (userWasWarned && userApproved) {
c = makeCheckCookie(id, true);
}
proxyLink(req, resp, toFetch, c, getProxyHost(), method);
} catch(URISyntaxException | YarnException e) {
throw new IOException(e);
}
}
/**
* This method is used by Java object deserialization, to fill in the
* transient {@link #trackingUriPlugins} field.
* See {@link ObjectInputStream#defaultReadObject()}
* <p>
* <I>Do not remove</I>
* <p>
* Yarn isn't currently serializing this class, but findbugs
* complains in its absence.
   *
* @param input source
* @throws IOException IO failure
* @throws ClassNotFoundException classloader fun
*/
private void readObject(ObjectInputStream input)
throws IOException, ClassNotFoundException {
input.defaultReadObject();
conf = new YarnConfiguration();
this.trackingUriPlugins =
conf.getInstances(YarnConfiguration.YARN_TRACKING_URL_GENERATOR,
TrackingUriPlugin.class);
}
}
| 15,673 | 33.524229 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxy.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.webproxy;
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.HttpServer2;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import com.google.common.annotations.VisibleForTesting;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class WebAppProxy extends AbstractService {
public static final String FETCHER_ATTRIBUTE= "AppUrlFetcher";
public static final String IS_SECURITY_ENABLED_ATTRIBUTE = "IsSecurityEnabled";
public static final String PROXY_HOST_ATTRIBUTE = "proxyHost";
private static final Logger LOG = LoggerFactory.getLogger(
WebAppProxy.class);
private HttpServer2 proxyServer = null;
private String bindAddress = null;
private int port = 0;
private AccessControlList acl = null;
private AppReportFetcher fetcher = null;
private boolean isSecurityEnabled = false;
private String proxyHost = null;
public WebAppProxy() {
super(WebAppProxy.class.getName());
}
@Override
protected void serviceInit(Configuration conf) throws Exception {
String auth = conf.get(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION);
if (auth == null || "simple".equals(auth)) {
isSecurityEnabled = false;
} else if ("kerberos".equals(auth)) {
isSecurityEnabled = true;
} else {
LOG.warn("Unrecongized attribute value for " +
CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION +
" of " + auth);
}
String proxy = WebAppUtils.getProxyHostAndPort(conf);
String[] proxyParts = proxy.split(":");
proxyHost = proxyParts[0];
fetcher = new AppReportFetcher(conf);
bindAddress = conf.get(YarnConfiguration.PROXY_ADDRESS);
if(bindAddress == null || bindAddress.isEmpty()) {
throw new YarnRuntimeException(YarnConfiguration.PROXY_ADDRESS +
" is not set so the proxy will not run.");
}
LOG.info("Instantiating Proxy at " + bindAddress);
String[] parts = StringUtils.split(bindAddress, ':');
port = 0;
if (parts.length == 2) {
bindAddress = parts[0];
port = Integer.parseInt(parts[1]);
}
acl = new AccessControlList(conf.get(YarnConfiguration.YARN_ADMIN_ACL,
YarnConfiguration.DEFAULT_YARN_ADMIN_ACL));
super.serviceInit(conf);
}
@Override
protected void serviceStart() throws Exception {
try {
Configuration conf = getConfig();
HttpServer2.Builder b = new HttpServer2.Builder()
.setName("proxy")
.addEndpoint(
URI.create(WebAppUtils.getHttpSchemePrefix(conf) + bindAddress
+ ":" + port)).setFindPort(port == 0).setConf(getConfig())
.setACL(acl);
if (YarnConfiguration.useHttps(conf)) {
WebAppUtils.loadSslConfiguration(b);
}
proxyServer = b.build();
proxyServer.addServlet(ProxyUriUtils.PROXY_SERVLET_NAME,
ProxyUriUtils.PROXY_PATH_SPEC, WebAppProxyServlet.class);
proxyServer.setAttribute(FETCHER_ATTRIBUTE, fetcher);
proxyServer
.setAttribute(IS_SECURITY_ENABLED_ATTRIBUTE, isSecurityEnabled);
proxyServer.setAttribute(PROXY_HOST_ATTRIBUTE, proxyHost);
proxyServer.start();
} catch (IOException e) {
LOG.error("Could not start proxy web server",e);
throw e;
}
super.serviceStart();
}
@Override
protected void serviceStop() throws Exception {
if(proxyServer != null) {
try {
proxyServer.stop();
} catch (Exception e) {
LOG.error("Error stopping proxy web server", e);
throw new YarnRuntimeException("Error stopping proxy web server",e);
}
}
if(this.fetcher != null) {
this.fetcher.stop();
}
super.serviceStop();
}
public void join() {
if(proxyServer != null) {
try {
proxyServer.join();
} catch (InterruptedException e) {
// ignored
}
}
}
@VisibleForTesting
String getBindAddress() {
return bindAddress + ":" + port;
}
}
| 5,187 | 33.586667 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/AppReportFetcher.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.webproxy;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
import org.apache.hadoop.yarn.api.ApplicationHistoryProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.client.AHSProxy;
import org.apache.hadoop.yarn.client.ClientRMProxy;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
/**
* This class abstracts away how ApplicationReports are fetched.
*/
public class AppReportFetcher {
private static final Log LOG = LogFactory.getLog(AppReportFetcher.class);
private final Configuration conf;
private final ApplicationClientProtocol applicationsManager;
private final ApplicationHistoryProtocol historyManager;
private final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
private boolean isAHSEnabled;
/**
* Create a new Connection to the RM/Application History Server
* to fetch Application reports.
* @param conf the conf to use to know where the RM is.
*/
public AppReportFetcher(Configuration conf) {
if (conf.getBoolean(YarnConfiguration.APPLICATION_HISTORY_ENABLED,
YarnConfiguration.DEFAULT_APPLICATION_HISTORY_ENABLED)) {
isAHSEnabled = true;
}
this.conf = conf;
try {
applicationsManager = ClientRMProxy.createRMProxy(conf,
ApplicationClientProtocol.class);
if (isAHSEnabled) {
historyManager = getAHSProxy(conf);
} else {
this.historyManager = null;
}
} catch (IOException e) {
throw new YarnRuntimeException(e);
}
}
/**
* Create a direct connection to RM instead of a remote connection when
* the proxy is running as part of the RM. Also create a remote connection to
* Application History Server if it is enabled.
* @param conf the configuration to use
* @param applicationsManager what to use to get the RM reports.
*/
public AppReportFetcher(Configuration conf, ApplicationClientProtocol applicationsManager) {
if (conf.getBoolean(YarnConfiguration.APPLICATION_HISTORY_ENABLED,
YarnConfiguration.DEFAULT_APPLICATION_HISTORY_ENABLED)) {
isAHSEnabled = true;
}
this.conf = conf;
this.applicationsManager = applicationsManager;
if (isAHSEnabled) {
try {
historyManager = getAHSProxy(conf);
} catch (IOException e) {
throw new YarnRuntimeException(e);
}
} else {
this.historyManager = null;
}
}
protected ApplicationHistoryProtocol getAHSProxy(Configuration configuration)
throws IOException {
return AHSProxy.createAHSProxy(configuration,
ApplicationHistoryProtocol.class,
configuration.getSocketAddr(YarnConfiguration.TIMELINE_SERVICE_ADDRESS,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_ADDRESS,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_PORT));
}
/**
* Get an application report for the specified application id from the RM and
* fall back to the Application History Server if not found in RM.
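   * <p>
   * A minimal usage sketch (the configuration and application id below are
   * illustrative, not taken from this codebase):
   * <pre>{@code
   * AppReportFetcher fetcher = new AppReportFetcher(new YarnConfiguration());
   * ApplicationReport report =
   *     fetcher.getApplicationReport(ApplicationId.newInstance(1L, 1));
   * System.out.println(report.getYarnApplicationState());
   * fetcher.stop();
   * }</pre>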
* @param appId id of the application to get.
* @return the ApplicationReport for the appId.
* @throws YarnException on any error.
* @throws IOException
*/
public ApplicationReport getApplicationReport(ApplicationId appId)
throws YarnException, IOException {
GetApplicationReportRequest request = recordFactory
.newRecordInstance(GetApplicationReportRequest.class);
request.setApplicationId(appId);
GetApplicationReportResponse response;
try {
response = applicationsManager.getApplicationReport(request);
} catch (YarnException e) {
if (!isAHSEnabled) {
// Just throw it as usual if historyService is not enabled.
throw e;
}
      // Even if the history service is enabled, treat all exceptions the same
      // except ApplicationNotFoundException, which falls back to the AHS.
if (!(e.getClass() == ApplicationNotFoundException.class)) {
throw e;
}
response = historyManager.getApplicationReport(request);
}
return response.getApplicationReport();
}
public void stop() {
if (this.applicationsManager != null) {
RPC.stopProxy(this.applicationsManager);
}
if (this.historyManager != null) {
RPC.stopProxy(this.historyManager);
}
}
}
| 5,787 | 37.331126 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.webproxy;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.service.CompositeService;
import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.ShutdownHookManager;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* ProxyServer will sit in between the end user and AppMaster
* web interfaces.
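 * <p>
 * A minimal sketch of starting it programmatically (the address below is
 * illustrative; in a standard distribution the same is done by the
 * {@code yarn proxyserver} command):
 * <pre>{@code
 * YarnConfiguration conf = new YarnConfiguration();
 * conf.set(YarnConfiguration.PROXY_ADDRESS, "proxyhost:8089");
 * WebAppProxyServer server = new WebAppProxyServer();
 * server.init(conf);
 * server.start();
 * }</pre>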
*/
public class WebAppProxyServer extends CompositeService {
/**
   * Priority of the WebAppProxyServer shutdown hook.
*/
public static final int SHUTDOWN_HOOK_PRIORITY = 30;
private static final Logger LOG = LoggerFactory.getLogger(
WebAppProxyServer.class);
private WebAppProxy proxy = null;
public WebAppProxyServer() {
super(WebAppProxyServer.class.getName());
}
@Override
protected void serviceInit(Configuration conf) throws Exception {
Configuration config = new YarnConfiguration(conf);
doSecureLogin(conf);
proxy = new WebAppProxy();
addService(proxy);
super.serviceInit(config);
}
/**
   * Log in as the Kerberos principal designated for the proxy.
   * @param conf the configuration holding this information.
* @throws IOException on any error.
*/
protected void doSecureLogin(Configuration conf) throws IOException {
InetSocketAddress socAddr = getBindAddress(conf);
SecurityUtil.login(conf, YarnConfiguration.PROXY_KEYTAB,
YarnConfiguration.PROXY_PRINCIPAL, socAddr.getHostName());
}
/**
   * Retrieve the proxy bind address from the configuration.
   *
   * @param conf the configuration to read the bind address from
   * @return the socket address the proxy should bind to
*/
public static InetSocketAddress getBindAddress(Configuration conf) {
return conf.getSocketAddr(YarnConfiguration.PROXY_ADDRESS,
YarnConfiguration.DEFAULT_PROXY_ADDRESS,
YarnConfiguration.DEFAULT_PROXY_PORT);
}
public static void main(String[] args) {
Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
StringUtils.startupShutdownMessage(WebAppProxyServer.class, args, LOG);
try {
YarnConfiguration configuration = new YarnConfiguration();
new GenericOptionsParser(configuration, args);
WebAppProxyServer proxyServer = startServer(configuration);
proxyServer.proxy.join();
} catch (Throwable t) {
ExitUtil.terminate(-1, t);
}
}
/**
* Start proxy server.
   *
   * @param configuration the configuration to initialize the server with
   * @return proxy server instance.
*/
protected static WebAppProxyServer startServer(Configuration configuration)
throws Exception {
WebAppProxyServer proxy = new WebAppProxyServer();
ShutdownHookManager.get().addShutdownHook(
new CompositeServiceShutdownHook(proxy), SHUTDOWN_HOOK_PRIORITY);
proxy.init(configuration);
proxy.start();
return proxy;
}
}
| 3,872 | 32.102564 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/ProxyUriUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.webproxy;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.util.TrackingUriPlugin;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.UnsupportedEncodingException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URLEncoder;
import java.util.List;
import static org.apache.hadoop.yarn.util.StringHelper.ujoin;
public class ProxyUriUtils {
@SuppressWarnings("unused")
private static final Logger LOG = LoggerFactory.getLogger(
ProxyUriUtils.class);
/**Name of the servlet to use when registering the proxy servlet. */
public static final String PROXY_SERVLET_NAME = "proxy";
/**Base path where the proxy servlet will handle requests.*/
public static final String PROXY_BASE = "/proxy/";
/**Path Specification for the proxy servlet.*/
public static final String PROXY_PATH_SPEC = PROXY_BASE+"*";
/**Query Parameter indicating that the URI was approved.*/
public static final String PROXY_APPROVAL_PARAM = "proxyapproved";
private static String uriEncode(Object o) {
try {
assert (o != null) : "o canot be null";
return URLEncoder.encode(o.toString(), "UTF-8");
} catch (UnsupportedEncodingException e) {
//This should never happen
throw new RuntimeException("UTF-8 is not supported by this system?", e);
}
}
/**
* Get the proxied path for an application.
* @param id the application id to use.
* @return the base path to that application through the proxy.
*/
public static String getPath(ApplicationId id) {
if(id == null) {
throw new IllegalArgumentException("Application id cannot be null ");
}
return ujoin(PROXY_BASE, uriEncode(id));
}
/**
* Get the proxied path for an application.
* @param id the application id to use.
* @param path the rest of the path to the application.
* @return the base path to that application through the proxy.
*/
public static String getPath(ApplicationId id, String path) {
if(path == null) {
return getPath(id);
} else {
return ujoin(getPath(id), path);
}
}
/**
* Get the proxied path for an application
* @param id the id of the application
* @param path the path of the application.
* @param query the query parameters
* @param approved true if the user has approved accessing this app.
* @return the proxied path for this app.
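   * <p>
   * Illustrative sketch: for an application id whose string form is
   * {@code application_1234567890123_0001}, path {@code "jobs"}, query
   * {@code "state=RUNNING"} and {@code approved == true}, this returns
   * {@code /proxy/application_1234567890123_0001/jobs?state=RUNNING&proxyapproved=true}.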
*/
public static String getPathAndQuery(ApplicationId id, String path,
String query, boolean approved) {
StringBuilder newp = new StringBuilder();
newp.append(getPath(id, path));
boolean first = appendQuery(newp, query, true);
if(approved) {
appendQuery(newp, PROXY_APPROVAL_PARAM+"=true", first);
}
return newp.toString();
}
private static boolean appendQuery(StringBuilder builder, String query,
boolean first) {
if(query != null && !query.isEmpty()) {
if(first && !query.startsWith("?")) {
builder.append('?');
}
if(!first && !query.startsWith("&")) {
builder.append('&');
}
builder.append(query);
return false;
}
return first;
}
/**
* Get a proxied URI for the original URI.
* @param originalUri the original URI to go through the proxy, or null if
* a default path "/" can be used.
* @param proxyUri the URI of the proxy itself, scheme, host and port are used.
* @param id the id of the application
* @return the proxied URI
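   * <p>
   * A short sketch (all values illustrative):
   * <pre>{@code
   * URI proxied = ProxyUriUtils.getProxyUri(
   *     new URI("http://am-host:4040/jobs?state=RUNNING"),
   *     new URI("http://proxy-host:8088"),
   *     ApplicationId.newInstance(1234567890123L, 1));
   * // -> http://proxy-host:8088/proxy/application_1234567890123_0001/jobs?state=RUNNING
   * }</pre>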
*/
public static URI getProxyUri(URI originalUri, URI proxyUri,
ApplicationId id) {
try {
String path = getPath(id, originalUri == null ? "/" : originalUri.getPath());
return new URI(proxyUri.getScheme(), proxyUri.getAuthority(), path,
originalUri == null ? null : originalUri.getQuery(),
originalUri == null ? null : originalUri.getFragment());
} catch (URISyntaxException e) {
throw new RuntimeException("Could not proxify "+originalUri,e);
}
}
/**
   * Create a URI from a URL that may lack a scheme, such as is returned by
   * the AM.
   * @param scheme the scheme prefix to use when the URL carries none
   * @param noSchemeUrl the URL format returned by an AM
* @return a URI with an http scheme
* @throws URISyntaxException if the url is not formatted correctly.
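   * <p>
   * Illustrative behavior: {@code getUriFromAMUrl("http://", "amhost:4040")}
   * yields {@code http://amhost:4040}, while an input that already carries a
   * scheme is returned unchanged.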
*/
public static URI getUriFromAMUrl(String scheme, String noSchemeUrl)
throws URISyntaxException {
if (getSchemeFromUrl(noSchemeUrl).isEmpty()) {
/*
       * If the AM reports a URL that already carries a scheme, that scheme is
       * used; otherwise we fall back to the scheme configured via
       * "yarn.http.policy".
*/
return new URI(scheme + noSchemeUrl);
} else {
return new URI(noSchemeUrl);
}
}
/**
* Returns the first valid tracking link, if any, from the given id from the
* given list of plug-ins, if any.
*
* @param id the id of the application for which the tracking link is desired
* @param trackingUriPlugins list of plugins from which to get the tracking link
* @return the desired link if possible, otherwise null
* @throws URISyntaxException
*/
public static URI getUriFromTrackingPlugins(ApplicationId id,
List<TrackingUriPlugin> trackingUriPlugins)
throws URISyntaxException {
URI toRet = null;
for(TrackingUriPlugin plugin : trackingUriPlugins)
{
toRet = plugin.getTrackingUri(id);
if (toRet != null)
{
return toRet;
}
}
return null;
}
/**
* Returns the scheme if present in the url
* eg. "https://issues.apache.org/jira/browse/YARN" {@literal ->} "https"
*/
public static String getSchemeFromUrl(String url) {
int index = 0;
if (url != null) {
index = url.indexOf("://");
}
if (index > 0) {
return url.substring(0, index);
} else {
return "";
}
}
}
| 6,710 | 32.723618 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/ProxyUtils.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.webproxy;
import org.apache.hadoop.yarn.webapp.MimeType;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.EnumSet;
/**
* Class containing general purpose proxy utilities
*/
public class ProxyUtils {
private static final Logger LOG = LoggerFactory.getLogger(
ProxyUtils.class);
public static final String E_HTTP_HTTPS_ONLY =
"This filter only works for HTTP/HTTPS";
public static final String LOCATION = "Location";
public static class _ implements Hamlet._ {
//Empty
}
public static class Page extends Hamlet {
Page(PrintWriter out) {
super(out, 0, false);
}
public HTML<ProxyUtils._> html() {
return new HTML<>("html", null, EnumSet.of(EOpt.ENDTAG));
}
}
/**
* Handle redirects with a status code that can in future support verbs other
* than GET, thus supporting full REST functionality.
* <p>
* The target URL is included in the redirect text returned
* <p>
* At the end of this method, the output stream is closed.
*
* @param request request (hence: the verb and any other information
* relevant to a redirect)
* @param response the response
   * @param target the target URL, unencoded
*
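   * <p>A sketch of the resulting response (values illustrative):
   * <pre>{@code
   * HTTP/1.1 302 Found
   * Location: http://proxy-host:8088/proxy/application_1234567890123_0001/
   * Content-Type: text/html
   * }</pre>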
*/
public static void sendRedirect(HttpServletRequest request,
HttpServletResponse response,
String target)
throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Redirecting {} {} to {}",
request.getMethod(),
request.getRequestURI(),
target);
}
String location = response.encodeRedirectURL(target);
response.setStatus(HttpServletResponse.SC_FOUND);
response.setHeader(LOCATION, location);
response.setContentType(MimeType.HTML);
PrintWriter writer = response.getWriter();
Page p = new Page(writer);
p.html()
.head().title("Moved")._()
.body()
.h1("Moved")
.div()
._("Content has moved ")
.a(location, "here")._()
._()._();
writer.close();
}
/**
* Output 404 with appropriate message.
* @param resp the http response.
* @param message the message to include on the page.
* @throws IOException on any error.
*/
public static void notFound(HttpServletResponse resp, String message)
throws IOException {
resp.setStatus(HttpServletResponse.SC_NOT_FOUND);
resp.setContentType(MimeType.HTML);
Page p = new Page(resp.getWriter());
p.html().
h1(message).
_();
}
/**
* Reject any request that isn't from an HTTP servlet
* @param req request
* @throws ServletException if the request is of the wrong type
*/
public static void rejectNonHttpRequests(ServletRequest req) throws
ServletException {
if (!(req instanceof HttpServletRequest)) {
throw new ServletException(E_HTTP_HTTPS_ONLY);
}
}
}
| 3,995 | 30.21875 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpPrincipal.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.webproxy.amfilter;
import java.security.Principal;
public class AmIpPrincipal implements Principal {
private final String name;
public AmIpPrincipal(String name) {
this.name = name;
}
@Override
public String getName() {
return name;
}
}
| 1,095 | 30.314286 | 74 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmFilterInitializer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.webproxy.amfilter;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.FilterContainer;
import org.apache.hadoop.http.FilterInitializer;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
import com.google.common.annotations.VisibleForTesting;
public class AmFilterInitializer extends FilterInitializer {
private static final String FILTER_NAME = "AM_PROXY_FILTER";
private static final String FILTER_CLASS = AmIpFilter.class.getCanonicalName();
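  // Illustrative values (not taken from any real deployment): with proxies
  // ["rm1.example.com:8088", "rm2.example.com:8088"], scheme "http://" and an
  // application proxy base of "/proxy/application_1234567890123_0001",
  // initFilter() below produces
  //   PROXY_HOSTS     = "rm1.example.com,rm2.example.com"
  //   PROXY_URI_BASES = "http://rm1.example.com:8088/proxy/application_1234567890123_0001,
  //                      http://rm2.example.com:8088/proxy/application_1234567890123_0001"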
@Override
public void initFilter(FilterContainer container, Configuration conf) {
Map<String, String> params = new HashMap<>();
List<String> proxies = WebAppUtils.getProxyHostsAndPortsForAmFilter(conf);
StringBuilder sb = new StringBuilder();
for (String proxy : proxies) {
sb.append(proxy.split(":")[0]).append(AmIpFilter.PROXY_HOSTS_DELIMITER);
}
sb.setLength(sb.length() - 1);
params.put(AmIpFilter.PROXY_HOSTS, sb.toString());
String prefix = WebAppUtils.getHttpSchemePrefix(conf);
String proxyBase = getApplicationWebProxyBase();
sb = new StringBuilder();
for (String proxy : proxies) {
sb.append(prefix).append(proxy).append(proxyBase)
.append(AmIpFilter.PROXY_HOSTS_DELIMITER);
}
sb.setLength(sb.length() - 1);
params.put(AmIpFilter.PROXY_URI_BASES, sb.toString());
container.addFilter(FILTER_NAME, FILTER_CLASS, params);
}
@VisibleForTesting
protected String getApplicationWebProxyBase() {
return System.getenv(ApplicationConstants.APPLICATION_WEB_PROXY_BASE_ENV);
}
}
| 2,518 | 37.753846 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.webproxy.amfilter;
import java.io.IOException;
import java.net.InetAddress;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.UnknownHostException;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.Cookie;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.yarn.conf.HAUtil;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.webproxy.ProxyUtils;
import org.apache.hadoop.yarn.server.webproxy.WebAppProxyServlet;
import org.apache.hadoop.yarn.util.RMHAUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@Public
public class AmIpFilter implements Filter {
private static final Logger LOG = LoggerFactory.getLogger(AmIpFilter.class);
@Deprecated
public static final String PROXY_HOST = "PROXY_HOST";
@Deprecated
public static final String PROXY_URI_BASE = "PROXY_URI_BASE";
public static final String PROXY_HOSTS = "PROXY_HOSTS";
public static final String PROXY_HOSTS_DELIMITER = ",";
public static final String PROXY_URI_BASES = "PROXY_URI_BASES";
public static final String PROXY_URI_BASES_DELIMITER = ",";
//update the proxy IP list about every 5 min
private static final long updateInterval = 5 * 60 * 1000;
private String[] proxyHosts;
private Set<String> proxyAddresses = null;
private long lastUpdate;
private Map<String, String> proxyUriBases;
@Override
public void init(FilterConfig conf) throws ServletException {
// Maintain for backwards compatibility
if (conf.getInitParameter(PROXY_HOST) != null
&& conf.getInitParameter(PROXY_URI_BASE) != null) {
proxyHosts = new String[]{conf.getInitParameter(PROXY_HOST)};
proxyUriBases = new HashMap<>(1);
proxyUriBases.put("dummy", conf.getInitParameter(PROXY_URI_BASE));
} else {
proxyHosts = conf.getInitParameter(PROXY_HOSTS)
.split(PROXY_HOSTS_DELIMITER);
String[] proxyUriBasesArr = conf.getInitParameter(PROXY_URI_BASES)
.split(PROXY_URI_BASES_DELIMITER);
proxyUriBases = new HashMap<>(proxyUriBasesArr.length);
for (String proxyUriBase : proxyUriBasesArr) {
try {
URL url = new URL(proxyUriBase);
proxyUriBases.put(url.getHost() + ":" + url.getPort(), proxyUriBase);
} catch(MalformedURLException e) {
LOG.warn("{} does not appear to be a valid URL", proxyUriBase, e);
}
}
}
}
protected Set<String> getProxyAddresses() throws ServletException {
long now = System.currentTimeMillis();
synchronized(this) {
      // refresh the cached proxy addresses once the update interval elapses
      if(proxyAddresses == null || (lastUpdate + updateInterval) <= now) {
proxyAddresses = new HashSet<>();
for (String proxyHost : proxyHosts) {
try {
for(InetAddress add : InetAddress.getAllByName(proxyHost)) {
if (LOG.isDebugEnabled()) {
LOG.debug("proxy address is: {}", add.getHostAddress());
}
proxyAddresses.add(add.getHostAddress());
}
lastUpdate = now;
} catch (UnknownHostException e) {
LOG.warn("Could not locate {} - skipping", proxyHost, e);
}
}
if (proxyAddresses.isEmpty()) {
throw new ServletException("Could not locate any of the proxy hosts");
}
}
return proxyAddresses;
}
}
@Override
public void destroy() {
//Empty
}
@Override
public void doFilter(ServletRequest req, ServletResponse resp,
FilterChain chain) throws IOException, ServletException {
ProxyUtils.rejectNonHttpRequests(req);
HttpServletRequest httpReq = (HttpServletRequest)req;
HttpServletResponse httpResp = (HttpServletResponse)resp;
if (LOG.isDebugEnabled()) {
LOG.debug("Remote address for request is: {}", httpReq.getRemoteAddr());
}
if (!getProxyAddresses().contains(httpReq.getRemoteAddr())) {
String redirectUrl = findRedirectUrl();
String target = redirectUrl + httpReq.getRequestURI();
ProxyUtils.sendRedirect(httpReq, httpResp, target);
return;
}
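    // Requests from a known proxy address fall through to here; the end user
    // (if any) is recovered from the cookie set by WebAppProxyServlet and
    // exposed to the AM webapp via AmIpServletRequestWrapper below.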
String user = null;
if (httpReq.getCookies() != null) {
for(Cookie c: httpReq.getCookies()) {
if(WebAppProxyServlet.PROXY_USER_COOKIE_NAME.equals(c.getName())){
user = c.getValue();
break;
}
}
}
if (user == null) {
if (LOG.isDebugEnabled()) {
LOG.debug("Could not find " + WebAppProxyServlet.PROXY_USER_COOKIE_NAME
+ " cookie, so user will not be set");
}
chain.doFilter(req, resp);
} else {
final AmIpPrincipal principal = new AmIpPrincipal(user);
ServletRequest requestWrapper = new AmIpServletRequestWrapper(httpReq,
principal);
chain.doFilter(requestWrapper, resp);
}
}
protected String findRedirectUrl() throws ServletException {
String addr;
if (proxyUriBases.size() == 1) { // external proxy or not RM HA
addr = proxyUriBases.values().iterator().next();
} else { // RM HA
YarnConfiguration conf = new YarnConfiguration();
String activeRMId = RMHAUtils.findActiveRMHAId(conf);
String addressPropertyPrefix = YarnConfiguration.useHttps(conf)
? YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS
: YarnConfiguration.RM_WEBAPP_ADDRESS;
String host = conf.get(
HAUtil.addSuffix(addressPropertyPrefix, activeRMId));
addr = proxyUriBases.get(host);
}
if (addr == null) {
throw new ServletException(
"Could not determine the proxy server for redirection");
}
return addr;
}
}
| 6,898 | 35.502646 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpServletRequestWrapper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.webproxy.amfilter;
import java.security.Principal;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletRequestWrapper;
public class AmIpServletRequestWrapper extends HttpServletRequestWrapper {
private final AmIpPrincipal principal;
public AmIpServletRequestWrapper(HttpServletRequest request,
AmIpPrincipal principal) {
super(request);
this.principal = principal;
}
@Override
public Principal getUserPrincipal() {
return principal;
}
@Override
public String getRemoteUser() {
return principal.getName();
}
@Override
public boolean isUserInRole(String role) {
//No role info so far
return false;
}
}
| 1,524 | 28.326923 | 74 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server;
import java.io.File;
import java.io.IOException;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.Collection;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.ha.HAServiceProtocol;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.service.CompositeService;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.conf.HAUtil;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.server.api.ResourceTracker;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerResponse;
import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer;
import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryStore;
import org.apache.hadoop.yarn.server.applicationhistoryservice.MemoryApplicationHistoryStore;
import org.apache.hadoop.yarn.server.nodemanager.Context;
import org.apache.hadoop.yarn.server.nodemanager.NodeHealthCheckerService;
import org.apache.hadoop.yarn.server.nodemanager.NodeManager;
import org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdater;
import org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceTrackerService;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptRegistrationEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptUnregistrationEvent;
import org.apache.hadoop.yarn.server.timeline.MemoryTimelineStore;
import org.apache.hadoop.yarn.server.timeline.TimelineStore;
import org.apache.hadoop.yarn.server.timeline.recovery.MemoryTimelineStateStore;
import org.apache.hadoop.yarn.server.timeline.recovery.TimelineStateStore;
import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
import com.google.common.annotations.VisibleForTesting;
/**
* Embedded Yarn minicluster for testcases that need to interact with a cluster.
* <p/>
* In a real cluster, resource request matching is done using the hostname, and
* by default Yarn minicluster works in the exact same way as a real cluster.
* <p/>
* If a testcase needs to use multiple nodes and exercise resource request
* matching to a specific node, then the property
 * {@link YarnConfiguration#RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME} should be set
 * <code>true</code> in the configuration used to initialize the minicluster.
 * <p/>
 * With this property set to <code>true</code>, the matching will be done using
 * the <code>hostname:port</code> of the node managers. In that case, the AM
 * must make resource requests using <code>hostname:port</code> as the location.
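 * <p/>
 * A minimal usage sketch (the test name and node counts are illustrative):
 * <pre>{@code
 * YarnConfiguration conf = new YarnConfiguration();
 * MiniYARNCluster cluster = new MiniYARNCluster("TestSomething", 1, 2, 1, 1);
 * cluster.init(conf);
 * cluster.start();
 * // ... run the test against cluster.getResourceManager() ...
 * cluster.stop();
 * }</pre>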
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class MiniYARNCluster extends CompositeService {
private static final Log LOG = LogFactory.getLog(MiniYARNCluster.class);
// temp fix until metrics system can auto-detect itself running in unit test:
static {
DefaultMetricsSystem.setMiniClusterMode(true);
}
private NodeManager[] nodeManagers;
private ResourceManager[] resourceManagers;
private String[] rmIds;
private ApplicationHistoryServer appHistoryServer;
private boolean useFixedPorts;
private boolean useRpc = false;
private int failoverTimeout;
private ConcurrentMap<ApplicationAttemptId, Long> appMasters =
new ConcurrentHashMap<ApplicationAttemptId, Long>(16, 0.75f, 2);
private File testWorkDir;
// Number of nm-local-dirs per nodemanager
private int numLocalDirs;
// Number of nm-log-dirs per nodemanager
private int numLogDirs;
private boolean enableAHS;
/**
* @param testName name of the test
* @param numResourceManagers the number of resource managers in the cluster
* @param numNodeManagers the number of node managers in the cluster
* @param numLocalDirs the number of nm-local-dirs per nodemanager
* @param numLogDirs the number of nm-log-dirs per nodemanager
* @param enableAHS enable ApplicationHistoryServer or not
*/
public MiniYARNCluster(
String testName, int numResourceManagers, int numNodeManagers,
int numLocalDirs, int numLogDirs, boolean enableAHS) {
super(testName.replace("$", ""));
this.numLocalDirs = numLocalDirs;
this.numLogDirs = numLogDirs;
this.enableAHS = enableAHS;
String testSubDir = testName.replace("$", "");
File targetWorkDir = new File("target", testSubDir);
try {
FileContext.getLocalFSFileContext().delete(
new Path(targetWorkDir.getAbsolutePath()), true);
} catch (Exception e) {
LOG.warn("COULD NOT CLEANUP", e);
throw new YarnRuntimeException("could not cleanup test dir: "+ e, e);
}
if (Shell.WINDOWS) {
// The test working directory can exceed the maximum path length supported
// by some Windows APIs and cmd.exe (260 characters). To work around this,
// create a symlink in temporary storage with a much shorter path,
// targeting the full path to the test working directory. Then, use the
// symlink as the test working directory.
String targetPath = targetWorkDir.getAbsolutePath();
File link = new File(System.getProperty("java.io.tmpdir"),
String.valueOf(System.currentTimeMillis()));
String linkPath = link.getAbsolutePath();
try {
FileContext.getLocalFSFileContext().delete(new Path(linkPath), true);
} catch (IOException e) {
throw new YarnRuntimeException("could not cleanup symlink: " + linkPath, e);
}
// Guarantee target exists before creating symlink.
targetWorkDir.mkdirs();
ShellCommandExecutor shexec = new ShellCommandExecutor(
Shell.getSymlinkCommand(targetPath, linkPath));
try {
shexec.execute();
} catch (IOException e) {
throw new YarnRuntimeException(String.format(
"failed to create symlink from %s to %s, shell output: %s", linkPath,
targetPath, shexec.getOutput()), e);
}
this.testWorkDir = link;
} else {
this.testWorkDir = targetWorkDir;
}
resourceManagers = new ResourceManager[numResourceManagers];
nodeManagers = new NodeManager[numNodeManagers];
}
/**
* @param testName name of the test
* @param numResourceManagers the number of resource managers in the cluster
* @param numNodeManagers the number of node managers in the cluster
* @param numLocalDirs the number of nm-local-dirs per nodemanager
* @param numLogDirs the number of nm-log-dirs per nodemanager
*/
public MiniYARNCluster(
String testName, int numResourceManagers, int numNodeManagers,
int numLocalDirs, int numLogDirs) {
this(testName, numResourceManagers, numNodeManagers, numLocalDirs,
numLogDirs, false);
}
/**
* @param testName name of the test
* @param numNodeManagers the number of node managers in the cluster
* @param numLocalDirs the number of nm-local-dirs per nodemanager
* @param numLogDirs the number of nm-log-dirs per nodemanager
*/
public MiniYARNCluster(String testName, int numNodeManagers,
int numLocalDirs, int numLogDirs) {
this(testName, 1, numNodeManagers, numLocalDirs, numLogDirs);
}
@Override
public void serviceInit(Configuration conf) throws Exception {
useFixedPorts = conf.getBoolean(
YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS,
YarnConfiguration.DEFAULT_YARN_MINICLUSTER_FIXED_PORTS);
useRpc = conf.getBoolean(YarnConfiguration.YARN_MINICLUSTER_USE_RPC,
YarnConfiguration.DEFAULT_YARN_MINICLUSTER_USE_RPC);
failoverTimeout = conf.getInt(YarnConfiguration.RM_ZK_TIMEOUT_MS,
YarnConfiguration.DEFAULT_RM_ZK_TIMEOUT_MS);
if (useRpc && !useFixedPorts) {
throw new YarnRuntimeException("Invalid configuration!" +
" Minicluster can use rpc only when configured to use fixed ports");
}
conf.setBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, true);
if (resourceManagers.length > 1) {
conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
if (conf.get(YarnConfiguration.RM_HA_IDS) == null) {
StringBuilder rmIds = new StringBuilder();
for (int i = 0; i < resourceManagers.length; i++) {
if (i != 0) {
rmIds.append(",");
}
rmIds.append("rm" + i);
}
conf.set(YarnConfiguration.RM_HA_IDS, rmIds.toString());
}
Collection<String> rmIdsCollection = HAUtil.getRMHAIds(conf);
rmIds = rmIdsCollection.toArray(new String[rmIdsCollection.size()]);
}
for (int i = 0; i < resourceManagers.length; i++) {
resourceManagers[i] = createResourceManager();
if (!useFixedPorts) {
if (HAUtil.isHAEnabled(conf)) {
setHARMConfiguration(i, conf);
} else {
setNonHARMConfiguration(conf);
}
}
addService(new ResourceManagerWrapper(i));
}
for(int index = 0; index < nodeManagers.length; index++) {
nodeManagers[index] =
useRpc ? new CustomNodeManager() : new ShortCircuitedNodeManager();
addService(new NodeManagerWrapper(index));
}
if(conf.getBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_ENABLED) || enableAHS) {
addService(new ApplicationHistoryServerWrapper());
}
super.serviceInit(
conf instanceof YarnConfiguration ? conf : new YarnConfiguration(conf));
}
private void setNonHARMConfiguration(Configuration conf) {
String hostname = MiniYARNCluster.getHostname();
conf.set(YarnConfiguration.RM_ADDRESS, hostname + ":0");
conf.set(YarnConfiguration.RM_ADMIN_ADDRESS, hostname + ":0");
conf.set(YarnConfiguration.RM_SCHEDULER_ADDRESS, hostname + ":0");
conf.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, hostname + ":0");
WebAppUtils.setRMWebAppHostnameAndPort(conf, hostname, 0);
}
private void setHARMConfiguration(final int index, Configuration conf) {
String hostname = MiniYARNCluster.getHostname();
for (String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) {
conf.set(HAUtil.addSuffix(confKey, rmIds[index]), hostname + ":0");
}
}
private synchronized void initResourceManager(int index, Configuration conf) {
Configuration newConf = resourceManagers.length > 1 ?
new YarnConfiguration(conf) : conf;
if (HAUtil.isHAEnabled(newConf)) {
newConf.set(YarnConfiguration.RM_HA_ID, rmIds[index]);
}
resourceManagers[index].init(newConf);
resourceManagers[index].getRMContext().getDispatcher().register(
RMAppAttemptEventType.class,
new EventHandler<RMAppAttemptEvent>() {
public void handle(RMAppAttemptEvent event) {
if (event instanceof RMAppAttemptRegistrationEvent) {
appMasters.put(event.getApplicationAttemptId(),
event.getTimestamp());
} else if (event instanceof RMAppAttemptUnregistrationEvent) {
appMasters.remove(event.getApplicationAttemptId());
}
}
});
}
private synchronized void startResourceManager(final int index) {
try {
Thread rmThread = new Thread() {
public void run() {
resourceManagers[index].start();
}
};
rmThread.setName("RM-" + index);
rmThread.start();
int waitCount = 0;
while (resourceManagers[index].getServiceState() == STATE.INITED
&& waitCount++ < 60) {
LOG.info("Waiting for RM to start...");
Thread.sleep(1500);
}
if (resourceManagers[index].getServiceState() != STATE.STARTED) {
// RM could have failed.
throw new IOException(
"ResourceManager failed to start. Final state is "
+ resourceManagers[index].getServiceState());
}
} catch (Throwable t) {
throw new YarnRuntimeException(t);
}
Configuration conf = resourceManagers[index].getConfig();
LOG.info("MiniYARN ResourceManager address: " +
conf.get(YarnConfiguration.RM_ADDRESS));
LOG.info("MiniYARN ResourceManager web address: " +
WebAppUtils.getRMWebAppURLWithoutScheme(conf));
}
@InterfaceAudience.Private
@VisibleForTesting
public synchronized void stopResourceManager(int index) {
if (resourceManagers[index] != null) {
resourceManagers[index].stop();
resourceManagers[index] = null;
}
}
@InterfaceAudience.Private
@VisibleForTesting
public synchronized void restartResourceManager(int index)
throws InterruptedException {
if (resourceManagers[index] != null) {
resourceManagers[index].stop();
resourceManagers[index] = null;
}
resourceManagers[index] = new ResourceManager();
initResourceManager(index, getConfig());
startResourceManager(index);
}
public File getTestWorkDir() {
return testWorkDir;
}
/**
   * In an HA cluster, go through all the RMs and find the active RM. In a
   * non-HA cluster, return the index of the only RM.
   *
   * @return index of the active RM, or -1 if none of them becomes active
*/
@InterfaceAudience.Private
@VisibleForTesting
public int getActiveRMIndex() {
if (resourceManagers.length == 1) {
return 0;
}
int numRetriesForRMBecomingActive = failoverTimeout / 100;
while (numRetriesForRMBecomingActive-- > 0) {
for (int i = 0; i < resourceManagers.length; i++) {
if (resourceManagers[i] == null) {
continue;
}
try {
if (HAServiceProtocol.HAServiceState.ACTIVE ==
resourceManagers[i].getRMContext().getRMAdminService()
.getServiceStatus().getState()) {
return i;
}
} catch (IOException e) {
throw new YarnRuntimeException("Couldn't read the status of " +
"a ResourceManger in the HA ensemble.", e);
}
}
try {
Thread.sleep(100);
} catch (InterruptedException e) {
throw new YarnRuntimeException("Interrupted while waiting for one " +
"of the ResourceManagers to become active");
}
}
return -1;
}
/**
* @return the active {@link ResourceManager} of the cluster,
* null if none of them are active.
*/
public ResourceManager getResourceManager() {
int activeRMIndex = getActiveRMIndex();
return activeRMIndex == -1
? null
: this.resourceManagers[activeRMIndex];
}
public ResourceManager getResourceManager(int i) {
return this.resourceManagers[i];
}
public NodeManager getNodeManager(int i) {
return this.nodeManagers[i];
}
public static String getHostname() {
try {
return InetAddress.getLocalHost().getHostName();
}
catch (UnknownHostException ex) {
throw new RuntimeException(ex);
}
}
private class ResourceManagerWrapper extends AbstractService {
private int index;
public ResourceManagerWrapper(int i) {
super(ResourceManagerWrapper.class.getName() + "_" + i);
index = i;
}
@Override
protected synchronized void serviceInit(Configuration conf)
throws Exception {
initResourceManager(index, conf);
super.serviceInit(conf);
}
@Override
protected synchronized void serviceStart() throws Exception {
startResourceManager(index);
Configuration conf = resourceManagers[index].getConfig();
LOG.info("MiniYARN ResourceManager address: " +
conf.get(YarnConfiguration.RM_ADDRESS));
LOG.info("MiniYARN ResourceManager web address: " + WebAppUtils
.getRMWebAppURLWithoutScheme(conf));
super.serviceStart();
}
private void waitForAppMastersToFinish(long timeoutMillis) throws InterruptedException {
long started = System.currentTimeMillis();
synchronized (appMasters) {
while (!appMasters.isEmpty() && System.currentTimeMillis() - started < timeoutMillis) {
appMasters.wait(1000);
}
}
if (!appMasters.isEmpty()) {
LOG.warn("Stopping RM while some app masters are still alive");
}
}
@Override
protected synchronized void serviceStop() throws Exception {
if (resourceManagers[index] != null) {
waitForAppMastersToFinish(5000);
resourceManagers[index].stop();
}
if (Shell.WINDOWS) {
// On Windows, clean up the short temporary symlink that was created to
// work around path length limitation.
String testWorkDirPath = testWorkDir.getAbsolutePath();
try {
FileContext.getLocalFSFileContext().delete(new Path(testWorkDirPath),
true);
} catch (IOException e) {
LOG.warn("could not cleanup symlink: " +
testWorkDir.getAbsolutePath());
}
}
super.serviceStop();
}
}
private class NodeManagerWrapper extends AbstractService {
    private int index;
public NodeManagerWrapper(int i) {
super(NodeManagerWrapper.class.getName() + "_" + i);
index = i;
}
    @Override
    protected synchronized void serviceInit(Configuration conf)
throws Exception {
Configuration config = new YarnConfiguration(conf);
// create nm-local-dirs and configure them for the nodemanager
String localDirsString = prepareDirs("local", numLocalDirs);
config.set(YarnConfiguration.NM_LOCAL_DIRS, localDirsString);
// create nm-log-dirs and configure them for the nodemanager
String logDirsString = prepareDirs("log", numLogDirs);
config.set(YarnConfiguration.NM_LOG_DIRS, logDirsString);
config.setInt(YarnConfiguration.NM_PMEM_MB, config.getInt(
YarnConfiguration.YARN_MINICLUSTER_NM_PMEM_MB,
YarnConfiguration.DEFAULT_YARN_MINICLUSTER_NM_PMEM_MB));
config.set(YarnConfiguration.NM_ADDRESS,
MiniYARNCluster.getHostname() + ":0");
config.set(YarnConfiguration.NM_LOCALIZER_ADDRESS,
MiniYARNCluster.getHostname() + ":0");
      WebAppUtils.setNMWebAppHostNameAndPort(config,
          MiniYARNCluster.getHostname(), 0);
// Disable resource checks by default
if (!config.getBoolean(
YarnConfiguration.YARN_MINICLUSTER_CONTROL_RESOURCE_MONITORING,
YarnConfiguration.
DEFAULT_YARN_MINICLUSTER_CONTROL_RESOURCE_MONITORING)) {
config.setBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED, false);
config.setBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED, false);
}
LOG.info("Starting NM: " + index);
nodeManagers[index].init(config);
super.serviceInit(config);
}
/**
* Create local/log directories
* @param dirType type of directories i.e. local dirs or log dirs
* @param numDirs number of directories
* @return the created directories as a comma delimited String
*/
private String prepareDirs(String dirType, int numDirs) {
      File[] dirs = new File[numDirs];
String dirsString = "";
for (int i = 0; i < numDirs; i++) {
        dirs[i] = new File(testWorkDir, MiniYARNCluster.this.getName()
            + "-" + dirType + "Dir-nm-" + index + "_" + i);
dirs[i].mkdirs();
LOG.info("Created " + dirType + "Dir in " + dirs[i].getAbsolutePath());
String delimiter = (i > 0) ? "," : "";
dirsString = dirsString.concat(delimiter + dirs[i].getAbsolutePath());
}
return dirsString;
}
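    // Illustrative example (assumed values, not output captured from a real
    // run): with numDirs == 2, index == 0 and cluster name "T", the returned
    // string looks like
    //   <testWorkDir>/T-localDir-nm-0_0,<testWorkDir>/T-localDir-nm-0_1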
    @Override
    protected synchronized void serviceStart() throws Exception {
try {
new Thread() {
public void run() {
nodeManagers[index].start();
}
}.start();
int waitCount = 0;
while (nodeManagers[index].getServiceState() == STATE.INITED
&& waitCount++ < 60) {
LOG.info("Waiting for NM " + index + " to start...");
Thread.sleep(1000);
}
if (nodeManagers[index].getServiceState() != STATE.STARTED) {
          // NM could have failed.
throw new IOException("NodeManager " + index + " failed to start");
}
super.serviceStart();
} catch (Throwable t) {
throw new YarnRuntimeException(t);
}
}
@Override
protected synchronized void serviceStop() throws Exception {
if (nodeManagers[index] != null) {
nodeManagers[index].stop();
}
super.serviceStop();
}
}
private class CustomNodeManager extends NodeManager {
@Override
protected void doSecureLogin() throws IOException {
// Don't try to login using keytab in the testcase.
}
}
private class ShortCircuitedNodeManager extends CustomNodeManager {
@Override
protected NodeStatusUpdater createNodeStatusUpdater(Context context,
Dispatcher dispatcher, NodeHealthCheckerService healthChecker) {
return new NodeStatusUpdaterImpl(context, dispatcher,
healthChecker, metrics) {
@Override
protected ResourceTracker getRMClient() {
final ResourceTrackerService rt =
getResourceManager().getResourceTrackerService();
final RecordFactory recordFactory =
RecordFactoryProvider.getRecordFactory(null);
// For in-process communication without RPC
return new ResourceTracker() {
@Override
public NodeHeartbeatResponse nodeHeartbeat(
NodeHeartbeatRequest request) throws YarnException,
IOException {
NodeHeartbeatResponse response;
try {
response = rt.nodeHeartbeat(request);
} catch (YarnException e) {
LOG.info("Exception in heartbeat from node " +
request.getNodeStatus().getNodeId(), e);
throw e;
}
return response;
}
@Override
public RegisterNodeManagerResponse registerNodeManager(
RegisterNodeManagerRequest request)
throws YarnException, IOException {
RegisterNodeManagerResponse response;
try {
response = rt.registerNodeManager(request);
} catch (YarnException e) {
LOG.info("Exception in node registration from "
+ request.getNodeId().toString(), e);
throw e;
}
return response;
}
@Override
public UnRegisterNodeManagerResponse unRegisterNodeManager(
UnRegisterNodeManagerRequest request) throws YarnException,
IOException {
return recordFactory
.newRecordInstance(UnRegisterNodeManagerResponse.class);
}
};
}
@Override
protected void stopRMProxy() { }
};
}
}
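  // Hedged design note (intent inferred, not stated in the original): by
  // returning an in-process ResourceTracker, ShortCircuitedNodeManager lets
  // NM registrations and heartbeats invoke the RM's ResourceTrackerService
  // as plain method calls, skipping the RPC layer entirely. Effectively:
  //
  //   ResourceTracker rt = ...;        // the anonymous tracker above
  //   rt.registerNodeManager(request); // direct call into the RM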
/**
* Wait for all the NodeManagers to connect to the ResourceManager.
*
* @param timeout Time to wait (sleeps in 100 ms intervals) in milliseconds.
* @return true if all NodeManagers connect to the (Active)
* ResourceManager, false otherwise.
* @throws YarnException
* @throws InterruptedException
*/
public boolean waitForNodeManagersToConnect(long timeout)
throws YarnException, InterruptedException {
GetClusterMetricsRequest req = GetClusterMetricsRequest.newInstance();
for (int i = 0; i < timeout / 100; i++) {
ResourceManager rm = getResourceManager();
if (rm == null) {
throw new YarnException("Can not find the active RM.");
      } else if (nodeManagers.length == rm.getClientRMService()
.getClusterMetrics(req).getClusterMetrics().getNumNodeManagers()) {
return true;
}
Thread.sleep(100);
}
return false;
}
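  // Hedged usage sketch (the cluster name, sizes and timeout are illustrative
  // assumptions): typical bring-up of a MiniYARNCluster in a test.
  //
  //   MiniYARNCluster cluster = new MiniYARNCluster("example", 1, 1, 1);
  //   cluster.init(new YarnConfiguration());
  //   cluster.start();
  //   Assert.assertTrue("NMs failed to connect",
  //       cluster.waitForNodeManagersToConnect(10000));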
private class ApplicationHistoryServerWrapper extends AbstractService {
public ApplicationHistoryServerWrapper() {
super(ApplicationHistoryServerWrapper.class.getName());
}
@Override
protected synchronized void serviceInit(Configuration conf)
throws Exception {
appHistoryServer = new ApplicationHistoryServer();
conf.setClass(YarnConfiguration.APPLICATION_HISTORY_STORE,
MemoryApplicationHistoryStore.class, ApplicationHistoryStore.class);
conf.setClass(YarnConfiguration.TIMELINE_SERVICE_STORE,
MemoryTimelineStore.class, TimelineStore.class);
conf.setClass(YarnConfiguration.TIMELINE_SERVICE_STATE_STORE_CLASS,
MemoryTimelineStateStore.class, TimelineStateStore.class);
appHistoryServer.init(conf);
super.serviceInit(conf);
}
@Override
protected synchronized void serviceStart() throws Exception {
try {
new Thread() {
public void run() {
appHistoryServer.start();
          }
}.start();
int waitCount = 0;
while (appHistoryServer.getServiceState() == STATE.INITED
&& waitCount++ < 60) {
LOG.info("Waiting for Timeline Server to start...");
Thread.sleep(1500);
}
if (appHistoryServer.getServiceState() != STATE.STARTED) {
// AHS could have failed.
throw new IOException(
"ApplicationHistoryServer failed to start. Final state is "
+ appHistoryServer.getServiceState());
}
super.serviceStart();
} catch (Throwable t) {
throw new YarnRuntimeException(t);
}
LOG.info("MiniYARN ApplicationHistoryServer address: "
+ getConfig().get(YarnConfiguration.TIMELINE_SERVICE_ADDRESS));
LOG.info("MiniYARN ApplicationHistoryServer web address: "
+ getConfig().get(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS));
}
@Override
    protected synchronized void serviceStop() throws Exception {
      if (appHistoryServer != null) {
        appHistoryServer.stop();
      }
      super.serviceStop();
    }
}
public ApplicationHistoryServer getApplicationHistoryServer() {
return this.appHistoryServer;
}
protected ResourceManager createResourceManager() {
return new ResourceManager(){
@Override
protected void doSecureLogin() throws IOException {
// Don't try to login using keytab in the testcases.
}
};
}
public int getNumOfResourceManager() {
return this.resourceManagers.length;
}
}
| 28,864 | 36.781414 | 105 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/ContainerTokenIdentifierForTest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.commons.io.IOUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.LogAggregationContext;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.impl.pb.ContainerIdPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.LogAggregationContextPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.PriorityPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ResourcePBImpl;
import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
import org.apache.hadoop.yarn.proto.YarnProtos.LogAggregationContextProto;
import org.apache.hadoop.yarn.proto.YarnSecurityTestTokenProtos.ContainerTokenIdentifierForTestProto;
import com.google.protobuf.TextFormat;
public class ContainerTokenIdentifierForTest extends ContainerTokenIdentifier {
  private static final Log LOG =
      LogFactory.getLog(ContainerTokenIdentifierForTest.class);
public static final Text KIND = new Text("ContainerToken");
private ContainerTokenIdentifierForTestProto proto;
public ContainerTokenIdentifierForTest(ContainerId containerID,
String hostName, String appSubmitter, Resource r, long expiryTimeStamp,
int masterKeyId, long rmIdentifier, Priority priority, long creationTime,
LogAggregationContext logAggregationContext) {
ContainerTokenIdentifierForTestProto.Builder builder =
ContainerTokenIdentifierForTestProto.newBuilder();
if (containerID != null) {
builder.setContainerId(((ContainerIdPBImpl)containerID).getProto());
}
builder.setNmHostAddr(hostName);
builder.setAppSubmitter(appSubmitter);
if (r != null) {
builder.setResource(((ResourcePBImpl)r).getProto());
}
builder.setExpiryTimeStamp(expiryTimeStamp);
builder.setMasterKeyId(masterKeyId);
builder.setRmIdentifier(rmIdentifier);
if (priority != null) {
builder.setPriority(((PriorityPBImpl)priority).getProto());
}
builder.setCreationTime(creationTime);
if (logAggregationContext != null) {
builder.setLogAggregationContext(
((LogAggregationContextPBImpl)logAggregationContext).getProto());
}
proto = builder.build();
}
public ContainerTokenIdentifierForTest(ContainerTokenIdentifier identifier,
String message) {
ContainerTokenIdentifierForTestProto.Builder builder =
ContainerTokenIdentifierForTestProto.newBuilder();
ContainerIdPBImpl containerID =
(ContainerIdPBImpl)identifier.getContainerID();
if (containerID != null) {
builder.setContainerId(containerID.getProto());
}
builder.setNmHostAddr(identifier.getNmHostAddress());
builder.setAppSubmitter(identifier.getApplicationSubmitter());
ResourcePBImpl resource = (ResourcePBImpl)identifier.getResource();
if (resource != null) {
builder.setResource(resource.getProto());
}
builder.setExpiryTimeStamp(identifier.getExpiryTimeStamp());
builder.setMasterKeyId(identifier.getMasterKeyId());
builder.setRmIdentifier(identifier.getRMIdentifier());
PriorityPBImpl priority = (PriorityPBImpl)identifier.getPriority();
if (priority != null) {
builder.setPriority(priority.getProto());
}
builder.setCreationTime(identifier.getCreationTime());
builder.setMessage(message);
LogAggregationContextPBImpl logAggregationContext =
(LogAggregationContextPBImpl)identifier.getLogAggregationContext();
if (logAggregationContext != null) {
builder.setLogAggregationContext(logAggregationContext.getProto());
}
proto = builder.build();
}
public ContainerId getContainerID() {
return new ContainerIdPBImpl(proto.getContainerId());
}
public String getApplicationSubmitter() {
return proto.getAppSubmitter();
}
public String getNmHostAddress() {
return proto.getNmHostAddr();
}
public Resource getResource() {
return new ResourcePBImpl(proto.getResource());
}
public long getExpiryTimeStamp() {
return proto.getExpiryTimeStamp();
}
public int getMasterKeyId() {
return proto.getMasterKeyId();
}
public Priority getPriority() {
return new PriorityPBImpl(proto.getPriority());
}
public long getCreationTime() {
return proto.getCreationTime();
}
/**
   * Get the RMIdentifier of the RM by which the container was allocated.
* @return RMIdentifier
*/
public long getRMIdentifier() {
return proto.getRmIdentifier();
}
@Override
public void readFields(DataInput in) throws IOException {
DataInputStream dis = (DataInputStream)in;
byte[] buffer = IOUtils.toByteArray(dis);
proto = ContainerTokenIdentifierForTestProto.parseFrom(buffer);
}
@Override
public void write(DataOutput out) throws IOException {
LOG.debug("Writing ContainerTokenIdentifierForTest to RPC layer: " + this);
out.write(proto.toByteArray());
}
ContainerTokenIdentifierForTestProto getNewProto() {
return this.proto;
}
@Override
public int hashCode() {
return this.proto.hashCode();
}
@Override
public boolean equals(Object other) {
if (other == null)
return false;
if (other.getClass().isAssignableFrom(this.getClass())) {
return this.getNewProto().equals(this.getClass().cast(other).getNewProto());
}
return false;
}
@Override
public String toString() {
return TextFormat.shortDebugString(this.proto);
}
}
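// Hedged round-trip sketch (illustrative, not from the original source): the
// identifier serializes as raw protobuf bytes, so write() followed by
// readFields() on another instance should make the two protos equal.
//
//   DataOutputBuffer out = new DataOutputBuffer();
//   original.write(out);
//   DataInputBuffer in = new DataInputBuffer();
//   in.reset(out.getData(), out.getLength());
//   copy.readFields(in); // DataInputBuffer is a DataInputStream
//   Assert.assertEquals(original, copy);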
| 6,588 | 32.617347 | 101 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestDiskFailures.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
import org.apache.hadoop.yarn.server.nodemanager.NodeManager;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Iterator;
import java.util.List;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.Assert;
/**
 * Verify that the NodeManager's in-memory good local-dirs list and good
 * log-dirs list get updated properly when disks (nm-local-dirs and
 * nm-log-dirs) fail. Also verify that the overall health status of the node
 * gets updated properly when the specified percentage of disks fail.
*/
public class TestDiskFailures {
private static final Log LOG = LogFactory.getLog(TestDiskFailures.class);
  private static final long DISK_HEALTH_CHECK_INTERVAL = 1000; // 1 sec
private static FileContext localFS = null;
private static final File testDir = new File("target",
TestDiskFailures.class.getName()).getAbsoluteFile();
private static final File localFSDirBase = new File(testDir,
TestDiskFailures.class.getName() + "-localDir");
private static final int numLocalDirs = 4;
private static final int numLogDirs = 4;
private static MiniYARNCluster yarnCluster;
LocalDirsHandlerService dirsHandler;
@BeforeClass
public static void setup() throws AccessControlException,
FileNotFoundException, UnsupportedFileSystemException, IOException {
localFS = FileContext.getLocalFSFileContext();
localFS.delete(new Path(localFSDirBase.getAbsolutePath()), true);
localFSDirBase.mkdirs();
// Do not start cluster here
}
@AfterClass
public static void teardown() {
if (yarnCluster != null) {
yarnCluster.stop();
yarnCluster = null;
}
FileUtil.fullyDelete(localFSDirBase);
}
/**
* Make local-dirs fail/inaccessible and verify if NodeManager can
* recognize the disk failures properly and can update the list of
* local-dirs accordingly with good disks. Also verify the overall
* health status of the node.
* @throws IOException
*/
@Test
public void testLocalDirsFailures() throws IOException {
testDirsFailures(true);
}
/**
* Make log-dirs fail/inaccessible and verify if NodeManager can
* recognize the disk failures properly and can update the list of
* log-dirs accordingly with good disks. Also verify the overall health
* status of the node.
* @throws IOException
*/
@Test
public void testLogDirsFailures() throws IOException {
testDirsFailures(false);
}
/**
* Make a local and log directory inaccessible during initialization
* and verify those bad directories are recognized and removed from
* the list of available local and log directories.
* @throws IOException
*/
@Test
public void testDirFailuresOnStartup() throws IOException {
Configuration conf = new YarnConfiguration();
String localDir1 = new File(testDir, "localDir1").getPath();
String localDir2 = new File(testDir, "localDir2").getPath();
String logDir1 = new File(testDir, "logDir1").getPath();
String logDir2 = new File(testDir, "logDir2").getPath();
conf.set(YarnConfiguration.NM_LOCAL_DIRS, localDir1 + "," + localDir2);
conf.set(YarnConfiguration.NM_LOG_DIRS, logDir1 + "," + logDir2);
prepareDirToFail(localDir1);
prepareDirToFail(logDir2);
LocalDirsHandlerService dirSvc = new LocalDirsHandlerService();
dirSvc.init(conf);
List<String> localDirs = dirSvc.getLocalDirs();
Assert.assertEquals(1, localDirs.size());
Assert.assertEquals(new Path(localDir2).toString(), localDirs.get(0));
List<String> logDirs = dirSvc.getLogDirs();
Assert.assertEquals(1, logDirs.size());
Assert.assertEquals(new Path(logDir1).toString(), logDirs.get(0));
}
private void testDirsFailures(boolean localORLogDirs) throws IOException {
String dirType = localORLogDirs ? "local" : "log";
String dirsProperty = localORLogDirs ? YarnConfiguration.NM_LOCAL_DIRS
: YarnConfiguration.NM_LOG_DIRS;
Configuration conf = new Configuration();
// set disk health check interval to a small value (say 1 sec).
conf.setLong(YarnConfiguration.NM_DISK_HEALTH_CHECK_INTERVAL_MS,
DISK_HEALTH_CHECK_INTERVAL);
    // If 2 out of the total 4 local-dirs fail, or if 2 out of the total 4
    // log-dirs fail, then the node's health status should become unhealthy.
conf.setFloat(YarnConfiguration.NM_MIN_HEALTHY_DISKS_FRACTION, 0.60F);
if (yarnCluster != null) {
yarnCluster.stop();
FileUtil.fullyDelete(localFSDirBase);
localFSDirBase.mkdirs();
}
LOG.info("Starting up YARN cluster");
yarnCluster = new MiniYARNCluster(TestDiskFailures.class.getName(),
1, numLocalDirs, numLogDirs);
yarnCluster.init(conf);
yarnCluster.start();
NodeManager nm = yarnCluster.getNodeManager(0);
LOG.info("Configured nm-" + dirType + "-dirs="
+ nm.getConfig().get(dirsProperty));
dirsHandler = nm.getNodeHealthChecker().getDiskHandler();
List<String> list = localORLogDirs ? dirsHandler.getLocalDirs()
: dirsHandler.getLogDirs();
String[] dirs = list.toArray(new String[list.size()]);
Assert.assertEquals("Number of nm-" + dirType + "-dirs is wrong.",
numLocalDirs, dirs.length);
String expectedDirs = StringUtils.join(",", list);
// validate the health of disks initially
verifyDisksHealth(localORLogDirs, expectedDirs, true);
    // Make 1 nm-local-dir fail and verify that the NodeManager can identify
    // the disk failure(s) and update the list of good nm-local-dirs.
prepareDirToFail(dirs[2]);
expectedDirs = dirs[0] + "," + dirs[1] + ","
+ dirs[3];
verifyDisksHealth(localORLogDirs, expectedDirs, true);
    // Now, make 1 more nm-local-dir/nm-log-dir fail and verify that the
    // NodeManager can identify the disk failures, update the list of good
    // nm-local-dirs/nm-log-dirs, and update the overall health status of the
    // node to unhealthy.
prepareDirToFail(dirs[0]);
expectedDirs = dirs[1] + "," + dirs[3];
verifyDisksHealth(localORLogDirs, expectedDirs, false);
// Fail the remaining 2 local-dirs/log-dirs and verify if NM remains with
// empty list of local-dirs/log-dirs and the overall health status is
// unhealthy.
prepareDirToFail(dirs[1]);
prepareDirToFail(dirs[3]);
expectedDirs = "";
verifyDisksHealth(localORLogDirs, expectedDirs, false);
}
/**
   * Wait for the NodeManager to run its disk health check at least once.
*/
private void waitForDiskHealthCheck() {
long lastDisksCheckTime = dirsHandler.getLastDisksCheckTime();
long time = lastDisksCheckTime;
for (int i = 0; i < 10 && (time <= lastDisksCheckTime); i++) {
try {
Thread.sleep(1000);
} catch(InterruptedException e) {
LOG.error(
"Interrupted while waiting for NodeManager's disk health check.");
}
time = dirsHandler.getLastDisksCheckTime();
}
}
/**
   * Verify that the NodeManager identified the disk failures.
   * @param localORLogDirs <em>true</em> represents nm-local-dirs and <em>false
* </em> means nm-log-dirs
* @param expectedDirs expected nm-local-dirs/nm-log-dirs as a string
* @param isHealthy <em>true</em> if the overall node should be healthy
*/
private void verifyDisksHealth(boolean localORLogDirs, String expectedDirs,
boolean isHealthy) {
// Wait for the NodeManager to identify disk failures.
waitForDiskHealthCheck();
List<String> list = localORLogDirs ? dirsHandler.getLocalDirs()
: dirsHandler.getLogDirs();
String seenDirs = StringUtils.join(",", list);
LOG.info("ExpectedDirs=" + expectedDirs);
LOG.info("SeenDirs=" + seenDirs);
Assert.assertTrue("NodeManager could not identify disk failure.",
expectedDirs.equals(seenDirs));
Assert.assertEquals("Node's health in terms of disks is wrong",
isHealthy, dirsHandler.areDisksHealthy());
for (int i = 0; i < 10; i++) {
Iterator<RMNode> iter = yarnCluster.getResourceManager().getRMContext()
.getRMNodes().values().iterator();
if ((iter.next().getState() != NodeState.UNHEALTHY) == isHealthy) {
break;
}
// wait for the node health info to go to RM
try {
Thread.sleep(1000);
} catch(InterruptedException e) {
LOG.error("Interrupted while waiting for NM->RM heartbeat.");
}
}
Iterator<RMNode> iter = yarnCluster.getResourceManager().getRMContext()
.getRMNodes().values().iterator();
Assert.assertEquals("RM is not updated with the health status of a node",
isHealthy, iter.next().getState() != NodeState.UNHEALTHY);
}
/**
   * Prepare a directory for failure: replace the given directory on the
   * local FileSystem with a regular file of the same name. This causes
   * directory creation in DiskChecker.checkDir() to fail for that path.
* @param dir the directory to be failed
* @throws IOException
*/
private void prepareDirToFail(String dir) throws IOException {
File file = new File(dir);
FileUtil.fullyDelete(file);
file.createNewFile();
LOG.info("Prepared " + dir + " to fail.");
}
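  // Hedged sketch of why this works (assuming DiskChecker semantics): once a
  // regular file occupies the path, the health check's attempt to create or
  // verify a directory there fails, so LocalDirsHandlerService moves the dir
  // to its failed list on the next check.
  //
  //   File f = new File(dir);
  //   assert f.isFile();  // no longer a directory
  //   assert !f.mkdirs(); // directory creation now fails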
}
| 11,059 | 38.784173 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestMiniYARNClusterForHA.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ha.HAServiceProtocol;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.junit.Before;
import org.junit.Test;
public class TestMiniYARNClusterForHA {
MiniYARNCluster cluster;
@Before
public void setup() throws IOException, InterruptedException {
Configuration conf = new YarnConfiguration();
conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false);
conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS, "localhost:0");
cluster = new MiniYARNCluster(TestMiniYARNClusterForHA.class.getName(),
2, 1, 1, 1);
cluster.init(conf);
cluster.start();
cluster.getResourceManager(0).getRMContext().getRMAdminService()
.transitionToActive(new HAServiceProtocol.StateChangeRequestInfo(
HAServiceProtocol.RequestSource.REQUEST_BY_USER));
assertFalse("RM never turned active", -1 == cluster.getActiveRMIndex());
}
@Test
public void testClusterWorks() throws YarnException, InterruptedException {
assertTrue("NMs fail to connect to the RM",
cluster.waitForNodeManagersToConnect(5000));
}
}
| 2,154 | 34.916667 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server;
import static org.junit.Assert.fail;
import java.io.File;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.LinkedList;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.minikdc.KerberosSecurityTestcase;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.yarn.api.ContainerManagementProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesResponse;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainersResponse;
import org.apache.hadoop.yarn.api.protocolrecords.StopContainersRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StopContainersResponse;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.SerializedException;
import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.client.NMProxy;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
import org.apache.hadoop.yarn.security.NMTokenIdentifier;
import org.apache.hadoop.yarn.server.nodemanager.Context;
import org.apache.hadoop.yarn.server.nodemanager.NodeManager;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl;
import org.apache.hadoop.yarn.server.nodemanager.security.NMTokenSecretManagerInNM;
import org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM;
import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager;
import org.apache.hadoop.yarn.server.security.BaseNMTokenSecretManager;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.util.Records;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
@RunWith(Parameterized.class)
public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
static Log LOG = LogFactory.getLog(TestContainerManagerSecurity.class);
static final RecordFactory recordFactory = RecordFactoryProvider
.getRecordFactory(null);
private static MiniYARNCluster yarnCluster;
private static final File testRootDir = new File("target",
TestContainerManagerSecurity.class.getName() + "-root");
private static File httpSpnegoKeytabFile = new File(testRootDir,
"httpSpnegoKeytabFile.keytab");
  private static String httpSpnegoPrincipal = "HTTP/localhost@EXAMPLE.COM";
private Configuration conf;
@Before
public void setUp() throws Exception {
testRootDir.mkdirs();
httpSpnegoKeytabFile.deleteOnExit();
getKdc().createPrincipal(httpSpnegoKeytabFile, httpSpnegoPrincipal);
yarnCluster =
new MiniYARNCluster(TestContainerManagerSecurity.class.getName(), 1, 1,
1);
yarnCluster.init(conf);
yarnCluster.start();
}
@After
public void tearDown() {
if (yarnCluster != null) {
yarnCluster.stop();
yarnCluster = null;
}
testRootDir.delete();
}
@Parameters
public static Collection<Object[]> configs() {
Configuration configurationWithoutSecurity = new Configuration();
configurationWithoutSecurity.set(
CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "simple");
Configuration configurationWithSecurity = new Configuration();
configurationWithSecurity.set(
CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
configurationWithSecurity.set(
YarnConfiguration.RM_WEBAPP_SPNEGO_USER_NAME_KEY, httpSpnegoPrincipal);
configurationWithSecurity.set(
YarnConfiguration.RM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY,
httpSpnegoKeytabFile.getAbsolutePath());
configurationWithSecurity.set(
YarnConfiguration.NM_WEBAPP_SPNEGO_USER_NAME_KEY, httpSpnegoPrincipal);
configurationWithSecurity.set(
YarnConfiguration.NM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY,
httpSpnegoKeytabFile.getAbsolutePath());
return Arrays.asList(new Object[][] { { configurationWithoutSecurity },
{ configurationWithSecurity } });
}
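  // Note (an inference about the runner, not stated in the original): the
  // Parameterized runner instantiates this class once per configs() entry,
  // so each @Test below runs twice -- once with simple and once with
  // kerberos authentication.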
public TestContainerManagerSecurity(Configuration conf) {
conf.setLong(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, 100000L);
UserGroupInformation.setConfiguration(conf);
this.conf = conf;
}
@Test (timeout = 120000)
public void testContainerManager() throws Exception {
    // Test NMTokens.
testNMTokens(conf);
// Testing for container token tampering
testContainerToken(conf);
// Testing for container token tampering with epoch
testContainerTokenWithEpoch(conf);
}
private void testNMTokens(Configuration conf) throws Exception {
NMTokenSecretManagerInRM nmTokenSecretManagerRM =
yarnCluster.getResourceManager().getRMContext()
.getNMTokenSecretManager();
NMTokenSecretManagerInNM nmTokenSecretManagerNM =
yarnCluster.getNodeManager(0).getNMContext().getNMTokenSecretManager();
RMContainerTokenSecretManager containerTokenSecretManager =
yarnCluster.getResourceManager().getRMContext().
getContainerTokenSecretManager();
NodeManager nm = yarnCluster.getNodeManager(0);
waitForNMToReceiveNMTokenKey(nmTokenSecretManagerNM, nm);
    // Both key ids should be equal.
Assert.assertEquals(nmTokenSecretManagerNM.getCurrentKey().getKeyId(),
nmTokenSecretManagerRM.getCurrentKey().getKeyId());
/*
* Below cases should be tested.
* 1) If Invalid NMToken is used then it should be rejected.
* 2) If valid NMToken but belonging to another Node is used then that
* too should be rejected.
     * 3) Using the NMToken of, say, appAttempt-1 to start/stop/retrieve the
     *    status of a container whose containerId belongs to, say,
     *    appAttempt-2 should be rejected.
* 4) After start container call is successful nmtoken should have been
* saved in NMTokenSecretManagerInNM.
* 5) If start container call was successful (no matter if container is
* still running or not), appAttempt->NMToken should be present in
* NMTokenSecretManagerInNM's cache. Any future getContainerStatus call
* for containerId belonging to that application attempt using
* applicationAttempt's older nmToken should not get any invalid
* nmToken error. (This can be best tested if we roll over NMToken
* master key twice).
*/
YarnRPC rpc = YarnRPC.create(conf);
String user = "test";
Resource r = Resource.newInstance(1024, 1);
ApplicationId appId = ApplicationId.newInstance(1, 1);
ApplicationAttemptId validAppAttemptId =
ApplicationAttemptId.newInstance(appId, 1);
ContainerId validContainerId =
ContainerId.newContainerId(validAppAttemptId, 0);
NodeId validNode = yarnCluster.getNodeManager(0).getNMContext().getNodeId();
NodeId invalidNode = NodeId.newInstance("InvalidHost", 1234);
org.apache.hadoop.yarn.api.records.Token validNMToken =
nmTokenSecretManagerRM.createNMToken(validAppAttemptId, validNode, user);
org.apache.hadoop.yarn.api.records.Token validContainerToken =
containerTokenSecretManager.createContainerToken(validContainerId,
validNode, user, r, Priority.newInstance(10), 1234);
ContainerTokenIdentifier identifier =
BuilderUtils.newContainerTokenIdentifier(validContainerToken);
Assert.assertEquals(Priority.newInstance(10), identifier.getPriority());
Assert.assertEquals(1234, identifier.getCreationTime());
StringBuilder sb;
// testInvalidNMToken ... creating NMToken using different secret manager.
NMTokenSecretManagerInRM tempManager = new NMTokenSecretManagerInRM(conf);
tempManager.rollMasterKey();
do {
tempManager.rollMasterKey();
tempManager.activateNextMasterKey();
// Making sure key id is different.
} while (tempManager.getCurrentKey().getKeyId() == nmTokenSecretManagerRM
.getCurrentKey().getKeyId());
// Testing that NM rejects the requests when we don't send any token.
if (UserGroupInformation.isSecurityEnabled()) {
sb = new StringBuilder("Client cannot authenticate via:[TOKEN]");
} else {
sb =
new StringBuilder(
"SIMPLE authentication is not enabled. Available:[TOKEN]");
}
String errorMsg = testStartContainer(rpc, validAppAttemptId, validNode,
validContainerToken, null, true);
Assert.assertTrue(errorMsg.contains(sb.toString()));
org.apache.hadoop.yarn.api.records.Token invalidNMToken =
tempManager.createNMToken(validAppAttemptId, validNode, user);
sb = new StringBuilder("Given NMToken for application : ");
sb.append(validAppAttemptId.toString())
.append(" seems to have been generated illegally.");
Assert.assertTrue(sb.toString().contains(
testStartContainer(rpc, validAppAttemptId, validNode,
validContainerToken, invalidNMToken, true)));
// valid NMToken but belonging to other node
invalidNMToken =
nmTokenSecretManagerRM.createNMToken(validAppAttemptId, invalidNode,
user);
sb = new StringBuilder("Given NMToken for application : ");
sb.append(validAppAttemptId)
.append(" is not valid for current node manager.expected : ")
.append(validNode.toString())
.append(" found : ").append(invalidNode.toString());
Assert.assertTrue(sb.toString().contains(
testStartContainer(rpc, validAppAttemptId, validNode,
validContainerToken, invalidNMToken, true)));
    // Using correct tokens: the NMToken for the app attempt should get saved.
conf.setInt(YarnConfiguration.RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS,
4 * 60 * 1000);
validContainerToken =
containerTokenSecretManager.createContainerToken(validContainerId,
validNode, user, r, Priority.newInstance(0), 0);
Assert.assertTrue(testStartContainer(rpc, validAppAttemptId, validNode,
validContainerToken, validNMToken, false).isEmpty());
Assert.assertTrue(nmTokenSecretManagerNM
.isAppAttemptNMTokenKeyPresent(validAppAttemptId));
    // Using a new compatible-version NMToken, the container is expected to
    // start successfully.
ApplicationAttemptId validAppAttemptId2 =
ApplicationAttemptId.newInstance(appId, 2);
ContainerId validContainerId2 =
ContainerId.newContainerId(validAppAttemptId2, 0);
org.apache.hadoop.yarn.api.records.Token validContainerToken2 =
containerTokenSecretManager.createContainerToken(validContainerId2,
validNode, user, r, Priority.newInstance(0), 0);
org.apache.hadoop.yarn.api.records.Token validNMToken2 =
nmTokenSecretManagerRM.createNMToken(validAppAttemptId2, validNode, user);
// First, get a new NMTokenIdentifier.
NMTokenIdentifier newIdentifier = new NMTokenIdentifier();
byte[] tokenIdentifierContent = validNMToken2.getIdentifier().array();
DataInputBuffer dib = new DataInputBuffer();
dib.reset(tokenIdentifierContent, tokenIdentifierContent.length);
newIdentifier.readFields(dib);
    // Then, generate a new-version NMTokenIdentifier (NMTokenIdentifierNewForTest)
    // with an additional message field.
NMTokenIdentifierNewForTest newVersionIdentifier =
new NMTokenIdentifierNewForTest(newIdentifier, "message");
    // Check that the new-version NMTokenIdentifier has the correct info.
Assert.assertEquals("The ApplicationAttemptId is changed after set to " +
"newVersionIdentifier", validAppAttemptId2.getAttemptId(),
newVersionIdentifier.getApplicationAttemptId().getAttemptId()
);
Assert.assertEquals("The message is changed after set to newVersionIdentifier",
"message", newVersionIdentifier.getMessage());
Assert.assertEquals("The NodeId is changed after set to newVersionIdentifier",
validNode, newVersionIdentifier.getNodeId());
    // Create a new Token based on the new-version NMTokenIdentifier.
org.apache.hadoop.yarn.api.records.Token newVersionedNMToken =
BaseNMTokenSecretManager.newInstance(
nmTokenSecretManagerRM.retrievePassword(newVersionIdentifier),
newVersionIdentifier);
// Verify startContainer is successful and no exception is thrown.
Assert.assertTrue(testStartContainer(rpc, validAppAttemptId2, validNode,
validContainerToken2, newVersionedNMToken, false).isEmpty());
Assert.assertTrue(nmTokenSecretManagerNM
.isAppAttemptNMTokenKeyPresent(validAppAttemptId2));
    // Now let's wait till the container finishes and is removed from the
    // node manager.
waitForContainerToFinishOnNM(validContainerId);
sb = new StringBuilder("Attempt to relaunch the same container with id ");
sb.append(validContainerId);
Assert.assertTrue(testStartContainer(rpc, validAppAttemptId, validNode,
validContainerToken, validNMToken, true).contains(sb.toString()));
    // The container is removed from the node manager's memory by this time.
    // Trying to stop the container should not throw any exception.
testStopContainer(rpc, validAppAttemptId, validNode, validContainerId,
validNMToken, false);
// Rolling over master key twice so that we can check whether older keys
// are used for authentication.
rollNMTokenMasterKey(nmTokenSecretManagerRM, nmTokenSecretManagerNM);
    // Key rolled over once... rolling over again.
rollNMTokenMasterKey(nmTokenSecretManagerRM, nmTokenSecretManagerNM);
    // Trying to get container status. Now the saved nmToken should be used
    // for authentication... It should complain saying the container was
    // recently stopped.
sb = new StringBuilder("Container ");
sb.append(validContainerId);
sb.append(" was recently stopped on node manager");
Assert.assertTrue(testGetContainer(rpc, validAppAttemptId, validNode,
validContainerId, validNMToken, true).contains(sb.toString()));
// Now lets remove the container from nm-memory
nm.getNodeStatusUpdater().clearFinishedContainersFromCache();
// This should fail as container is removed from recently tracked finished
// containers.
sb = new StringBuilder("Container ");
sb.append(validContainerId.toString());
sb.append(" is not handled by this NodeManager");
Assert.assertTrue(testGetContainer(rpc, validAppAttemptId, validNode,
validContainerId, validNMToken, false).contains(sb.toString()));
    // Using the appAttempt-1 NMToken to launch a container for appAttempt-2
    // should succeed.
ApplicationAttemptId attempt2 = ApplicationAttemptId.newInstance(appId, 2);
Token attempt1NMToken =
nmTokenSecretManagerRM
.createNMToken(validAppAttemptId, validNode, user);
org.apache.hadoop.yarn.api.records.Token newContainerToken =
containerTokenSecretManager.createContainerToken(
ContainerId.newContainerId(attempt2, 1), validNode, user, r,
Priority.newInstance(0), 0);
Assert.assertTrue(testStartContainer(rpc, attempt2, validNode,
newContainerToken, attempt1NMToken, false).isEmpty());
}
  private void waitForContainerToFinishOnNM(ContainerId containerId) {
    Context nmContext = yarnCluster.getNodeManager(0).getNMContext();
    int interval = 4 * 60; // Max time for container token to expire.
    Assert.assertTrue(nmContext.getContainers().containsKey(containerId));
    while ((interval-- > 0)
        && !nmContext.getContainers().get(containerId)
          .cloneAndGetContainerStatus().getState()
          .equals(ContainerState.COMPLETE)) {
      try {
        LOG.info("Waiting for " + containerId + " to complete.");
        Thread.sleep(1000);
      } catch (InterruptedException e) {
      }
    }
    // Normally, containers are removed from the NM context only after they
    // are explicitly acked by the RM. Here, manually remove the container
    // for testing.
    yarnCluster.getNodeManager(0).getNodeStatusUpdater()
        .addCompletedContainer(containerId);
    nmContext.getContainers().remove(containerId);
  }
protected void waitForNMToReceiveNMTokenKey(
NMTokenSecretManagerInNM nmTokenSecretManagerNM, NodeManager nm)
throws InterruptedException {
int attempt = 60;
ContainerManagerImpl cm =
((ContainerManagerImpl) nm.getNMContext().getContainerManager());
while ((cm.getBlockNewContainerRequestsStatus() || nmTokenSecretManagerNM
.getNodeId() == null) && attempt-- > 0) {
Thread.sleep(2000);
}
}
protected void rollNMTokenMasterKey(
NMTokenSecretManagerInRM nmTokenSecretManagerRM,
NMTokenSecretManagerInNM nmTokenSecretManagerNM) throws Exception {
int oldKeyId = nmTokenSecretManagerRM.getCurrentKey().getKeyId();
nmTokenSecretManagerRM.rollMasterKey();
int interval = 40;
while (nmTokenSecretManagerNM.getCurrentKey().getKeyId() == oldKeyId
&& interval-- > 0) {
Thread.sleep(1000);
}
nmTokenSecretManagerRM.activateNextMasterKey();
Assert.assertTrue((nmTokenSecretManagerNM.getCurrentKey().getKeyId()
== nmTokenSecretManagerRM.getCurrentKey().getKeyId()));
}
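  // Hedged sketch of the rollover protocol assumed above: rollMasterKey()
  // creates the next key on the RM and ships it to NMs via heartbeat
  // responses; activateNextMasterKey() then makes it current, while tokens
  // minted with older keys remain verifiable until those keys expire.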
private String testStopContainer(YarnRPC rpc,
ApplicationAttemptId appAttemptId, NodeId nodeId,
ContainerId containerId, Token nmToken, boolean isExceptionExpected) {
try {
stopContainer(rpc, nmToken,
Arrays.asList(new ContainerId[] { containerId }), appAttemptId,
nodeId);
if (isExceptionExpected) {
fail("Exception was expected!!");
}
return "";
} catch (Exception e) {
e.printStackTrace();
return e.getMessage();
}
}
private String testGetContainer(YarnRPC rpc,
ApplicationAttemptId appAttemptId, NodeId nodeId,
ContainerId containerId,
org.apache.hadoop.yarn.api.records.Token nmToken,
boolean isExceptionExpected) {
try {
getContainerStatus(rpc, nmToken, containerId, appAttemptId, nodeId,
isExceptionExpected);
if (isExceptionExpected) {
fail("Exception was expected!!");
}
return "";
} catch (Exception e) {
e.printStackTrace();
return e.getMessage();
}
}
private String testStartContainer(YarnRPC rpc,
ApplicationAttemptId appAttemptId, NodeId nodeId,
org.apache.hadoop.yarn.api.records.Token containerToken,
org.apache.hadoop.yarn.api.records.Token nmToken,
boolean isExceptionExpected) {
try {
startContainer(rpc, nmToken, containerToken, nodeId,
appAttemptId.toString());
      if (isExceptionExpected) {
fail("Exception was expected!!");
}
return "";
} catch (Exception e) {
e.printStackTrace();
return e.getMessage();
}
}
private void stopContainer(YarnRPC rpc, Token nmToken,
List<ContainerId> containerId, ApplicationAttemptId appAttemptId,
NodeId nodeId) throws Exception {
StopContainersRequest request =
StopContainersRequest.newInstance(containerId);
ContainerManagementProtocol proxy = null;
try {
proxy =
getContainerManagementProtocolProxy(rpc, nmToken, nodeId,
appAttemptId.toString());
StopContainersResponse response = proxy.stopContainers(request);
      if (response.getFailedRequests() != null) {
        // The failed-requests map is keyed by individual ContainerId, so
        // check each requested id rather than the list itself.
        for (ContainerId cId : containerId) {
          if (response.getFailedRequests().containsKey(cId)) {
            parseAndThrowException(
                response.getFailedRequests().get(cId).deSerialize());
          }
        }
      }
} catch (Exception e) {
if (proxy != null) {
rpc.stopProxy(proxy, conf);
}
}
}
  private void getContainerStatus(YarnRPC rpc,
      org.apache.hadoop.yarn.api.records.Token nmToken,
      ContainerId containerId, ApplicationAttemptId appAttemptId,
      NodeId nodeId, boolean isExceptionExpected) throws Exception {
List<ContainerId> containerIds = new ArrayList<ContainerId>();
containerIds.add(containerId);
GetContainerStatusesRequest request =
GetContainerStatusesRequest.newInstance(containerIds);
ContainerManagementProtocol proxy = null;
try {
proxy =
getContainerManagementProtocolProxy(rpc, nmToken, nodeId,
appAttemptId.toString());
GetContainerStatusesResponse statuses = proxy.getContainerStatuses(request);
if (statuses.getFailedRequests() != null
&& statuses.getFailedRequests().containsKey(containerId)) {
parseAndThrowException(statuses.getFailedRequests().get(containerId)
.deSerialize());
}
} finally {
if (proxy != null) {
rpc.stopProxy(proxy, conf);
}
}
}
private void startContainer(final YarnRPC rpc,
org.apache.hadoop.yarn.api.records.Token nmToken,
org.apache.hadoop.yarn.api.records.Token containerToken,
NodeId nodeId, String user) throws Exception {
ContainerLaunchContext context =
Records.newRecord(ContainerLaunchContext.class);
StartContainerRequest scRequest =
        StartContainerRequest.newInstance(context, containerToken);
List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
list.add(scRequest);
StartContainersRequest allRequests =
StartContainersRequest.newInstance(list);
ContainerManagementProtocol proxy = null;
try {
proxy = getContainerManagementProtocolProxy(rpc, nmToken, nodeId, user);
StartContainersResponse response = proxy.startContainers(allRequests);
      for (SerializedException ex : response.getFailedRequests().values()) {
parseAndThrowException(ex.deSerialize());
}
} finally {
if (proxy != null) {
rpc.stopProxy(proxy, conf);
}
}
}
private void parseAndThrowException(Throwable t) throws YarnException,
IOException {
if (t instanceof YarnException) {
throw (YarnException) t;
} else if (t instanceof InvalidToken) {
throw (InvalidToken) t;
} else {
throw (IOException) t;
}
}
protected ContainerManagementProtocol getContainerManagementProtocolProxy(
final YarnRPC rpc, org.apache.hadoop.yarn.api.records.Token nmToken,
NodeId nodeId, String user) {
ContainerManagementProtocol proxy;
UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
final InetSocketAddress addr =
NetUtils.createSocketAddr(nodeId.getHost(), nodeId.getPort());
if (nmToken != null) {
ugi.addToken(ConverterUtils.convertFromYarn(nmToken, addr));
}
proxy =
NMProxy.createNMProxy(conf, ContainerManagementProtocol.class, ugi,
rpc, addr);
return proxy;
}
/**
   * This tests a malicious user getting a proper token but then tampering
   * with it (modifying the containerID, Resource, etc.). Such containers
   * should be rejected.
*
* @throws IOException
* @throws InterruptedException
* @throws YarnException
*/
private void testContainerToken(Configuration conf) throws IOException,
InterruptedException, YarnException {
LOG.info("Running test for malice user");
/*
     * We need to check containerToken (authorization).
     * Here we assume that we have a valid NMToken.
     * 1) The ContainerToken used is expired.
     * 2) The ContainerToken is tampered with (the resource is modified).
*/
NMTokenSecretManagerInRM nmTokenSecretManagerInRM =
yarnCluster.getResourceManager().getRMContext()
.getNMTokenSecretManager();
ApplicationId appId = ApplicationId.newInstance(1, 1);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 0);
ContainerId cId = ContainerId.newContainerId(appAttemptId, 0);
NodeManager nm = yarnCluster.getNodeManager(0);
NMTokenSecretManagerInNM nmTokenSecretManagerInNM =
nm.getNMContext().getNMTokenSecretManager();
String user = "test";
waitForNMToReceiveNMTokenKey(nmTokenSecretManagerInNM, nm);
NodeId nodeId = nm.getNMContext().getNodeId();
    // Both key ids should be equal.
Assert.assertEquals(nmTokenSecretManagerInNM.getCurrentKey().getKeyId(),
nmTokenSecretManagerInRM.getCurrentKey().getKeyId());
RMContainerTokenSecretManager containerTokenSecretManager =
yarnCluster.getResourceManager().getRMContext().
getContainerTokenSecretManager();
Resource r = Resource.newInstance(1230, 2);
Token containerToken =
containerTokenSecretManager.createContainerToken(
cId, nodeId, user, r, Priority.newInstance(0), 0);
ContainerTokenIdentifier containerTokenIdentifier =
getContainerTokenIdentifierFromToken(containerToken);
    // Verify that a new compatible-version ContainerTokenIdentifier works
    // successfully.
ContainerTokenIdentifierForTest newVersionTokenIdentifier =
new ContainerTokenIdentifierForTest(containerTokenIdentifier, "message");
byte[] password =
containerTokenSecretManager.createPassword(newVersionTokenIdentifier);
Token newContainerToken = BuilderUtils.newContainerToken(
nodeId, password, newVersionTokenIdentifier);
Token nmToken =
nmTokenSecretManagerInRM.createNMToken(appAttemptId, nodeId, user);
YarnRPC rpc = YarnRPC.create(conf);
Assert.assertTrue(testStartContainer(rpc, appAttemptId, nodeId,
newContainerToken, nmToken, false).isEmpty());
// Creating a tampered Container Token
RMContainerTokenSecretManager tamperedContainerTokenSecretManager =
new RMContainerTokenSecretManager(conf);
tamperedContainerTokenSecretManager.rollMasterKey();
do {
tamperedContainerTokenSecretManager.rollMasterKey();
tamperedContainerTokenSecretManager.activateNextMasterKey();
} while (containerTokenSecretManager.getCurrentKey().getKeyId()
== tamperedContainerTokenSecretManager.getCurrentKey().getKeyId());
ContainerId cId2 = ContainerId.newContainerId(appAttemptId, 1);
// Creating modified containerToken
Token containerToken2 =
tamperedContainerTokenSecretManager.createContainerToken(cId2, nodeId,
user, r, Priority.newInstance(0), 0);
StringBuilder sb = new StringBuilder("Given Container ");
sb.append(cId2);
sb.append(" seems to have an illegally generated token.");
Assert.assertTrue(testStartContainer(rpc, appAttemptId, nodeId,
containerToken2, nmToken, true).contains(sb.toString()));
}
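  // Hedged sketch of the rejection mechanism exercised above: the NM
  // recomputes the token password from the received identifier bytes using
  // its known master keys, so an identifier minted by a foreign secret
  // manager (tamperedContainerTokenSecretManager) fails verification.
  //
  //   // conceptually, on the NM side:
  //   byte[] expected = secretManager.retrievePassword(identifier);
  //   // throws InvalidToken when the key id is unknown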
private ContainerTokenIdentifier getContainerTokenIdentifierFromToken(
Token containerToken) throws IOException {
ContainerTokenIdentifier containerTokenIdentifier;
containerTokenIdentifier = new ContainerTokenIdentifier();
byte[] tokenIdentifierContent = containerToken.getIdentifier().array();
DataInputBuffer dib = new DataInputBuffer();
dib.reset(tokenIdentifierContent, tokenIdentifierContent.length);
containerTokenIdentifier.readFields(dib);
return containerTokenIdentifier;
}
/**
   * This tests whether a containerId is serialized/deserialized with its epoch.
*
* @throws IOException
* @throws InterruptedException
* @throws YarnException
*/
private void testContainerTokenWithEpoch(Configuration conf)
throws IOException, InterruptedException, YarnException {
LOG.info("Running test for serializing/deserializing containerIds");
NMTokenSecretManagerInRM nmTokenSecretManagerInRM =
yarnCluster.getResourceManager().getRMContext()
.getNMTokenSecretManager();
ApplicationId appId = ApplicationId.newInstance(1, 1);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 0);
ContainerId cId = ContainerId.newContainerId(appAttemptId, (5L << 40) | 3L);
NodeManager nm = yarnCluster.getNodeManager(0);
NMTokenSecretManagerInNM nmTokenSecretManagerInNM =
nm.getNMContext().getNMTokenSecretManager();
String user = "test";
waitForNMToReceiveNMTokenKey(nmTokenSecretManagerInNM, nm);
NodeId nodeId = nm.getNMContext().getNodeId();
    // Both key ids should be equal.
Assert.assertEquals(nmTokenSecretManagerInNM.getCurrentKey().getKeyId(),
nmTokenSecretManagerInRM.getCurrentKey().getKeyId());
// Creating a normal Container Token
RMContainerTokenSecretManager containerTokenSecretManager =
yarnCluster.getResourceManager().getRMContext().
getContainerTokenSecretManager();
Resource r = Resource.newInstance(1230, 2);
Token containerToken =
containerTokenSecretManager.createContainerToken(cId, nodeId, user, r,
Priority.newInstance(0), 0);
ContainerTokenIdentifier containerTokenIdentifier =
new ContainerTokenIdentifier();
byte[] tokenIdentifierContent = containerToken.getIdentifier().array();
DataInputBuffer dib = new DataInputBuffer();
dib.reset(tokenIdentifierContent, tokenIdentifierContent.length);
containerTokenIdentifier.readFields(dib);
Assert.assertEquals(cId, containerTokenIdentifier.getContainerID());
Assert.assertEquals(
cId.toString(), containerTokenIdentifier.getContainerID().toString());
Token nmToken =
nmTokenSecretManagerInRM.createNMToken(appAttemptId, nodeId, user);
YarnRPC rpc = YarnRPC.create(conf);
testStartContainer(rpc, appAttemptId, nodeId, containerToken, nmToken,
false);
List<ContainerId> containerIds = new LinkedList<ContainerId>();
containerIds.add(cId);
ContainerManagementProtocol proxy
= getContainerManagementProtocolProxy(rpc, nmToken, nodeId, user);
GetContainerStatusesResponse res = proxy.getContainerStatuses(
GetContainerStatusesRequest.newInstance(containerIds));
Assert.assertNotNull(res.getContainerStatuses().get(0));
Assert.assertEquals(
cId, res.getContainerStatuses().get(0).getContainerId());
Assert.assertEquals(cId.toString(),
res.getContainerStatuses().get(0).getContainerId().toString());
}
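  // Hedged note on the id used above (an assumption about ContainerId's bit
  // layout): the long passed to newContainerId packs the epoch into the bits
  // above the 40-bit container sequence number, so
  //   (5L << 40) | 3L // epoch 5, container 3
  // exercises the epoch-preserving serialization path.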
}
| 32,350 | 41.068921 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/NMTokenIdentifierNewForTest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.commons.io.IOUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationAttemptIdPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.NodeIdPBImpl;
import org.apache.hadoop.yarn.proto.YarnSecurityTestTokenProtos.NMTokenIdentifierNewProto;
import org.apache.hadoop.yarn.security.NMTokenIdentifier;
import com.google.protobuf.TextFormat;
public class NMTokenIdentifierNewForTest extends NMTokenIdentifier {
private static Log LOG = LogFactory.getLog(NMTokenIdentifierNewForTest.class);
public static final Text KIND = new Text("NMToken");
private NMTokenIdentifierNewProto proto;
private NMTokenIdentifierNewProto.Builder builder;
public NMTokenIdentifierNewForTest(){
builder = NMTokenIdentifierNewProto.newBuilder();
}
public NMTokenIdentifierNewForTest(NMTokenIdentifierNewProto proto) {
this.proto = proto;
}
public NMTokenIdentifierNewForTest(NMTokenIdentifier tokenIdentifier,
String message) {
builder = NMTokenIdentifierNewProto.newBuilder();
builder.setAppAttemptId(tokenIdentifier.getProto().getAppAttemptId());
builder.setNodeId(tokenIdentifier.getProto().getNodeId());
builder.setAppSubmitter(tokenIdentifier.getApplicationSubmitter());
builder.setKeyId(tokenIdentifier.getKeyId());
builder.setMessage(message);
proto = builder.build();
builder = null;
}
@Override
public void write(DataOutput out) throws IOException {
LOG.debug("Writing NMTokenIdentifierNewForTest to RPC layer: " + this);
out.write(proto.toByteArray());
}
@Override
public void readFields(DataInput in) throws IOException {
DataInputStream dis = (DataInputStream)in;
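    // Drain every remaining byte of the stream; the protobuf payload spans
    // the rest of the identifier, so there is no length prefix to honor.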
byte[] buffer = IOUtils.toByteArray(dis);
proto = NMTokenIdentifierNewProto.parseFrom(buffer);
}
@Override
public Text getKind() {
return KIND;
}
@Override
public UserGroupInformation getUser() {
return null;
}
public String getMessage() {
return proto.getMessage();
}
public void setMessage(String message) {
builder.setMessage(message);
}
public NMTokenIdentifierNewProto getNewProto() {
return proto;
}
public void build() {
proto = builder.build();
builder = null;
}
public ApplicationAttemptId getApplicationAttemptId() {
return new ApplicationAttemptIdPBImpl(proto.getAppAttemptId());
}
public NodeId getNodeId() {
return new NodeIdPBImpl(proto.getNodeId());
}
public String getApplicationSubmitter() {
return proto.getAppSubmitter();
}
public int getKeyId() {
return proto.getKeyId();
}
@Override
public int hashCode() {
return this.proto.hashCode();
}
@Override
public boolean equals(Object other) {
if (other == null)
return false;
if (other.getClass().isAssignableFrom(this.getClass())) {
return this.getNewProto().equals(this.getClass().cast(other).getNewProto());
}
return false;
}
@Override
public String toString() {
return TextFormat.shortDebugString(this.proto);
}
}
| 4,350 | 28.80137 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestMiniYarnCluster.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.HAUtil;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.HATestUtil;
import org.junit.Assert;
import org.junit.Test;
public class TestMiniYarnCluster {
@Test
public void testTimelineServiceStartInMiniCluster() throws Exception {
Configuration conf = new YarnConfiguration();
int numNodeManagers = 1;
int numLocalDirs = 1;
int numLogDirs = 1;
boolean enableAHS;
/*
* Timeline service should not start if TIMELINE_SERVICE_ENABLED == false
* and enableAHS flag == false
*/
conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, false);
enableAHS = false;
MiniYARNCluster cluster = null;
try {
cluster = new MiniYARNCluster(TestMiniYarnCluster.class.getSimpleName(),
numNodeManagers, numLocalDirs, numLogDirs, numLogDirs, enableAHS);
cluster.init(conf);
cluster.start();
//verify that the timeline service is not started.
Assert.assertNull("Timeline Service should not have been started",
cluster.getApplicationHistoryServer());
}
finally {
if(cluster != null) {
cluster.stop();
}
}
/*
* Timeline service should start if TIMELINE_SERVICE_ENABLED == true
* and enableAHS == false
*/
conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
enableAHS = false;
cluster = null;
try {
cluster = new MiniYARNCluster(TestMiniYarnCluster.class.getSimpleName(),
numNodeManagers, numLocalDirs, numLogDirs, numLogDirs, enableAHS);
cluster.init(conf);
cluster.start();
      // The timeline service may sometimes take a while to start.
int wait = 0;
while(cluster.getApplicationHistoryServer() == null && wait < 20) {
Thread.sleep(500);
wait++;
}
//verify that the timeline service is started.
Assert.assertNotNull("Timeline Service should have been started",
cluster.getApplicationHistoryServer());
}
finally {
if(cluster != null) {
cluster.stop();
}
}
/*
* Timeline service should start if TIMELINE_SERVICE_ENABLED == false
* and enableAHS == true
*/
conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, false);
enableAHS = true;
cluster = null;
try {
cluster = new MiniYARNCluster(TestMiniYarnCluster.class.getSimpleName(),
numNodeManagers, numLocalDirs, numLogDirs, numLogDirs, enableAHS);
cluster.init(conf);
cluster.start();
      // The timeline service may sometimes take a while to start.
int wait = 0;
while(cluster.getApplicationHistoryServer() == null && wait < 20) {
Thread.sleep(500);
wait++;
}
//verify that the timeline service is started.
Assert.assertNotNull("Timeline Service should have been started",
cluster.getApplicationHistoryServer());
}
finally {
if(cluster != null) {
cluster.stop();
}
}
}
@Test
public void testMultiRMConf() {
String RM1_NODE_ID = "rm1", RM2_NODE_ID = "rm2";
int RM1_PORT_BASE = 10000, RM2_PORT_BASE = 20000;
Configuration conf = new YarnConfiguration();
conf.set(YarnConfiguration.RM_CLUSTER_ID, "yarn-test-cluster");
conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false);
conf.set(YarnConfiguration.RM_HA_IDS, RM1_NODE_ID + "," + RM2_NODE_ID);
HATestUtil.setRpcAddressForRM(RM1_NODE_ID, RM1_PORT_BASE, conf);
HATestUtil.setRpcAddressForRM(RM2_NODE_ID, RM2_PORT_BASE, conf);
conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, true);
conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_USE_RPC, true);
MiniYARNCluster cluster =
new MiniYARNCluster(TestMiniYarnCluster.class.getName(),
2, 0, 1, 1);
cluster.init(conf);
Configuration conf1 = cluster.getResourceManager(0).getConfig(),
conf2 = cluster.getResourceManager(1).getConfig();
Assert.assertFalse(conf1 == conf2);
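    // HATestUtil derives each RM's RPC address from the default RM port
    // (8032) plus that RM's port base (10000 for rm1, 20000 for rm2),
    // hence the 18032 and 28032 asserted below.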
Assert.assertEquals("0.0.0.0:18032",
conf1.get(HAUtil.addSuffix(YarnConfiguration.RM_ADDRESS, RM1_NODE_ID)));
Assert.assertEquals("0.0.0.0:28032",
conf1.get(HAUtil.addSuffix(YarnConfiguration.RM_ADDRESS, RM2_NODE_ID)));
Assert.assertEquals("rm1", conf1.get(YarnConfiguration.RM_HA_ID));
Assert.assertEquals("0.0.0.0:18032",
conf2.get(HAUtil.addSuffix(YarnConfiguration.RM_ADDRESS, RM1_NODE_ID)));
Assert.assertEquals("0.0.0.0:28032",
conf2.get(HAUtil.addSuffix(YarnConfiguration.RM_ADDRESS, RM2_NODE_ID)));
Assert.assertEquals("rm2", conf2.get(YarnConfiguration.RM_HA_ID));
}
}
| 5,729 | 36.45098 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestRMNMSecretKeys.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server;
import java.io.IOException;
import org.junit.Assert;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.event.DrainDispatcher;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse;
import org.apache.hadoop.yarn.server.api.records.MasterKey;
import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.junit.Test;
public class TestRMNMSecretKeys {
@Test(timeout = 1000000)
public void testNMUpdation() throws Exception {
YarnConfiguration conf = new YarnConfiguration();
    // Validate RM-NM key exchange in an unsecured environment
validateRMNMKeyExchange(conf);
    // Validate RM-NM key exchange in a secured (Kerberos) environment
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
"kerberos");
UserGroupInformation.setConfiguration(conf);
validateRMNMKeyExchange(conf);
}
private void validateRMNMKeyExchange(YarnConfiguration conf) throws Exception {
// Default rolling and activation intervals are large enough, no need to
// intervene
final DrainDispatcher dispatcher = new DrainDispatcher();
ResourceManager rm = new ResourceManager() {
@Override
protected void doSecureLogin() throws IOException {
// Do nothing.
}
@Override
protected Dispatcher createDispatcher() {
return dispatcher;
}
@Override
protected void startWepApp() {
// Don't need it, skip.
}
};
rm.init(conf);
rm.start();
// Testing ContainerToken and NMToken
String containerToken = "Container Token : ";
String nmToken = "NM Token : ";
MockNM nm = new MockNM("host:1234", 3072, rm.getResourceTrackerService());
RegisterNodeManagerResponse registrationResponse = nm.registerNode();
MasterKey containerTokenMasterKey =
registrationResponse.getContainerTokenMasterKey();
Assert.assertNotNull(containerToken
+ "Registration should cause a key-update!", containerTokenMasterKey);
MasterKey nmTokenMasterKey = registrationResponse.getNMTokenMasterKey();
Assert.assertNotNull(nmToken
+ "Registration should cause a key-update!", nmTokenMasterKey);
dispatcher.await();
NodeHeartbeatResponse response = nm.nodeHeartbeat(true);
Assert.assertNull(containerToken +
"First heartbeat after registration shouldn't get any key updates!",
response.getContainerTokenMasterKey());
Assert.assertNull(nmToken +
"First heartbeat after registration shouldn't get any key updates!",
response.getNMTokenMasterKey());
dispatcher.await();
response = nm.nodeHeartbeat(true);
Assert.assertNull(containerToken +
"Even second heartbeat after registration shouldn't get any key updates!",
response.getContainerTokenMasterKey());
    Assert.assertNull(nmToken +
        "Even second heartbeat after registration shouldn't get any key updates!",
        response.getNMTokenMasterKey());
dispatcher.await();
// Let's force a roll-over
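    // Rotation is two-phase: roll-over generates the next key and ships it
    // to NMs via heartbeat; a later activation makes it the signing key, so
    // every NM can learn the key before tokens are signed with it.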
rm.getRMContext().getContainerTokenSecretManager().rollMasterKey();
rm.getRMContext().getNMTokenSecretManager().rollMasterKey();
// Heartbeats after roll-over and before activation should be fine.
response = nm.nodeHeartbeat(true);
Assert.assertNotNull(containerToken +
"Heartbeats after roll-over and before activation should not err out.",
response.getContainerTokenMasterKey());
Assert.assertNotNull(nmToken +
"Heartbeats after roll-over and before activation should not err out.",
response.getNMTokenMasterKey());
Assert.assertEquals(containerToken +
"Roll-over should have incremented the key-id only by one!",
containerTokenMasterKey.getKeyId() + 1,
response.getContainerTokenMasterKey().getKeyId());
Assert.assertEquals(nmToken +
"Roll-over should have incremented the key-id only by one!",
nmTokenMasterKey.getKeyId() + 1,
response.getNMTokenMasterKey().getKeyId());
dispatcher.await();
response = nm.nodeHeartbeat(true);
Assert.assertNull(containerToken +
"Second heartbeat after roll-over shouldn't get any key updates!",
response.getContainerTokenMasterKey());
Assert.assertNull(nmToken +
"Second heartbeat after roll-over shouldn't get any key updates!",
response.getNMTokenMasterKey());
dispatcher.await();
// Let's force activation
rm.getRMContext().getContainerTokenSecretManager().activateNextMasterKey();
rm.getRMContext().getNMTokenSecretManager().activateNextMasterKey();
response = nm.nodeHeartbeat(true);
Assert.assertNull(containerToken
+ "Activation shouldn't cause any key updates!",
response.getContainerTokenMasterKey());
Assert.assertNull(nmToken
+ "Activation shouldn't cause any key updates!",
response.getNMTokenMasterKey());
dispatcher.await();
response = nm.nodeHeartbeat(true);
Assert.assertNull(containerToken +
"Even second heartbeat after activation shouldn't get any key updates!",
response.getContainerTokenMasterKey());
Assert.assertNull(nmToken +
"Even second heartbeat after activation shouldn't get any key updates!",
response.getNMTokenMasterKey());
dispatcher.await();
rm.stop();
}
}
| 6,537 | 37.916667 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestRecordFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factories.impl.pb.RecordFactoryPBImpl;
import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerHeartbeatResponse;
import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.impl.pb.LocalizerHeartbeatResponsePBImpl;
import org.junit.Test;
import org.junit.Assert;
public class TestRecordFactory {
@Test
public void testPbRecordFactory() {
RecordFactory pbRecordFactory = RecordFactoryPBImpl.get();
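    // The PB record factory maps an API interface to its protobuf-backed
    // implementation class (here LocalizerHeartbeatResponsePBImpl) by
    // naming convention, instantiating it reflectively.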
try {
LocalizerHeartbeatResponse response = pbRecordFactory.newRecordInstance(
LocalizerHeartbeatResponse.class);
Assert.assertEquals(LocalizerHeartbeatResponsePBImpl.class,
response.getClass());
} catch (YarnRuntimeException e) {
e.printStackTrace();
Assert.fail("Failed to crete record");
}
}
}
| 1,810 | 37.531915 | 110 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDockerContainerExecutor.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager;
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.RETURNS_DEEP_STUBS;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.HashMap;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerStartContext;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import com.google.common.base.Strings;
/**
 * This is intended to test the DockerContainerExecutor code, but it requires
 * docker to be installed.
 * <p>
 * To run the tests, set docker-service-url to the host and port where the
 * docker service is running (if docker-service-url is not specified, the
 * local daemon will be used):
 * <pre><code>
 * mvn test -Ddocker-service-url=tcp://0.0.0.0:4243 -Dtest=TestDockerContainerExecutor
 * </code></pre>
 */
public class TestDockerContainerExecutor {
private static final Log LOG = LogFactory
.getLog(TestDockerContainerExecutor.class);
private static File workSpace = null;
private DockerContainerExecutor exec = null;
private LocalDirsHandlerService dirsHandler;
private Path workDir;
private FileContext lfs;
private String yarnImage;
private String appSubmitter;
private String dockerUrl;
private String testImage = "centos:latest";
private String dockerExec;
private ContainerId getNextContainerId() {
ContainerId cId = mock(ContainerId.class, RETURNS_DEEP_STUBS);
String id = "CONTAINER_" + System.currentTimeMillis();
when(cId.toString()).thenReturn(id);
return cId;
}
  // Initialize a new DockerContainerExecutor that will be used to launch
  // mocked containers.
  @Before
  public void setup() {
try {
lfs = FileContext.getLocalFSFileContext();
workDir = new Path("/tmp/temp-" + System.currentTimeMillis());
workSpace = new File(workDir.toUri().getPath());
lfs.mkdir(workDir, FsPermission.getDirDefault(), true);
} catch (IOException e) {
throw new RuntimeException(e);
}
Configuration conf = new Configuration();
yarnImage = "yarnImage";
long time = System.currentTimeMillis();
conf.set(YarnConfiguration.NM_LOCAL_DIRS, "/tmp/nm-local-dir" + time);
conf.set(YarnConfiguration.NM_LOG_DIRS, "/tmp/userlogs" + time);
dockerUrl = System.getProperty("docker-service-url");
LOG.info("dockerUrl: " + dockerUrl);
if (!Strings.isNullOrEmpty(dockerUrl)) {
dockerUrl = " -H " + dockerUrl;
} else if(isDockerDaemonRunningLocally()) {
dockerUrl = "";
} else {
return;
}
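    // Reaching this point means a docker endpoint is usable; otherwise the
    // early return above leaves exec null and shouldRun() skips the tests.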
dockerExec = "docker " + dockerUrl;
conf.set(
YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_IMAGE_NAME, yarnImage);
conf.set(
YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_EXEC_NAME, dockerExec);
exec = new DockerContainerExecutor();
dirsHandler = new LocalDirsHandlerService();
dirsHandler.init(conf);
exec.setConf(conf);
appSubmitter = System.getProperty("application.submitter");
if (appSubmitter == null || appSubmitter.isEmpty()) {
appSubmitter = "nobody";
}
shellExec(dockerExec + " pull " + testImage);
}
private Shell.ShellCommandExecutor shellExec(String command) {
try {
Shell.ShellCommandExecutor shExec = new Shell.ShellCommandExecutor(
command.split("\\s+"),
new File(workDir.toUri().getPath()),
System.getenv());
shExec.execute();
return shExec;
} catch (IOException e) {
throw new RuntimeException(e);
}
}
private boolean shouldRun() {
return exec != null;
}
private boolean isDockerDaemonRunningLocally() {
boolean dockerDaemonRunningLocally = true;
try {
shellExec("docker info");
} catch (Exception e) {
LOG.info("docker daemon is not running on local machine.");
dockerDaemonRunningLocally = false;
}
return dockerDaemonRunningLocally;
}
/**
   * Test that a docker container can be launched to run a command.
   * @param cId a fake ContainerId
   * @param launchCtxEnv the environment to set in the container launch context
   * @param cmd the command to launch inside the docker container
* @return the exit code of the process used to launch the docker container
* @throws IOException
*/
private int runAndBlock(ContainerId cId, Map<String, String> launchCtxEnv,
String... cmd) throws IOException {
String appId = "APP_" + System.currentTimeMillis();
Container container = mock(Container.class);
ContainerLaunchContext context = mock(ContainerLaunchContext.class);
when(container.getContainerId()).thenReturn(cId);
when(container.getLaunchContext()).thenReturn(context);
when(cId.getApplicationAttemptId().getApplicationId().toString())
.thenReturn(appId);
when(context.getEnvironment()).thenReturn(launchCtxEnv);
String script = writeScriptFile(launchCtxEnv, cmd);
Path scriptPath = new Path(script);
Path tokensPath = new Path("/dev/null");
Path workDir = new Path(workSpace.getAbsolutePath());
Path pidFile = new Path(workDir, "pid.txt");
exec.activateContainer(cId, pidFile);
return exec.launchContainer(new ContainerStartContext.Builder()
.setContainer(container)
.setNmPrivateContainerScriptPath(scriptPath)
.setNmPrivateTokensPath(tokensPath)
.setUser(appSubmitter)
.setAppId(appId)
.setContainerWorkDir(workDir)
.setLocalDirs(dirsHandler.getLocalDirs())
.setLogDirs(dirsHandler.getLogDirs())
.build());
}
// Write the script used to launch the docker container in a temp file
private String writeScriptFile(Map<String, String> launchCtxEnv,
String... cmd) throws IOException {
File f = File.createTempFile("TestDockerContainerExecutor", ".sh");
f.deleteOnExit();
PrintWriter p = new PrintWriter(new FileOutputStream(f));
for(Map.Entry<String, String> entry: launchCtxEnv.entrySet()) {
p.println("export " + entry.getKey() + "=\"" + entry.getValue() + "\"");
}
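    // Escape backslashes and single quotes so the command text survives
    // shell parsing when the generated script is executed.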
for (String part : cmd) {
p.print(part.replace("\\", "\\\\").replace("'", "\\'"));
p.print(" ");
}
p.println();
p.close();
return f.getAbsolutePath();
}
@After
public void tearDown() {
try {
lfs.delete(workDir, true);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
/**
* Test that a touch command can be launched successfully in a docker
* container
*/
@Test(timeout=1000000)
public void testLaunchContainer() throws IOException {
if (!shouldRun()) {
LOG.warn("Docker not installed, aborting test.");
return;
}
Map<String, String> env = new HashMap<String, String>();
env.put(YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_IMAGE_NAME,
testImage);
String touchFileName = "touch-file-" + System.currentTimeMillis();
File touchFile = new File(dirsHandler.getLocalDirs().get(0), touchFileName);
ContainerId cId = getNextContainerId();
int ret = runAndBlock(cId, env, "touch", touchFile.getAbsolutePath(), "&&",
"cp", touchFile.getAbsolutePath(), "/");
assertEquals(0, ret);
}
}
| 8,657 | 34.338776 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager;
import static org.mockito.Matchers.argThat;
import static org.mockito.Matchers.isNull;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import java.io.File;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.ContainerManagementProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.URL;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.security.NMTokenIdentifier;
import org.apache.hadoop.yarn.server.nodemanager.DeletionService.FileDeletionTask;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.TestContainerManager;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.util.Records;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.ArgumentMatcher;
public class TestNodeManagerReboot {
static final File basedir = new File("target",
TestNodeManagerReboot.class.getName());
static final File logsDir = new File(basedir, "logs");
static final File nmLocalDir = new File(basedir, "nm0");
static final File localResourceDir = new File(basedir, "resource");
static final String user = System.getProperty("user.name");
private FileContext localFS;
private MyNodeManager nm;
private DeletionService delService;
static final Log LOG = LogFactory.getLog(TestNodeManagerReboot.class);
@Before
public void setup() throws UnsupportedFileSystemException {
localFS = FileContext.getLocalFSFileContext();
}
@After
public void tearDown() throws IOException, InterruptedException {
localFS.delete(new Path(basedir.getPath()), true);
if (nm != null) {
nm.stop();
}
}
@Test(timeout = 2000000)
public void testClearLocalDirWhenNodeReboot() throws IOException,
YarnException, InterruptedException {
nm = new MyNodeManager();
nm.start();
final ContainerManagementProtocol containerManager =
nm.getContainerManager();
// create files under fileCache
createFiles(nmLocalDir.getAbsolutePath(), ContainerLocalizer.FILECACHE, 100);
localResourceDir.mkdirs();
ContainerLaunchContext containerLaunchContext =
Records.newRecord(ContainerLaunchContext.class);
// Construct the Container-id
ContainerId cId = createContainerId();
URL localResourceUri =
ConverterUtils.getYarnUrlFromPath(localFS.makeQualified(new Path(
localResourceDir.getAbsolutePath())));
LocalResource localResource =
LocalResource.newInstance(localResourceUri, LocalResourceType.FILE,
LocalResourceVisibility.APPLICATION, -1,
localResourceDir.lastModified());
String destinationFile = "dest_file";
Map<String, LocalResource> localResources =
new HashMap<String, LocalResource>();
localResources.put(destinationFile, localResource);
containerLaunchContext.setLocalResources(localResources);
List<String> commands = new ArrayList<String>();
containerLaunchContext.setCommands(commands);
NodeId nodeId = nm.getNMContext().getNodeId();
StartContainerRequest scRequest =
StartContainerRequest.newInstance(containerLaunchContext,
TestContainerManager.createContainerToken(
cId, 0, nodeId, destinationFile, nm.getNMContext()
.getContainerTokenSecretManager()));
List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
list.add(scRequest);
final StartContainersRequest allRequests =
StartContainersRequest.newInstance(list);
final UserGroupInformation currentUser =
UserGroupInformation.createRemoteUser(cId.getApplicationAttemptId()
.toString());
NMTokenIdentifier nmIdentifier =
new NMTokenIdentifier(cId.getApplicationAttemptId(), nodeId, user, 123);
currentUser.addTokenIdentifier(nmIdentifier);
currentUser.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws YarnException, IOException {
nm.getContainerManager().startContainers(allRequests);
return null;
}
});
List<ContainerId> containerIds = new ArrayList<ContainerId>();
containerIds.add(cId);
GetContainerStatusesRequest request =
GetContainerStatusesRequest.newInstance(containerIds);
Container container =
nm.getNMContext().getContainers().get(request.getContainerIds().get(0));
final int MAX_TRIES = 20;
int numTries = 0;
while (!container.getContainerState().equals(ContainerState.DONE)
&& numTries <= MAX_TRIES) {
try {
Thread.sleep(500);
} catch (InterruptedException ex) {
// Do nothing
}
numTries++;
}
Assert.assertEquals(ContainerState.DONE, container.getContainerState());
Assert
.assertTrue(
"The container should create a subDir named currentUser: " + user
+ "under localDir/usercache",
numOfLocalDirs(nmLocalDir.getAbsolutePath(),
ContainerLocalizer.USERCACHE) > 0);
Assert.assertTrue(
"There should be files or Dirs under nm_private when "
+ "container is launched",
numOfLocalDirs(nmLocalDir.getAbsolutePath(),
ResourceLocalizationService.NM_PRIVATE_DIR) > 0);
// restart the NodeManager
restartNM(MAX_TRIES);
checkNumOfLocalDirs();
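    // The NM renames doomed directories with a _DEL_ suffix and hands them
    // to the deletion service; verify each deletion was scheduled once.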
verify(delService, times(1)).delete(
(String) isNull(),
argThat(new PathInclude(ResourceLocalizationService.NM_PRIVATE_DIR
+ "_DEL_")));
verify(delService, times(1)).delete((String) isNull(),
argThat(new PathInclude(ContainerLocalizer.FILECACHE + "_DEL_")));
verify(delService, times(1)).scheduleFileDeletionTask(
argThat(new FileDeletionInclude(user, null,
new String[] { destinationFile })));
verify(delService, times(1)).scheduleFileDeletionTask(
argThat(new FileDeletionInclude(null, ContainerLocalizer.USERCACHE
+ "_DEL_", new String[] {})));
// restart the NodeManager again
// this time usercache directory should be empty
restartNM(MAX_TRIES);
checkNumOfLocalDirs();
}
private void restartNM(int maxTries) {
nm.stop();
nm = new MyNodeManager();
nm.start();
int numTries = 0;
while ((numOfLocalDirs(nmLocalDir.getAbsolutePath(),
ContainerLocalizer.USERCACHE) > 0
|| numOfLocalDirs(nmLocalDir.getAbsolutePath(),
ContainerLocalizer.FILECACHE) > 0 || numOfLocalDirs(
nmLocalDir.getAbsolutePath(), ResourceLocalizationService.NM_PRIVATE_DIR) > 0)
&& numTries < maxTries) {
try {
Thread.sleep(500);
} catch (InterruptedException ex) {
// Do nothing
}
numTries++;
}
}
private void checkNumOfLocalDirs() throws IOException {
Assert
.assertTrue(
"After NM reboots, all local files should be deleted",
numOfLocalDirs(nmLocalDir.getAbsolutePath(),
ContainerLocalizer.USERCACHE) == 0
&& numOfLocalDirs(nmLocalDir.getAbsolutePath(),
ContainerLocalizer.FILECACHE) == 0
&& numOfLocalDirs(nmLocalDir.getAbsolutePath(),
ResourceLocalizationService.NM_PRIVATE_DIR) == 0);
Assert
.assertTrue(
"After NM reboots, usercache_DEL_* directory should be deleted",
numOfUsercacheDELDirs(nmLocalDir.getAbsolutePath()) == 0);
}
private int numOfLocalDirs(String localDir, String localSubDir) {
File[] listOfFiles = new File(localDir, localSubDir).listFiles();
if (listOfFiles == null) {
return 0;
} else {
return listOfFiles.length;
}
}
private int numOfUsercacheDELDirs(String localDir) throws IOException {
int count = 0;
RemoteIterator<FileStatus> fileStatus = localFS.listStatus(new Path(localDir));
while (fileStatus.hasNext()) {
FileStatus status = fileStatus.next();
if (status.getPath().getName().matches(".*" +
ContainerLocalizer.USERCACHE + "_DEL_.*")) {
count++;
}
}
return count;
}
private void createFiles(String dir, String subDir, int numOfFiles) {
for (int i = 0; i < numOfFiles; i++) {
File newFile = new File(dir + "/" + subDir, "file_" + (i + 1));
try {
newFile.createNewFile();
} catch (IOException e) {
// Do nothing
}
}
}
private ContainerId createContainerId() {
ApplicationId appId = ApplicationId.newInstance(0, 0);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 1);
ContainerId containerId = ContainerId.newContainerId(appAttemptId, 0);
return containerId;
}
private class MyNodeManager extends NodeManager {
public MyNodeManager() {
super();
this.init(createNMConfig());
}
@Override
protected NodeStatusUpdater createNodeStatusUpdater(Context context,
Dispatcher dispatcher, NodeHealthCheckerService healthChecker) {
MockNodeStatusUpdater myNodeStatusUpdater =
new MockNodeStatusUpdater(context, dispatcher, healthChecker, metrics);
return myNodeStatusUpdater;
}
@Override
protected DeletionService createDeletionService(ContainerExecutor exec) {
delService = spy(new DeletionService(exec));
return delService;
}
private YarnConfiguration createNMConfig() {
YarnConfiguration conf = new YarnConfiguration();
conf.setInt(YarnConfiguration.NM_PMEM_MB, 5 * 1024); // 5GB
conf.set(YarnConfiguration.NM_ADDRESS, "127.0.0.1:12345");
conf.set(YarnConfiguration.NM_LOCALIZER_ADDRESS, "127.0.0.1:12346");
conf.set(YarnConfiguration.NM_LOG_DIRS, logsDir.getAbsolutePath());
conf.set(YarnConfiguration.NM_LOCAL_DIRS, nmLocalDir.getAbsolutePath());
conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS, 1);
return conf;
}
}
class PathInclude extends ArgumentMatcher<Path> {
final String part;
PathInclude(String part) {
this.part = part;
}
@Override
public boolean matches(Object o) {
return ((Path) o).getName().indexOf(part) != -1;
}
}
class FileDeletionInclude extends ArgumentMatcher<FileDeletionTask> {
final String user;
final String subDirIncludes;
final String[] baseDirIncludes;
public FileDeletionInclude(String user, String subDirIncludes,
String [] baseDirIncludes) {
this.user = user;
this.subDirIncludes = subDirIncludes;
this.baseDirIncludes = baseDirIncludes;
}
@Override
public boolean matches(Object o) {
FileDeletionTask fd = (FileDeletionTask)o;
if (fd.getUser() == null && user != null) {
return false;
} else if (fd.getUser() != null && user == null) {
return false;
} else if (fd.getUser() != null && user != null) {
return fd.getUser().equals(user);
}
if (!comparePaths(fd.getSubDir(), subDirIncludes)) {
return false;
}
if (baseDirIncludes == null && fd.getBaseDirs() != null) {
return false;
} else if (baseDirIncludes != null && fd.getBaseDirs() == null ) {
return false;
} else if (baseDirIncludes != null && fd.getBaseDirs() != null) {
if (baseDirIncludes.length != fd.getBaseDirs().size()) {
return false;
}
for (int i =0 ; i < baseDirIncludes.length; i++) {
if (!comparePaths(fd.getBaseDirs().get(i), baseDirIncludes[i])) {
return false;
}
}
}
return true;
}
public boolean comparePaths(Path p1, String p2) {
if (p1 == null && p2 != null){
return false;
} else if (p1 != null && p2 == null) {
return false;
} else if (p1 != null && p2 != null ){
return p1.toUri().getPath().contains(p2.toString());
}
return true;
}
}
}
| 14,462 | 35.339196 | 104 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.exceptions.NMNotYetReadyException;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.server.api.ResourceTracker;
import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse;
import org.apache.hadoop.yarn.server.api.records.NodeAction;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics;
import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
import org.apache.hadoop.yarn.server.utils.YarnServerBuilderUtils;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
public class TestNodeManagerResync {
static final File basedir =
new File("target", TestNodeManagerResync.class.getName());
static final File tmpDir = new File(basedir, "tmpDir");
static final File logsDir = new File(basedir, "logs");
static final File remoteLogsDir = new File(basedir, "remotelogs");
static final File nmLocalDir = new File(basedir, "nm0");
static final File processStartFile = new File(tmpDir, "start_file.txt")
.getAbsoluteFile();
static final RecordFactory recordFactory = RecordFactoryProvider
.getRecordFactory(null);
static final String user = "nobody";
private FileContext localFS;
private CyclicBarrier syncBarrier;
private AtomicBoolean assertionFailedInThread = new AtomicBoolean(false);
private AtomicBoolean isNMShutdownCalled = new AtomicBoolean(false);
private final NodeManagerEvent resyncEvent =
new NodeManagerEvent(NodeManagerEventType.RESYNC);
@Before
public void setup() throws UnsupportedFileSystemException {
localFS = FileContext.getLocalFSFileContext();
tmpDir.mkdirs();
logsDir.mkdirs();
remoteLogsDir.mkdirs();
nmLocalDir.mkdirs();
syncBarrier = new CyclicBarrier(2);
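    // Two parties share the barrier: the JUnit thread and the
    // NodeStatusUpdater thread performing the resync; await() lines them up.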
}
@After
public void tearDown() throws IOException, InterruptedException {
localFS.delete(new Path(basedir.getPath()), true);
assertionFailedInThread.set(false);
}
@Test
public void testKillContainersOnResync() throws IOException,
InterruptedException, YarnException {
TestNodeManager1 nm = new TestNodeManager1(false);
testContainerPreservationOnResyncImpl(nm, false);
}
@Test
public void testPreserveContainersOnResyncKeepingContainers() throws
IOException,
InterruptedException, YarnException {
TestNodeManager1 nm = new TestNodeManager1(true);
testContainerPreservationOnResyncImpl(nm, true);
}
@SuppressWarnings("unchecked")
protected void testContainerPreservationOnResyncImpl(TestNodeManager1 nm,
boolean isWorkPreservingRestartEnabled)
throws IOException, YarnException, InterruptedException {
YarnConfiguration conf = createNMConfig();
conf.setBoolean(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED,
isWorkPreservingRestartEnabled);
try {
nm.init(conf);
nm.start();
ContainerId cId = TestNodeManagerShutdown.createContainerId();
TestNodeManagerShutdown.startContainer(nm, cId, localFS, tmpDir,
processStartFile);
nm.setExistingContainerId(cId);
Assert.assertEquals(1, ((TestNodeManager1) nm).getNMRegistrationCount());
nm.getNMDispatcher().getEventHandler().handle(resyncEvent);
try {
syncBarrier.await();
} catch (BrokenBarrierException e) {
}
Assert.assertEquals(2, ((TestNodeManager1) nm).getNMRegistrationCount());
// Only containers should be killed on resync, apps should lie around.
// That way local resources for apps can be used beyond resync without
// relocalization
Assert.assertTrue(nm.getNMContext().getApplications()
.containsKey(cId.getApplicationAttemptId().getApplicationId()));
Assert.assertFalse(assertionFailedInThread.get());
}
finally {
nm.stop();
}
}
  // This test verifies that new container requests are blocked when the NM
  // starts from scratch until it registers with the RM, and while the NM is
  // resyncing with the RM
@SuppressWarnings("unchecked")
@Test(timeout=60000)
public void testBlockNewContainerRequestsOnStartAndResync()
throws IOException, InterruptedException, YarnException {
NodeManager nm = new TestNodeManager2();
YarnConfiguration conf = createNMConfig();
conf.setBoolean(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, false);
nm.init(conf);
nm.start();
// Start the container in running state
ContainerId cId = TestNodeManagerShutdown.createContainerId();
TestNodeManagerShutdown.startContainer(nm, cId, localFS, tmpDir,
processStartFile);
nm.getNMDispatcher().getEventHandler()
.handle(new NodeManagerEvent(NodeManagerEventType.RESYNC));
try {
syncBarrier.await();
} catch (BrokenBarrierException e) {
}
Assert.assertFalse(assertionFailedInThread.get());
nm.stop();
}
@SuppressWarnings("unchecked")
@Test(timeout=10000)
public void testNMshutdownWhenResyncThrowException() throws IOException,
InterruptedException, YarnException {
NodeManager nm = new TestNodeManager3();
YarnConfiguration conf = createNMConfig();
nm.init(conf);
nm.start();
Assert.assertEquals(1, ((TestNodeManager3) nm).getNMRegistrationCount());
nm.getNMDispatcher().getEventHandler()
.handle(new NodeManagerEvent(NodeManagerEventType.RESYNC));
synchronized (isNMShutdownCalled) {
while (isNMShutdownCalled.get() == false) {
try {
isNMShutdownCalled.wait();
} catch (InterruptedException e) {
}
}
}
Assert.assertTrue("NM shutdown not called.",isNMShutdownCalled.get());
nm.stop();
}
  // This is to test that when the NM gets the resync response from the last
  // heartbeat, it can send the already-sent-via-last-heartbeat container
  // statuses again when it re-registers with the RM.
@Test
public void testNMSentContainerStatusOnResync() throws Exception {
final ContainerStatus testCompleteContainer =
TestNodeStatusUpdater.createContainerStatus(2, ContainerState.COMPLETE);
final Container container =
TestNodeStatusUpdater.getMockContainer(testCompleteContainer);
NMContainerStatus report =
createNMContainerStatus(2, ContainerState.COMPLETE);
when(container.getNMContainerStatus()).thenReturn(report);
NodeManager nm = new NodeManager() {
int registerCount = 0;
@Override
protected NodeStatusUpdater createNodeStatusUpdater(Context context,
Dispatcher dispatcher, NodeHealthCheckerService healthChecker) {
return new TestNodeStatusUpdaterResync(context, dispatcher,
healthChecker, metrics) {
@Override
protected ResourceTracker createResourceTracker() {
return new MockResourceTracker() {
@Override
public RegisterNodeManagerResponse registerNodeManager(
RegisterNodeManagerRequest request) throws YarnException,
IOException {
if (registerCount == 0) {
// first register, no containers info.
try {
Assert.assertEquals(0, request.getNMContainerStatuses()
.size());
} catch (AssertionError error) {
error.printStackTrace();
assertionFailedInThread.set(true);
}
// put the completed container into the context
getNMContext().getContainers().put(
testCompleteContainer.getContainerId(), container);
getNMContext().getApplications().put(
testCompleteContainer.getContainerId()
.getApplicationAttemptId().getApplicationId(),
mock(Application.class));
} else {
// second register contains the completed container info.
List<NMContainerStatus> statuses =
request.getNMContainerStatuses();
try {
Assert.assertEquals(1, statuses.size());
Assert.assertEquals(testCompleteContainer.getContainerId(),
statuses.get(0).getContainerId());
} catch (AssertionError error) {
error.printStackTrace();
assertionFailedInThread.set(true);
}
}
registerCount++;
return super.registerNodeManager(request);
}
@Override
public NodeHeartbeatResponse nodeHeartbeat(
NodeHeartbeatRequest request) {
// first heartBeat contains the completed container info
List<ContainerStatus> statuses =
request.getNodeStatus().getContainersStatuses();
try {
Assert.assertEquals(1, statuses.size());
Assert.assertEquals(testCompleteContainer.getContainerId(),
statuses.get(0).getContainerId());
} catch (AssertionError error) {
error.printStackTrace();
assertionFailedInThread.set(true);
}
// notify RESYNC on first heartbeat.
return YarnServerBuilderUtils.newNodeHeartbeatResponse(1,
NodeAction.RESYNC, null, null, null, null, 1000L);
}
};
}
};
}
};
YarnConfiguration conf = createNMConfig();
nm.init(conf);
nm.start();
try {
syncBarrier.await();
} catch (BrokenBarrierException e) {
}
Assert.assertFalse(assertionFailedInThread.get());
nm.stop();
}
// This can be used as a common base class for testing NM resync behavior.
class TestNodeStatusUpdaterResync extends MockNodeStatusUpdater {
public TestNodeStatusUpdaterResync(Context context, Dispatcher dispatcher,
NodeHealthCheckerService healthChecker, NodeManagerMetrics metrics) {
super(context, dispatcher, healthChecker, metrics);
}
@Override
protected void rebootNodeStatusUpdaterAndRegisterWithRM() {
try {
// Wait here so as to sync with the main test thread.
super.rebootNodeStatusUpdaterAndRegisterWithRM();
syncBarrier.await();
} catch (InterruptedException e) {
} catch (BrokenBarrierException e) {
} catch (AssertionError ae) {
ae.printStackTrace();
assertionFailedInThread.set(true);
}
}
}
private YarnConfiguration createNMConfig() {
YarnConfiguration conf = new YarnConfiguration();
conf.setInt(YarnConfiguration.NM_PMEM_MB, 5*1024); // 5GB
conf.set(YarnConfiguration.NM_ADDRESS, "127.0.0.1:12345");
conf.set(YarnConfiguration.NM_LOCALIZER_ADDRESS, "127.0.0.1:12346");
conf.set(YarnConfiguration.NM_LOG_DIRS, logsDir.getAbsolutePath());
conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
remoteLogsDir.getAbsolutePath());
conf.set(YarnConfiguration.NM_LOCAL_DIRS, nmLocalDir.getAbsolutePath());
conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS, 1);
return conf;
}
class TestNodeManager1 extends NodeManager {
private int registrationCount = 0;
private boolean containersShouldBePreserved;
private ContainerId existingCid;
public TestNodeManager1(boolean containersShouldBePreserved) {
this.containersShouldBePreserved = containersShouldBePreserved;
}
public void setExistingContainerId(ContainerId cId) {
existingCid = cId;
}
@Override
protected NodeStatusUpdater createNodeStatusUpdater(Context context,
Dispatcher dispatcher, NodeHealthCheckerService healthChecker) {
return new TestNodeStatusUpdaterImpl1(context, dispatcher,
healthChecker, metrics);
}
public int getNMRegistrationCount() {
return registrationCount;
}
class TestNodeStatusUpdaterImpl1 extends MockNodeStatusUpdater {
public TestNodeStatusUpdaterImpl1(Context context, Dispatcher dispatcher,
NodeHealthCheckerService healthChecker, NodeManagerMetrics metrics) {
super(context, dispatcher, healthChecker, metrics);
}
@Override
protected void registerWithRM() throws YarnException, IOException {
super.registerWithRM();
registrationCount++;
}
@Override
protected void rebootNodeStatusUpdaterAndRegisterWithRM() {
ConcurrentMap<ContainerId, org.apache.hadoop.yarn.server.nodemanager
.containermanager.container.Container> containers =
getNMContext().getContainers();
try {
try {
if (containersShouldBePreserved) {
Assert.assertFalse(containers.isEmpty());
Assert.assertTrue(containers.containsKey(existingCid));
Assert.assertEquals(ContainerState.RUNNING,
containers.get(existingCid)
.cloneAndGetContainerStatus().getState());
} else {
// ensure that containers are empty or are completed before
// restart nodeStatusUpdater
if (!containers.isEmpty()) {
Assert.assertEquals(ContainerState.COMPLETE,
containers.get(existingCid)
.cloneAndGetContainerStatus().getState());
}
}
super.rebootNodeStatusUpdaterAndRegisterWithRM();
}
catch (AssertionError ae) {
ae.printStackTrace();
assertionFailedInThread.set(true);
}
finally {
syncBarrier.await();
}
} catch (InterruptedException e) {
} catch (BrokenBarrierException e) {
} catch (AssertionError ae) {
ae.printStackTrace();
assertionFailedInThread.set(true);
}
}
}
}
class TestNodeManager2 extends NodeManager {
Thread launchContainersThread = null;
@Override
protected NodeStatusUpdater createNodeStatusUpdater(Context context,
Dispatcher dispatcher, NodeHealthCheckerService healthChecker) {
return new TestNodeStatusUpdaterImpl2(context, dispatcher,
healthChecker, metrics);
}
@Override
protected ContainerManagerImpl createContainerManager(Context context,
ContainerExecutor exec, DeletionService del,
NodeStatusUpdater nodeStatusUpdater, ApplicationACLsManager aclsManager,
LocalDirsHandlerService dirsHandler) {
return new ContainerManagerImpl(context, exec, del, nodeStatusUpdater,
metrics, aclsManager, dirsHandler){
@Override
public void setBlockNewContainerRequests(
boolean blockNewContainerRequests) {
if (blockNewContainerRequests) {
// start test thread right after blockNewContainerRequests is set
// true
super.setBlockNewContainerRequests(blockNewContainerRequests);
launchContainersThread = new RejectedContainersLauncherThread();
launchContainersThread.start();
} else {
// join the test thread right before blockNewContainerRequests is
// reset
try {
// stop the test thread
((RejectedContainersLauncherThread) launchContainersThread)
.setStopThreadFlag(true);
launchContainersThread.join();
((RejectedContainersLauncherThread) launchContainersThread)
.setStopThreadFlag(false);
super.setBlockNewContainerRequests(blockNewContainerRequests);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
}
};
}
class TestNodeStatusUpdaterImpl2 extends MockNodeStatusUpdater {
public TestNodeStatusUpdaterImpl2(Context context, Dispatcher dispatcher,
NodeHealthCheckerService healthChecker, NodeManagerMetrics metrics) {
super(context, dispatcher, healthChecker, metrics);
}
@Override
protected void rebootNodeStatusUpdaterAndRegisterWithRM() {
ConcurrentMap<ContainerId, org.apache.hadoop.yarn.server.nodemanager
.containermanager.container.Container> containers =
getNMContext().getContainers();
try {
// ensure that containers are empty before restart nodeStatusUpdater
if (!containers.isEmpty()) {
for (Container container: containers.values()) {
Assert.assertEquals(ContainerState.COMPLETE,
container.cloneAndGetContainerStatus().getState());
}
}
super.rebootNodeStatusUpdaterAndRegisterWithRM();
// After this point new containers are free to be launched, except
// containers from previous RM
// Wait here so as to sync with the main test thread.
syncBarrier.await();
} catch (InterruptedException e) {
} catch (BrokenBarrierException e) {
} catch (AssertionError ae) {
ae.printStackTrace();
assertionFailedInThread.set(true);
}
}
}
class RejectedContainersLauncherThread extends Thread {
boolean isStopped = false;
public void setStopThreadFlag(boolean isStopped) {
this.isStopped = isStopped;
}
@Override
public void run() {
int numContainers = 0;
int numContainersRejected = 0;
ContainerLaunchContext containerLaunchContext =
recordFactory.newRecordInstance(ContainerLaunchContext.class);
try {
while (!isStopped && numContainers < 10) {
StartContainerRequest scRequest =
StartContainerRequest.newInstance(containerLaunchContext,
null);
List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
list.add(scRequest);
StartContainersRequest allRequests =
StartContainersRequest.newInstance(list);
System.out.println("no. of containers to be launched: "
+ numContainers);
numContainers++;
try {
getContainerManager().startContainers(allRequests);
} catch (YarnException e) {
numContainersRejected++;
Assert.assertTrue(e.getMessage().contains(
"Rejecting new containers as NodeManager has not" +
" yet connected with ResourceManager"));
Assert.assertEquals(NMNotYetReadyException.class.getName(), e
.getClass().getName());
} catch (IOException e) {
e.printStackTrace();
assertionFailedInThread.set(true);
}
}
// no. of containers to be launched should equal to no. of
// containers rejected
Assert.assertEquals(numContainers, numContainersRejected);
} catch (AssertionError ae) {
assertionFailedInThread.set(true);
}
}
}
}
class TestNodeManager3 extends NodeManager {
private int registrationCount = 0;
@Override
protected NodeStatusUpdater createNodeStatusUpdater(Context context,
Dispatcher dispatcher, NodeHealthCheckerService healthChecker) {
return new TestNodeStatusUpdaterImpl3(context, dispatcher, healthChecker,
metrics);
}
public int getNMRegistrationCount() {
return registrationCount;
}
@Override
protected void shutDown() {
synchronized (isNMShutdownCalled) {
isNMShutdownCalled.set(true);
isNMShutdownCalled.notify();
}
}
class TestNodeStatusUpdaterImpl3 extends MockNodeStatusUpdater {
public TestNodeStatusUpdaterImpl3(Context context, Dispatcher dispatcher,
NodeHealthCheckerService healthChecker, NodeManagerMetrics metrics) {
super(context, dispatcher, healthChecker, metrics);
}
@Override
protected void registerWithRM() throws YarnException, IOException {
super.registerWithRM();
registrationCount++;
if (registrationCount > 1) {
throw new YarnRuntimeException("Registration with RM failed.");
}
}
  }
}
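  /**
   * Builds an NMContainerStatus for a fixed application (cluster timestamp 0,
   * app id 1, attempt 1) with the given container id and state, a resource of
   * 1024MB/1 vcore, and "recover container" as the diagnostics string.
   */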
public static NMContainerStatus createNMContainerStatus(int id,
ContainerState containerState) {
ApplicationId applicationId = ApplicationId.newInstance(0, 1);
ApplicationAttemptId applicationAttemptId =
ApplicationAttemptId.newInstance(applicationId, 1);
ContainerId containerId = ContainerId.newContainerId(applicationAttemptId, id);
NMContainerStatus containerReport =
NMContainerStatus.newInstance(containerId, containerState,
Resource.newInstance(1024, 1), "recover container", 0,
Priority.newInstance(10), 0);
return containerReport;
}
}
| 23,797 | 38.400662 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assume.assumeTrue;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.when;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.io.LineNumberReader;
import java.net.InetSocketAddress;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerDiagnosticsUpdateEvent;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.DefaultLinuxContainerRuntime;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntime;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException;
import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerSignalContext;
import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerStartContext;
import org.apache.hadoop.yarn.server.nodemanager.executor.DeletionAsUserContext;
import org.apache.hadoop.yarn.server.nodemanager.executor.LocalizerStartContext;
import org.junit.Assert;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import static java.nio.file.StandardCopyOption.REPLACE_EXISTING;
public class TestLinuxContainerExecutorWithMocks {
private static final Log LOG = LogFactory
.getLog(TestLinuxContainerExecutorWithMocks.class);
private static final String MOCK_EXECUTOR =
"./src/test/resources/mock-container-executor";
private static final String MOCK_EXECUTOR_WITH_ERROR =
"./src/test/resources/mock-container-executer-with-error";
private String tmpMockExecutor;
private LinuxContainerExecutor mockExec = null;
private final File mockParamFile = new File("./params.txt");
private LocalDirsHandlerService dirsHandler;
private void deleteMockParamFile() {
    if (mockParamFile.exists()) {
mockParamFile.delete();
}
}
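  // The mock container-executor scripts are expected to append the arguments
  // they were invoked with, one per line, to ./params.txt; readMockParams()
  // reads that file back so tests can assert on the exact command line.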
private List<String> readMockParams() throws IOException {
LinkedList<String> ret = new LinkedList<String>();
LineNumberReader reader = new LineNumberReader(new FileReader(
mockParamFile));
String line;
while((line = reader.readLine()) != null) {
ret.add(line);
}
reader.close();
return ret;
}
private void setupMockExecutor(String executorPath, Configuration conf)
throws IOException {
    // we'll always use the tmpMockExecutor, since
    // PrivilegedOperationExecutor can only be initialized once.
Files.copy(Paths.get(executorPath), Paths.get(tmpMockExecutor),
REPLACE_EXISTING);
File executor = new File(tmpMockExecutor);
if (!FileUtil.canExecute(executor)) {
FileUtil.setExecutable(executor, true);
}
String executorAbsolutePath = executor.getAbsolutePath();
conf.set(YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH,
executorAbsolutePath);
}
@Before
public void setup() throws IOException, ContainerExecutionException {
assumeTrue(!Path.WINDOWS);
tmpMockExecutor = System.getProperty("test.build.data") +
"/tmp-mock-container-executor";
Configuration conf = new Configuration();
LinuxContainerRuntime linuxContainerRuntime;
setupMockExecutor(MOCK_EXECUTOR, conf);
linuxContainerRuntime = new DefaultLinuxContainerRuntime(
PrivilegedOperationExecutor.getInstance(conf));
dirsHandler = new LocalDirsHandlerService();
dirsHandler.init(conf);
linuxContainerRuntime.initialize(conf);
mockExec = new LinuxContainerExecutor(linuxContainerRuntime);
mockExec.setConf(conf);
}
@After
public void tearDown() {
deleteMockParamFile();
}
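  // Expected layout of the recorded launch command (asserted below):
  // run-as user, submitting user, command code, appId, containerId, workdir,
  // container script, tokens file, pid file, local dirs, log dirs, cgroups.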
@Test
public void testContainerLaunch() throws IOException {
String appSubmitter = "nobody";
String cmd = String.valueOf(
PrivilegedOperation.RunAsUserCommand.LAUNCH_CONTAINER.getValue());
String appId = "APP_ID";
String containerId = "CONTAINER_ID";
Container container = mock(Container.class);
ContainerId cId = mock(ContainerId.class);
ContainerLaunchContext context = mock(ContainerLaunchContext.class);
HashMap<String, String> env = new HashMap<String,String>();
when(container.getContainerId()).thenReturn(cId);
when(container.getLaunchContext()).thenReturn(context);
when(cId.toString()).thenReturn(containerId);
when(context.getEnvironment()).thenReturn(env);
Path scriptPath = new Path("file:///bin/echo");
Path tokensPath = new Path("file:///dev/null");
Path workDir = new Path("/tmp");
Path pidFile = new Path(workDir, "pid.txt");
mockExec.activateContainer(cId, pidFile);
int ret = mockExec.launchContainer(new ContainerStartContext.Builder()
.setContainer(container)
.setNmPrivateContainerScriptPath(scriptPath)
.setNmPrivateTokensPath(tokensPath)
.setUser(appSubmitter)
.setAppId(appId)
.setContainerWorkDir(workDir)
.setLocalDirs(dirsHandler.getLocalDirs())
.setLogDirs(dirsHandler.getLogDirs())
.build());
assertEquals(0, ret);
assertEquals(Arrays.asList(YarnConfiguration.DEFAULT_NM_NONSECURE_MODE_LOCAL_USER,
appSubmitter, cmd, appId, containerId,
workDir.toString(), "/bin/echo", "/dev/null", pidFile.toString(),
StringUtils.join(PrivilegedOperation.LINUX_FILE_PATH_SEPARATOR,
dirsHandler.getLocalDirs()),
StringUtils.join(PrivilegedOperation.LINUX_FILE_PATH_SEPARATOR,
dirsHandler.getLogDirs()), "cgroups=none"),
readMockParams());
}
@Test (timeout = 5000)
public void testContainerLaunchWithPriority() throws IOException {
    // set the scheduler priority and verify the launch command is prefixed
    // with "nice -n <priority>"
Configuration conf = new Configuration();
setupMockExecutor(MOCK_EXECUTOR, conf);
conf.setInt(YarnConfiguration.NM_CONTAINER_EXECUTOR_SCHED_PRIORITY, 2);
mockExec.setConf(conf);
List<String> command = new ArrayList<String>();
mockExec.addSchedPriorityCommand(command);
assertEquals("first should be nice", "nice", command.get(0));
assertEquals("second should be -n", "-n", command.get(1));
assertEquals("third should be the priority", Integer.toString(2),
command.get(2));
testContainerLaunch();
}
@Test (timeout = 5000)
public void testLaunchCommandWithoutPriority() throws IOException {
    // make sure the command doesn't contain "nice -n" when no priority
    // is specified
List<String> command = new ArrayList<String>();
mockExec.addSchedPriorityCommand(command);
assertEquals("addSchedPriority should be empty", 0, command.size());
}
@Test (timeout = 5000)
public void testStartLocalizer() throws IOException {
InetSocketAddress address = InetSocketAddress.createUnresolved("localhost", 8040);
Path nmPrivateCTokensPath= new Path("file:///bin/nmPrivateCTokensPath");
try {
mockExec.startLocalizer(new LocalizerStartContext.Builder()
.setNmPrivateContainerTokens(nmPrivateCTokensPath)
.setNmAddr(address)
.setUser("test")
.setAppId("application_0")
.setLocId("12345")
.setDirsHandler(dirsHandler)
.build());
      List<String> result = readMockParams();
      Assert.assertEquals(18, result.size());
      Assert.assertEquals(YarnConfiguration.DEFAULT_NM_NONSECURE_MODE_LOCAL_USER,
          result.get(0));
      Assert.assertEquals("test", result.get(1));
      Assert.assertEquals("0", result.get(2));
      Assert.assertEquals("application_0", result.get(3));
      Assert.assertEquals("/bin/nmPrivateCTokensPath", result.get(4));
      Assert.assertEquals("-classpath", result.get(8));
      Assert.assertEquals("-Xmx256m", result.get(11));
      Assert.assertEquals("org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer", result.get(12));
      Assert.assertEquals("test", result.get(13));
      Assert.assertEquals("application_0", result.get(14));
      Assert.assertEquals("12345", result.get(15));
      Assert.assertEquals("localhost", result.get(16));
      Assert.assertEquals("8040", result.get(17));
} catch (InterruptedException e) {
LOG.error("Error:"+e.getMessage(),e);
Assert.fail();
}
}
@Test
public void testContainerLaunchError()
throws IOException, ContainerExecutionException {
    // reinitialize the executor with a mock that exits with an error
Configuration conf = new Configuration();
setupMockExecutor(MOCK_EXECUTOR_WITH_ERROR, conf);
conf.set(YarnConfiguration.NM_LOCAL_DIRS, "file:///bin/echo");
conf.set(YarnConfiguration.NM_LOG_DIRS, "file:///dev/null");
LinuxContainerExecutor exec;
LinuxContainerRuntime linuxContainerRuntime = new
DefaultLinuxContainerRuntime(PrivilegedOperationExecutor.getInstance
(conf));
linuxContainerRuntime.initialize(conf);
exec = new LinuxContainerExecutor(linuxContainerRuntime);
mockExec = spy(exec);
doAnswer(
new Answer() {
@Override
public Object answer(InvocationOnMock invocationOnMock)
throws Throwable {
String diagnostics = (String) invocationOnMock.getArguments()[0];
assertTrue("Invalid Diagnostics message: " + diagnostics,
diagnostics.contains("badcommand"));
return null;
}
}
).when(mockExec).logOutput(any(String.class));
dirsHandler = new LocalDirsHandlerService();
dirsHandler.init(conf);
mockExec.setConf(conf);
String appSubmitter = "nobody";
String cmd = String
.valueOf(PrivilegedOperation.RunAsUserCommand.LAUNCH_CONTAINER.getValue());
String appId = "APP_ID";
String containerId = "CONTAINER_ID";
Container container = mock(Container.class);
ContainerId cId = mock(ContainerId.class);
ContainerLaunchContext context = mock(ContainerLaunchContext.class);
HashMap<String, String> env = new HashMap<String, String>();
when(container.getContainerId()).thenReturn(cId);
when(container.getLaunchContext()).thenReturn(context);
doAnswer(
new Answer() {
@Override
public Object answer(InvocationOnMock invocationOnMock)
throws Throwable {
ContainerDiagnosticsUpdateEvent event =
(ContainerDiagnosticsUpdateEvent) invocationOnMock
.getArguments()[0];
assertTrue("Invalid Diagnostics message: " +
event.getDiagnosticsUpdate(),
event.getDiagnosticsUpdate().contains("badcommand"));
return null;
}
}
).when(container).handle(any(ContainerDiagnosticsUpdateEvent.class));
when(cId.toString()).thenReturn(containerId);
when(context.getEnvironment()).thenReturn(env);
Path scriptPath = new Path("file:///bin/echo");
Path tokensPath = new Path("file:///dev/null");
Path workDir = new Path("/tmp");
Path pidFile = new Path(workDir, "pid.txt");
mockExec.activateContainer(cId, pidFile);
int ret = mockExec.launchContainer(new ContainerStartContext.Builder()
.setContainer(container)
.setNmPrivateContainerScriptPath(scriptPath)
.setNmPrivateTokensPath(tokensPath)
.setUser(appSubmitter)
.setAppId(appId)
.setContainerWorkDir(workDir)
.setLocalDirs(dirsHandler.getLocalDirs())
.setLogDirs(dirsHandler.getLogDirs())
.build());
Assert.assertNotSame(0, ret);
assertEquals(Arrays.asList(YarnConfiguration.DEFAULT_NM_NONSECURE_MODE_LOCAL_USER,
appSubmitter, cmd, appId, containerId,
workDir.toString(), "/bin/echo", "/dev/null", pidFile.toString(),
StringUtils.join(PrivilegedOperation.LINUX_FILE_PATH_SEPARATOR,
dirsHandler.getLocalDirs()),
StringUtils.join(PrivilegedOperation.LINUX_FILE_PATH_SEPARATOR,
dirsHandler.getLogDirs()),
"cgroups=none"), readMockParams());
}
@Test
public void testInit() throws Exception {
mockExec.init();
assertEquals(Arrays.asList("--checksetup"), readMockParams());
}
@Test
public void testContainerKill() throws IOException {
String appSubmitter = "nobody";
String cmd = String.valueOf(
PrivilegedOperation.RunAsUserCommand.SIGNAL_CONTAINER.getValue());
ContainerExecutor.Signal signal = ContainerExecutor.Signal.QUIT;
String sigVal = String.valueOf(signal.getValue());
Container container = mock(Container.class);
ContainerId cId = mock(ContainerId.class);
ContainerLaunchContext context = mock(ContainerLaunchContext.class);
when(container.getContainerId()).thenReturn(cId);
when(container.getLaunchContext()).thenReturn(context);
mockExec.signalContainer(new ContainerSignalContext.Builder()
.setContainer(container)
.setUser(appSubmitter)
.setPid("1000")
.setSignal(signal)
.build());
assertEquals(Arrays.asList(YarnConfiguration.DEFAULT_NM_NONSECURE_MODE_LOCAL_USER,
appSubmitter, cmd, "1000", sigVal),
readMockParams());
}
@Test
public void testDeleteAsUser() throws IOException {
String appSubmitter = "nobody";
String cmd = String.valueOf(
PrivilegedOperation.RunAsUserCommand.DELETE_AS_USER.getValue());
Path dir = new Path("/tmp/testdir");
Path testFile = new Path("testfile");
Path baseDir0 = new Path("/grid/0/BaseDir");
Path baseDir1 = new Path("/grid/1/BaseDir");
mockExec.deleteAsUser(new DeletionAsUserContext.Builder()
.setUser(appSubmitter)
.setSubDir(dir)
.build());
assertEquals(
Arrays.asList(YarnConfiguration.DEFAULT_NM_NONSECURE_MODE_LOCAL_USER,
appSubmitter, cmd, "/tmp/testdir"),
readMockParams());
mockExec.deleteAsUser(new DeletionAsUserContext.Builder()
.setUser(appSubmitter)
.build());
assertEquals(
Arrays.asList(YarnConfiguration.DEFAULT_NM_NONSECURE_MODE_LOCAL_USER,
appSubmitter, cmd, ""),
readMockParams());
mockExec.deleteAsUser(new DeletionAsUserContext.Builder()
.setUser(appSubmitter)
.setSubDir(testFile)
.setBasedirs(baseDir0, baseDir1)
.build());
assertEquals(
Arrays.asList(YarnConfiguration.DEFAULT_NM_NONSECURE_MODE_LOCAL_USER,
appSubmitter, cmd, testFile.toString(), baseDir0.toString(),
baseDir1.toString()),
readMockParams());
mockExec.deleteAsUser(new DeletionAsUserContext.Builder()
.setUser(appSubmitter)
.setBasedirs(baseDir0, baseDir1)
.build());
assertEquals(
Arrays.asList(YarnConfiguration.DEFAULT_NM_NONSECURE_MODE_LOCAL_USER,
appSubmitter, cmd, "", baseDir0.toString(), baseDir1.toString()),
readMockParams());
Configuration conf = new Configuration();
setupMockExecutor(MOCK_EXECUTOR, conf);
mockExec.setConf(conf);
mockExec.deleteAsUser(new DeletionAsUserContext.Builder()
.setUser(appSubmitter)
.setSubDir(dir)
.build());
assertEquals(
Arrays.asList(YarnConfiguration.DEFAULT_NM_NONSECURE_MODE_LOCAL_USER,
appSubmitter, cmd, "/tmp/testdir"),
readMockParams());
mockExec.deleteAsUser(new DeletionAsUserContext.Builder()
.setUser(appSubmitter)
.build());
assertEquals(
Arrays.asList(YarnConfiguration.DEFAULT_NM_NONSECURE_MODE_LOCAL_USER,
appSubmitter, cmd, ""),
readMockParams());
mockExec.deleteAsUser(new DeletionAsUserContext.Builder()
.setUser(appSubmitter)
.setSubDir(testFile)
.setBasedirs(baseDir0, baseDir1)
.build());
assertEquals(
Arrays.asList(YarnConfiguration.DEFAULT_NM_NONSECURE_MODE_LOCAL_USER,
appSubmitter, cmd, testFile.toString(), baseDir0.toString(),
baseDir1.toString()),
readMockParams());
mockExec.deleteAsUser(new DeletionAsUserContext.Builder()
.setUser(appSubmitter)
.setBasedirs(baseDir0, baseDir1)
.build());
assertEquals(Arrays.asList(YarnConfiguration.DEFAULT_NM_NONSECURE_MODE_LOCAL_USER,
appSubmitter, cmd, "", baseDir0.toString(), baseDir1.toString()),
readMockParams());
}
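  // A minimal refactoring sketch (not part of the original test): the
  // repeated delete-then-verify steps in testDeleteAsUser could be collapsed
  // into a hypothetical helper along these lines.
  private void deleteAndVerify(DeletionAsUserContext ctx,
      List<String> expectedParams) throws IOException {
    mockExec.deleteAsUser(ctx);
    assertEquals(expectedParams, readMockParams());
  }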
}
| 18,313 | 36.838843 | 133 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestRPCFactories.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager;
import java.net.InetSocketAddress;
import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerHeartbeatResponse;
import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerStatus;
import org.junit.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.factories.impl.pb.RpcClientFactoryPBImpl;
import org.apache.hadoop.yarn.factories.impl.pb.RpcServerFactoryPBImpl;
import org.apache.hadoop.yarn.server.nodemanager.api.LocalizationProtocol;
import org.junit.Test;
public class TestRPCFactories {
@Test
public void test() {
testPbServerFactory();
testPbClientFactory();
}
private void testPbServerFactory() {
InetSocketAddress addr = new InetSocketAddress(0);
Configuration conf = new Configuration();
LocalizationProtocol instance = new LocalizationProtocolTestImpl();
Server server = null;
try {
server =
RpcServerFactoryPBImpl.get().getServer(
LocalizationProtocol.class, instance, addr, conf, null, 1);
server.start();
} catch (YarnRuntimeException e) {
e.printStackTrace();
Assert.fail("Failed to create server");
} finally {
if (server != null) {
server.stop();
}
}
}
private void testPbClientFactory() {
InetSocketAddress addr = new InetSocketAddress(0);
    System.err.println(addr.getHostName() + ":" + addr.getPort());
Configuration conf = new Configuration();
LocalizationProtocol instance = new LocalizationProtocolTestImpl();
Server server = null;
try {
server =
RpcServerFactoryPBImpl.get().getServer(
LocalizationProtocol.class, instance, addr, conf, null, 1);
server.start();
System.err.println(server.getListenerAddress());
System.err.println(NetUtils.getConnectAddress(server));
try {
LocalizationProtocol client = (LocalizationProtocol)
RpcClientFactoryPBImpl.get().getClient(
LocalizationProtocol.class, 1,
NetUtils.getConnectAddress(server), conf);
Assert.assertNotNull(client);
} catch (YarnRuntimeException e) {
e.printStackTrace();
Assert.fail("Failed to create client");
}
} catch (YarnRuntimeException e) {
e.printStackTrace();
Assert.fail("Failed to create server");
    } finally {
      if (server != null) {
        server.stop();
      }
    }
}
public class LocalizationProtocolTestImpl implements LocalizationProtocol {
@Override
public LocalizerHeartbeatResponse heartbeat(LocalizerStatus status) {
return null;
}
}
}
| 3,616 | 31.294643 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/LocalRMInterface.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager;
import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.server.api.ResourceTracker;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerResponse;
import org.apache.hadoop.yarn.server.api.records.MasterKey;
import org.apache.hadoop.yarn.server.api.records.impl.pb.MasterKeyPBImpl;
public class LocalRMInterface implements ResourceTracker {
private static final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
@Override
public RegisterNodeManagerResponse registerNodeManager(
RegisterNodeManagerRequest request) throws YarnException,
IOException {
RegisterNodeManagerResponse response = recordFactory.newRecordInstance(RegisterNodeManagerResponse.class);
MasterKey masterKey = new MasterKeyPBImpl();
masterKey.setKeyId(123);
    masterKey.setBytes(ByteBuffer.wrap(new byte[] { (byte) 123 }));
response.setContainerTokenMasterKey(masterKey);
response.setNMTokenMasterKey(masterKey);
return response;
}
@Override
public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request)
throws YarnException, IOException {
NodeHeartbeatResponse response = recordFactory.newRecordInstance(NodeHeartbeatResponse.class);
return response;
}
@Override
public UnRegisterNodeManagerResponse unRegisterNodeManager(
UnRegisterNodeManagerRequest request) throws YarnException, IOException {
UnRegisterNodeManagerResponse response = recordFactory
.newRecordInstance(UnRegisterNodeManagerResponse.class);
return response;
}
}
| 3,061 | 42.742857 | 110 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager;
import java.io.File;
import java.io.IOException;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.service.Service.STATE;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
public class TestLocalDirsHandlerService {
private static final File testDir = new File("target",
TestDirectoryCollection.class.getName()).getAbsoluteFile();
private static final File testFile = new File(testDir, "testfile");
@Before
public void setup() throws IOException {
testDir.mkdirs();
testFile.createNewFile();
}
@After
public void teardown() {
FileUtil.fullyDelete(testDir);
}
@Test
public void testDirStructure() throws Exception {
Configuration conf = new YarnConfiguration();
String localDir1 = new File("file:///" + testDir, "localDir1").getPath();
conf.set(YarnConfiguration.NM_LOCAL_DIRS, localDir1);
String logDir1 = new File("file:///" + testDir, "logDir1").getPath();
conf.set(YarnConfiguration.NM_LOG_DIRS, logDir1);
LocalDirsHandlerService dirSvc = new LocalDirsHandlerService();
dirSvc.init(conf);
Assert.assertEquals(1, dirSvc.getLocalDirs().size());
dirSvc.close();
}
@Test
public void testValidPathsDirHandlerService() throws Exception {
Configuration conf = new YarnConfiguration();
String localDir1 = new File("file:///" + testDir, "localDir1").getPath();
String localDir2 = new File("hdfs:///" + testDir, "localDir2").getPath();
conf.set(YarnConfiguration.NM_LOCAL_DIRS, localDir1 + "," + localDir2);
String logDir1 = new File("file:///" + testDir, "logDir1").getPath();
conf.set(YarnConfiguration.NM_LOG_DIRS, logDir1);
LocalDirsHandlerService dirSvc = new LocalDirsHandlerService();
try {
dirSvc.init(conf);
Assert.fail("Service should have thrown an exception due to wrong URI");
} catch (YarnRuntimeException e) {
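      // expected: an hdfs:// URI is not a valid local directory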
}
Assert.assertEquals("Service should not be inited",
STATE.STOPPED,
dirSvc.getServiceState());
dirSvc.close();
}
@Test
public void testGetFullDirs() throws Exception {
Configuration conf = new YarnConfiguration();
conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
FileContext localFs = FileContext.getLocalFSFileContext(conf);
String localDir1 = new File(testDir, "localDir1").getPath();
String localDir2 = new File(testDir, "localDir2").getPath();
String logDir1 = new File(testDir, "logDir1").getPath();
String logDir2 = new File(testDir, "logDir2").getPath();
Path localDir1Path = new Path(localDir1);
Path logDir1Path = new Path(logDir1);
FsPermission dirPermissions = new FsPermission((short) 0410);
localFs.mkdir(localDir1Path, dirPermissions, true);
localFs.mkdir(logDir1Path, dirPermissions, true);
conf.set(YarnConfiguration.NM_LOCAL_DIRS, localDir1 + "," + localDir2);
conf.set(YarnConfiguration.NM_LOG_DIRS, logDir1 + "," + logDir2);
conf.setFloat(YarnConfiguration.NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE,
0.0f);
NodeManagerMetrics nm = NodeManagerMetrics.create();
LocalDirsHandlerService dirSvc = new LocalDirsHandlerService(nm);
dirSvc.init(conf);
Assert.assertEquals(0, dirSvc.getLocalDirs().size());
Assert.assertEquals(0, dirSvc.getLogDirs().size());
Assert.assertEquals(1, dirSvc.getDiskFullLocalDirs().size());
Assert.assertEquals(1, dirSvc.getDiskFullLogDirs().size());
// check the metrics
Assert.assertEquals(2, nm.getBadLocalDirs());
Assert.assertEquals(2, nm.getBadLogDirs());
Assert.assertEquals(0, nm.getGoodLocalDirsDiskUtilizationPerc());
Assert.assertEquals(0, nm.getGoodLogDirsDiskUtilizationPerc());
conf.setFloat(YarnConfiguration.NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE,
100.0f);
nm = NodeManagerMetrics.create();
dirSvc = new LocalDirsHandlerService(nm);
dirSvc.init(conf);
Assert.assertEquals(1, dirSvc.getLocalDirs().size());
Assert.assertEquals(1, dirSvc.getLogDirs().size());
Assert.assertEquals(0, dirSvc.getDiskFullLocalDirs().size());
Assert.assertEquals(0, dirSvc.getDiskFullLogDirs().size());
// check the metrics
File dir = new File(localDir1);
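    // good-dir utilization is reported as used space in percent of total:
    // (total - usable) * 100 / total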
int utilizationPerc =
(int) ((dir.getTotalSpace() - dir.getUsableSpace()) * 100 /
dir.getTotalSpace());
Assert.assertEquals(1, nm.getBadLocalDirs());
Assert.assertEquals(1, nm.getBadLogDirs());
Assert.assertEquals(utilizationPerc,
nm.getGoodLocalDirsDiskUtilizationPerc());
Assert
.assertEquals(utilizationPerc, nm.getGoodLogDirsDiskUtilizationPerc());
FileUtils.deleteDirectory(new File(localDir1));
FileUtils.deleteDirectory(new File(localDir2));
FileUtils.deleteDirectory(new File(logDir1));
FileUtils.deleteDirectory(new File(logDir2));
dirSvc.close();
}
}
| 6,206 | 40.10596 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager;
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.ipc.TestRPC.TestImpl;
import org.apache.hadoop.ipc.TestRPC.TestProtocol;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger.Keys;
import org.junit.Before;
import org.junit.Test;
/**
* Tests {@link NMAuditLogger}.
*/
public class TestNMAuditLogger {
private static final String USER = "test";
private static final String OPERATION = "oper";
private static final String TARGET = "tgt";
private static final String DESC = "description of an audit log";
private static final ApplicationId APPID = mock(ApplicationId.class);
private static final ContainerId CONTAINERID = mock(ContainerId.class);
@Before
public void setUp() throws Exception {
when(APPID.toString()).thenReturn("app_1");
when(CONTAINERID.toString()).thenReturn("container_1");
}
/**
* Test the AuditLog format with key-val pair.
*/
@Test
public void testKeyValLogFormat() throws Exception {
StringBuilder actLog = new StringBuilder();
StringBuilder expLog = new StringBuilder();
// add the first k=v pair and check
NMAuditLogger.start(Keys.USER, USER, actLog);
expLog.append("USER=test");
assertEquals(expLog.toString(), actLog.toString());
// append another k1=v1 pair to already added k=v and test
NMAuditLogger.add(Keys.OPERATION, OPERATION, actLog);
expLog.append("\tOPERATION=oper");
assertEquals(expLog.toString(), actLog.toString());
// append another k1=null pair and test
NMAuditLogger.add(Keys.APPID, (String)null, actLog);
expLog.append("\tAPPID=null");
assertEquals(expLog.toString(), actLog.toString());
    // now add the target and check the final string
NMAuditLogger.add(Keys.TARGET, TARGET, actLog);
expLog.append("\tTARGET=tgt");
assertEquals(expLog.toString(), actLog.toString());
}
/**
* Test the AuditLog format for successful events.
*/
private void testSuccessLogFormatHelper(boolean checkIP,
ApplicationId appId, ContainerId containerId) {
    // build the expected log line, including the caller IP only when
    // checkIP is set
String sLog = NMAuditLogger.createSuccessLog(USER, OPERATION, TARGET,
appId, containerId);
StringBuilder expLog = new StringBuilder();
expLog.append("USER=test\t");
if (checkIP) {
InetAddress ip = Server.getRemoteIp();
expLog.append(Keys.IP.name() + "=" + ip.getHostAddress() + "\t");
}
expLog.append("OPERATION=oper\tTARGET=tgt\tRESULT=SUCCESS");
if (appId != null) {
expLog.append("\tAPPID=app_1");
}
if (containerId != null) {
expLog.append("\tCONTAINERID=container_1");
}
assertEquals(expLog.toString(), sLog);
}
/**
* Test the AuditLog format for successful events passing nulls.
*/
private void testSuccessLogNulls(boolean checkIP) {
String sLog = NMAuditLogger.createSuccessLog(null, null, null,
null, null);
StringBuilder expLog = new StringBuilder();
expLog.append("USER=null\t");
if (checkIP) {
InetAddress ip = Server.getRemoteIp();
expLog.append(Keys.IP.name() + "=" + ip.getHostAddress() + "\t");
}
expLog.append("OPERATION=null\tTARGET=null\tRESULT=SUCCESS");
assertEquals(expLog.toString(), sLog);
}
/**
* Test the AuditLog format for successful events with the various
* parameters.
*/
private void testSuccessLogFormat(boolean checkIP) {
testSuccessLogFormatHelper(checkIP, null, null);
testSuccessLogFormatHelper(checkIP, APPID, null);
testSuccessLogFormatHelper(checkIP, null, CONTAINERID);
testSuccessLogFormatHelper(checkIP, APPID, CONTAINERID);
testSuccessLogNulls(checkIP);
}
/**
* Test the AuditLog format for failure events.
*/
private void testFailureLogFormatHelper(boolean checkIP, ApplicationId appId,
ContainerId containerId) {
String fLog =
NMAuditLogger.createFailureLog(USER, OPERATION, TARGET, DESC, appId,
containerId);
StringBuilder expLog = new StringBuilder();
expLog.append("USER=test\t");
if (checkIP) {
InetAddress ip = Server.getRemoteIp();
expLog.append(Keys.IP.name() + "=" + ip.getHostAddress() + "\t");
}
expLog.append("OPERATION=oper\tTARGET=tgt\tRESULT=FAILURE\t");
expLog.append("DESCRIPTION=description of an audit log");
if (appId != null) {
expLog.append("\tAPPID=app_1");
}
if (containerId != null) {
expLog.append("\tCONTAINERID=container_1");
}
assertEquals(expLog.toString(), fLog);
}
/**
* Test the AuditLog format for failure events with the various
* parameters.
*/
private void testFailureLogFormat(boolean checkIP) {
testFailureLogFormatHelper(checkIP, null, null);
testFailureLogFormatHelper(checkIP, APPID, null);
testFailureLogFormatHelper(checkIP, null, CONTAINERID);
testFailureLogFormatHelper(checkIP, APPID, CONTAINERID);
}
/**
* Test {@link NMAuditLogger} without IP set.
*/
@Test
public void testNMAuditLoggerWithoutIP() throws Exception {
// test without ip
testSuccessLogFormat(false);
testFailureLogFormat(false);
}
/**
* A special extension of {@link TestImpl} RPC server with
* {@link TestImpl#ping()} testing the audit logs.
*/
private class MyTestRPCServer extends TestImpl {
@Override
public void ping() {
// test with ip set
testSuccessLogFormat(true);
testFailureLogFormat(true);
}
}
/**
* Test {@link NMAuditLogger} with IP set.
*/
@Test
public void testNMAuditLoggerWithIP() throws Exception {
Configuration conf = new Configuration();
// start the IPC server
Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
.setInstance(new MyTestRPCServer()).setBindAddress("0.0.0.0")
.setPort(0).setNumHandlers(5).setVerbose(true).build();
server.start();
InetSocketAddress addr = NetUtils.getConnectAddress(server);
// Make a client connection and test the audit log
TestProtocol proxy = (TestProtocol)RPC.getProxy(TestProtocol.class,
TestProtocol.versionID, addr, conf);
// Start the testcase
proxy.ping();
server.stop();
}
}
| 7,461 | 32.3125 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDefaultContainerExecutor.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager;
import static org.apache.hadoop.fs.CreateFlag.CREATE;
import static org.apache.hadoop.fs.CreateFlag.OVERWRITE;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.doAnswer;
import static org.junit.Assert.assertTrue;
import java.io.BufferedWriter;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.InputStream;
import java.io.IOException;
import java.io.LineNumberReader;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.AbstractFileSystem;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.FsStatus;
import org.apache.hadoop.fs.Options.CreateOpts;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerDiagnosticsUpdateEvent;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.FakeFSDataInputStream;
import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerStartContext;
import org.apache.hadoop.yarn.server.nodemanager.executor.DeletionAsUserContext;
import org.apache.hadoop.yarn.server.nodemanager.executor.LocalizerStartContext;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
public class TestDefaultContainerExecutor {
/*
// XXX FileContext cannot be mocked to do this
static FSDataInputStream getRandomStream(Random r, int len)
throws IOException {
byte[] bytes = new byte[len];
r.nextBytes(bytes);
DataInputBuffer buf = new DataInputBuffer();
buf.reset(bytes, 0, bytes.length);
return new FSDataInputStream(new FakeFSDataInputStream(buf));
}
class PathEndsWith extends ArgumentMatcher<Path> {
final String suffix;
PathEndsWith(String suffix) {
this.suffix = suffix;
}
@Override
public boolean matches(Object o) {
return
suffix.equals(((Path)o).getName());
}
}
DataOutputBuffer mockStream(
AbstractFileSystem spylfs, Path p, Random r, int len)
throws IOException {
DataOutputBuffer dob = new DataOutputBuffer();
doReturn(getRandomStream(r, len)).when(spylfs).open(p);
doReturn(new FileStatus(len, false, -1, -1L, -1L, p)).when(
spylfs).getFileStatus(argThat(new PathEndsWith(p.getName())));
doReturn(new FSDataOutputStream(dob)).when(spylfs).createInternal(
argThat(new PathEndsWith(p.getName())),
eq(EnumSet.of(OVERWRITE)),
Matchers.<FsPermission>anyObject(), anyInt(), anyShort(), anyLong(),
Matchers.<Progressable>anyObject(), anyInt(), anyBoolean());
return dob;
}
*/
private static Path BASE_TMP_PATH = new Path("target",
TestDefaultContainerExecutor.class.getSimpleName());
@AfterClass
public static void deleteTmpFiles() throws IOException {
FileContext lfs = FileContext.getLocalFSFileContext();
try {
lfs.delete(BASE_TMP_PATH, true);
} catch (FileNotFoundException e) {
}
}
byte[] createTmpFile(Path dst, Random r, int len)
throws IOException {
// use unmodified local context
FileContext lfs = FileContext.getLocalFSFileContext();
dst = lfs.makeQualified(dst);
lfs.mkdir(dst.getParent(), null, true);
byte[] bytes = new byte[len];
FSDataOutputStream out = null;
try {
out = lfs.create(dst, EnumSet.of(CREATE, OVERWRITE));
r.nextBytes(bytes);
out.write(bytes);
} finally {
if (out != null) out.close();
}
return bytes;
}
@Test
public void testDirPermissions() throws Exception {
deleteTmpFiles();
final String user = "somebody";
final String appId = "app_12345_123";
final FsPermission userCachePerm = new FsPermission(
DefaultContainerExecutor.USER_PERM);
final FsPermission appCachePerm = new FsPermission(
DefaultContainerExecutor.APPCACHE_PERM);
final FsPermission fileCachePerm = new FsPermission(
DefaultContainerExecutor.FILECACHE_PERM);
final FsPermission appDirPerm = new FsPermission(
DefaultContainerExecutor.APPDIR_PERM);
final FsPermission logDirPerm = new FsPermission(
DefaultContainerExecutor.LOGDIR_PERM);
List<String> localDirs = new ArrayList<String>();
localDirs.add(new Path(BASE_TMP_PATH, "localDirA").toString());
localDirs.add(new Path(BASE_TMP_PATH, "localDirB").toString());
List<String> logDirs = new ArrayList<String>();
logDirs.add(new Path(BASE_TMP_PATH, "logDirA").toString());
logDirs.add(new Path(BASE_TMP_PATH, "logDirB").toString());
Configuration conf = new Configuration();
conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
FileContext lfs = FileContext.getLocalFSFileContext(conf);
DefaultContainerExecutor executor = new DefaultContainerExecutor(lfs);
executor.init();
try {
executor.createUserLocalDirs(localDirs, user);
executor.createUserCacheDirs(localDirs, user);
executor.createAppDirs(localDirs, user, appId);
for (String dir : localDirs) {
FileStatus stats = lfs.getFileStatus(
new Path(new Path(dir, ContainerLocalizer.USERCACHE), user));
Assert.assertEquals(userCachePerm, stats.getPermission());
}
for (String dir : localDirs) {
Path userCachePath = new Path(
new Path(dir, ContainerLocalizer.USERCACHE), user);
Path appCachePath = new Path(userCachePath,
ContainerLocalizer.APPCACHE);
FileStatus stats = lfs.getFileStatus(appCachePath);
Assert.assertEquals(appCachePerm, stats.getPermission());
stats = lfs.getFileStatus(
new Path(userCachePath, ContainerLocalizer.FILECACHE));
Assert.assertEquals(fileCachePerm, stats.getPermission());
stats = lfs.getFileStatus(new Path(appCachePath, appId));
Assert.assertEquals(appDirPerm, stats.getPermission());
}
executor.createAppLogDirs(appId, logDirs, user);
for (String dir : logDirs) {
FileStatus stats = lfs.getFileStatus(new Path(dir, appId));
Assert.assertEquals(logDirPerm, stats.getPermission());
}
} finally {
deleteTmpFiles();
}
}
@Test
public void testContainerLaunchError()
throws IOException, InterruptedException {
if (Shell.WINDOWS) {
BASE_TMP_PATH =
new Path(new File("target").getAbsolutePath(),
TestDefaultContainerExecutor.class.getSimpleName());
}
Path localDir = new Path(BASE_TMP_PATH, "localDir");
List<String> localDirs = new ArrayList<String>();
localDirs.add(localDir.toString());
List<String> logDirs = new ArrayList<String>();
Path logDir = new Path(BASE_TMP_PATH, "logDir");
logDirs.add(logDir.toString());
Configuration conf = new Configuration();
conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
conf.set(YarnConfiguration.NM_LOCAL_DIRS, localDir.toString());
conf.set(YarnConfiguration.NM_LOG_DIRS, logDir.toString());
FileContext lfs = FileContext.getLocalFSFileContext(conf);
DefaultContainerExecutor mockExec = spy(new DefaultContainerExecutor(lfs));
mockExec.setConf(conf);
doAnswer(
new Answer() {
@Override
public Object answer(InvocationOnMock invocationOnMock)
throws Throwable {
String diagnostics = (String) invocationOnMock.getArguments()[0];
assertTrue("Invalid Diagnostics message: " + diagnostics,
diagnostics.contains("No such file or directory"));
return null;
}
}
).when(mockExec).logOutput(any(String.class));
String appSubmitter = "nobody";
String appId = "APP_ID";
String containerId = "CONTAINER_ID";
Container container = mock(Container.class);
ContainerId cId = mock(ContainerId.class);
ContainerLaunchContext context = mock(ContainerLaunchContext.class);
HashMap<String, String> env = new HashMap<String, String>();
when(container.getContainerId()).thenReturn(cId);
when(container.getLaunchContext()).thenReturn(context);
try {
doAnswer(new Answer() {
@Override
public Object answer(InvocationOnMock invocationOnMock)
throws Throwable {
ContainerDiagnosticsUpdateEvent event =
(ContainerDiagnosticsUpdateEvent) invocationOnMock
.getArguments()[0];
assertTrue("Invalid Diagnostics message: "
+ event.getDiagnosticsUpdate(),
event.getDiagnosticsUpdate().contains("No such file or directory")
);
return null;
}
}).when(container).handle(any(ContainerDiagnosticsUpdateEvent.class));
when(cId.toString()).thenReturn(containerId);
when(cId.getApplicationAttemptId()).thenReturn(
ApplicationAttemptId.newInstance(ApplicationId.newInstance(0, 1), 0));
when(context.getEnvironment()).thenReturn(env);
mockExec.createUserLocalDirs(localDirs, appSubmitter);
mockExec.createUserCacheDirs(localDirs, appSubmitter);
mockExec.createAppDirs(localDirs, appSubmitter, appId);
mockExec.createAppLogDirs(appId, logDirs, appSubmitter);
Path scriptPath = new Path("file:///bin/echo");
Path tokensPath = new Path("file:///dev/null");
if (Shell.WINDOWS) {
File tmp = new File(BASE_TMP_PATH.toString(), "test_echo.cmd");
BufferedWriter output = new BufferedWriter(new FileWriter(tmp));
output.write("Exit 1");
output.write("Echo No such file or directory 1>&2");
output.close();
scriptPath = new Path(tmp.getAbsolutePath());
tmp = new File(BASE_TMP_PATH.toString(), "tokens");
tmp.createNewFile();
tokensPath = new Path(tmp.getAbsolutePath());
}
Path workDir = localDir;
Path pidFile = new Path(workDir, "pid.txt");
mockExec.init();
mockExec.activateContainer(cId, pidFile);
int ret = mockExec.launchContainer(new ContainerStartContext.Builder()
.setContainer(container)
.setNmPrivateContainerScriptPath(scriptPath)
.setNmPrivateTokensPath(tokensPath)
.setUser(appSubmitter)
.setAppId(appId)
.setContainerWorkDir(workDir)
.setLocalDirs(localDirs)
.setLogDirs(logDirs)
.build());
Assert.assertNotSame(0, ret);
} finally {
mockExec.deleteAsUser(new DeletionAsUserContext.Builder()
.setUser(appSubmitter)
.setSubDir(localDir)
.build());
mockExec.deleteAsUser(new DeletionAsUserContext.Builder()
.setUser(appSubmitter)
.setSubDir(logDir)
.build());
}
}
@Test(timeout = 30000)
public void testStartLocalizer()
throws IOException, InterruptedException {
InetSocketAddress localizationServerAddress;
final Path firstDir = new Path(BASE_TMP_PATH, "localDir1");
List<String> localDirs = new ArrayList<String>();
final Path secondDir = new Path(BASE_TMP_PATH, "localDir2");
List<String> logDirs = new ArrayList<String>();
final Path logDir = new Path(BASE_TMP_PATH, "logDir");
final Path tokenDir = new Path(BASE_TMP_PATH, "tokenDir");
FsPermission perms = new FsPermission((short)0770);
Configuration conf = new Configuration();
localizationServerAddress = conf.getSocketAddr(
YarnConfiguration.NM_BIND_HOST,
YarnConfiguration.NM_LOCALIZER_ADDRESS,
YarnConfiguration.DEFAULT_NM_LOCALIZER_ADDRESS,
YarnConfiguration.DEFAULT_NM_LOCALIZER_PORT);
final FileContext mockLfs = spy(FileContext.getLocalFSFileContext(conf));
final FileContext.Util mockUtil = spy(mockLfs.util());
doAnswer(new Answer() {
@Override
public Object answer(InvocationOnMock invocationOnMock)
throws Throwable {
return mockUtil;
}
}).when(mockLfs).util();
doAnswer(new Answer() {
@Override
public Object answer(InvocationOnMock invocationOnMock)
throws Throwable {
Path dest = (Path) invocationOnMock.getArguments()[1];
if (dest.toString().contains(firstDir.toString())) {
          // throw an exception when copying the token to the first local
          // dir to simulate no space on the first drive
throw new IOException("No space on this drive " +
dest.toString());
} else {
// copy token to the second local dir
DataOutputStream tokenOut = null;
try {
Credentials credentials = new Credentials();
tokenOut = mockLfs.create(dest,
EnumSet.of(CREATE, OVERWRITE));
credentials.writeTokenStorageToStream(tokenOut);
} finally {
if (tokenOut != null) {
tokenOut.close();
}
}
}
return null;
}
}).when(mockUtil).copy(any(Path.class), any(Path.class));
doAnswer(new Answer() {
@Override
public Object answer(InvocationOnMock invocationOnMock)
throws Throwable {
Path p = (Path) invocationOnMock.getArguments()[0];
        // let the second local directory report more free space than
        // the first local directory
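        // (FsStatus arguments are capacity, used, and remaining bytes)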
if (p.toString().contains(firstDir.toString())) {
return new FsStatus(2000, 2000, 0);
} else {
return new FsStatus(1000, 0, 1000);
}
}
}).when(mockLfs).getFsStatus(any(Path.class));
DefaultContainerExecutor mockExec = spy(new DefaultContainerExecutor(
mockLfs));
mockExec.setConf(conf);
localDirs.add(mockLfs.makeQualified(firstDir).toString());
localDirs.add(mockLfs.makeQualified(secondDir).toString());
logDirs.add(mockLfs.makeQualified(logDir).toString());
conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS,
localDirs.toArray(new String[localDirs.size()]));
conf.set(YarnConfiguration.NM_LOG_DIRS, logDir.toString());
mockLfs.mkdir(tokenDir, perms, true);
Path nmPrivateCTokensPath = new Path(tokenDir, "test.tokens");
String appSubmitter = "nobody";
String appId = "APP_ID";
String locId = "LOC_ID";
LocalDirsHandlerService dirsHandler = mock(LocalDirsHandlerService.class);
when(dirsHandler.getLocalDirs()).thenReturn(localDirs);
when(dirsHandler.getLogDirs()).thenReturn(logDirs);
try {
mockExec.startLocalizer(new LocalizerStartContext.Builder()
.setNmPrivateContainerTokens(nmPrivateCTokensPath)
.setNmAddr(localizationServerAddress)
.setUser(appSubmitter)
.setAppId(appId)
.setLocId(locId)
.setDirsHandler(dirsHandler)
.build());
} catch (IOException e) {
Assert.fail("StartLocalizer failed to copy token file " + e);
} finally {
mockExec.deleteAsUser(new DeletionAsUserContext.Builder()
.setUser(appSubmitter)
.setSubDir(firstDir)
.build());
mockExec.deleteAsUser(new DeletionAsUserContext.Builder()
.setUser(appSubmitter)
.setSubDir(secondDir)
.build());
mockExec.deleteAsUser(new DeletionAsUserContext.Builder()
.setUser(appSubmitter)
.setSubDir(logDir)
.build());
deleteTmpFiles();
}
}
// @Test
// public void testInit() throws IOException, InterruptedException {
// Configuration conf = new Configuration();
// AbstractFileSystem spylfs =
// spy(FileContext.getLocalFSFileContext().getDefaultFileSystem());
// // don't actually create dirs
// //doNothing().when(spylfs).mkdir(Matchers.<Path>anyObject(),
// // Matchers.<FsPermission>anyObject(), anyBoolean());
// FileContext lfs = FileContext.getFileContext(spylfs, conf);
//
// Path basedir = new Path("target",
// TestDefaultContainerExecutor.class.getSimpleName());
// List<String> localDirs = new ArrayList<String>();
// List<Path> localPaths = new ArrayList<Path>();
// for (int i = 0; i < 4; ++i) {
// Path p = new Path(basedir, i + "");
// lfs.mkdir(p, null, true);
// localPaths.add(p);
// localDirs.add(p.toString());
// }
// final String user = "yak";
// final String appId = "app_RM_0";
// final Path logDir = new Path(basedir, "logs");
// final Path nmLocal = new Path(basedir, "nmPrivate/" + user + "/" + appId);
// final InetSocketAddress nmAddr = new InetSocketAddress("foobar", 8040);
// System.out.println("NMLOCAL: " + nmLocal);
// Random r = new Random();
//
// /*
// // XXX FileContext cannot be reasonably mocked to do this
// // mock jobFiles copy
// long fileSeed = r.nextLong();
// r.setSeed(fileSeed);
// System.out.println("SEED: " + seed);
// Path fileCachePath = new Path(nmLocal, ApplicationLocalizer.FILECACHE_FILE);
// DataOutputBuffer fileCacheBytes = mockStream(spylfs, fileCachePath, r, 512);
//
// // mock jobTokens copy
// long jobSeed = r.nextLong();
// r.setSeed(jobSeed);
// System.out.println("SEED: " + seed);
// Path jobTokenPath = new Path(nmLocal, ApplicationLocalizer.JOBTOKEN_FILE);
// DataOutputBuffer jobTokenBytes = mockStream(spylfs, jobTokenPath, r, 512);
// */
//
// // create jobFiles
// long fileSeed = r.nextLong();
// r.setSeed(fileSeed);
// System.out.println("SEED: " + fileSeed);
// Path fileCachePath = new Path(nmLocal, ApplicationLocalizer.FILECACHE_FILE);
// byte[] fileCacheBytes = createTmpFile(fileCachePath, r, 512);
//
// // create jobTokens
// long jobSeed = r.nextLong();
// r.setSeed(jobSeed);
// System.out.println("SEED: " + jobSeed);
// Path jobTokenPath = new Path(nmLocal, ApplicationLocalizer.JOBTOKEN_FILE);
// byte[] jobTokenBytes = createTmpFile(jobTokenPath, r, 512);
//
// DefaultContainerExecutor dce = new DefaultContainerExecutor(lfs);
// Localization mockLocalization = mock(Localization.class);
// ApplicationLocalizer spyLocalizer =
// spy(new ApplicationLocalizer(lfs, user, appId, logDir,
// localPaths));
// // ignore cache localization
// doNothing().when(spyLocalizer).localizeFiles(
// Matchers.<Localization>anyObject(), Matchers.<Path>anyObject());
// Path workingDir = lfs.getWorkingDirectory();
// dce.initApplication(spyLocalizer, nmLocal, mockLocalization, localPaths);
// lfs.setWorkingDirectory(workingDir);
//
// for (Path localdir : localPaths) {
// Path userdir = lfs.makeQualified(new Path(localdir,
// new Path(ApplicationLocalizer.USERCACHE, user)));
// // $localdir/$user
// verify(spylfs).mkdir(userdir,
// new FsPermission(ApplicationLocalizer.USER_PERM), true);
// // $localdir/$user/appcache
// Path jobdir = new Path(userdir, ApplicationLocalizer.appcache);
// verify(spylfs).mkdir(jobdir,
// new FsPermission(ApplicationLocalizer.appcache_PERM), true);
// // $localdir/$user/filecache
// Path filedir = new Path(userdir, ApplicationLocalizer.FILECACHE);
// verify(spylfs).mkdir(filedir,
// new FsPermission(ApplicationLocalizer.FILECACHE_PERM), true);
// // $localdir/$user/appcache/$appId
// Path appdir = new Path(jobdir, appId);
// verify(spylfs).mkdir(appdir,
// new FsPermission(ApplicationLocalizer.APPDIR_PERM), true);
// // $localdir/$user/appcache/$appId/work
// Path workdir = new Path(appdir, ApplicationLocalizer.WORKDIR);
// verify(spylfs, atMost(1)).mkdir(workdir, FsPermission.getDefault(), true);
// }
// // $logdir/$appId
// Path logdir = new Path(lfs.makeQualified(logDir), appId);
// verify(spylfs).mkdir(logdir,
// new FsPermission(ApplicationLocalizer.LOGDIR_PERM), true);
// }
}
| 22,038 | 38.70991 | 108 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDeletionService.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.nodemanager.DeletionService.FileDeletionTask;
import org.apache.hadoop.yarn.server.nodemanager.executor.DeletionAsUserContext;
import org.apache.hadoop.yarn.server.nodemanager.recovery.NMMemoryStateStoreService;
import org.junit.AfterClass;
import org.junit.Test;
import org.mockito.Mockito;
public class TestDeletionService {
private static final FileContext lfs = getLfs();
private static final FileContext getLfs() {
try {
return FileContext.getLocalFSFileContext();
} catch (UnsupportedFileSystemException e) {
throw new RuntimeException(e);
}
}
private static final Path base =
lfs.makeQualified(new Path("target", TestDeletionService.class.getName()));
@AfterClass
public static void removeBase() throws IOException {
lfs.delete(base, true);
}
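  /**
   * Builds {@code numpaths} randomly named paths under {@code root}. After
   * each component is appended another random long is drawn, and components
   * keep being appended while that draw is even, so every path gets a random
   * depth. The tests below key the deletion user off the parity of the leaf
   * name.
   */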
public List<Path> buildDirs(Random r, Path root, int numpaths)
throws IOException {
ArrayList<Path> ret = new ArrayList<Path>();
for (int i = 0; i < numpaths; ++i) {
Path p = root;
long name = r.nextLong();
do {
p = new Path(p, "" + name);
name = r.nextLong();
} while (0 == (name % 2));
ret.add(p);
}
return ret;
}
public void createDirs(Path base, List<Path> dirs) throws IOException {
for (Path dir : dirs) {
lfs.mkdir(new Path(base, dir), null, true);
}
}
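  /**
   * Executor stub used by the tests below: it asserts the parity convention
   * for the deletion user (null for even-named dirs, "dingo" for odd ones),
   * delegates to the real {@link DefaultContainerExecutor}, and then verifies
   * that the target path no longer exists.
   */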
static class FakeDefaultContainerExecutor extends DefaultContainerExecutor {
@Override
public void deleteAsUser(DeletionAsUserContext ctx)
throws IOException, InterruptedException {
String user = ctx.getUser();
Path subDir = ctx.getSubDir();
List<Path> basedirs = ctx.getBasedirs();
if ((Long.parseLong(subDir.getName()) % 2) == 0) {
assertNull(user);
} else {
assertEquals("dingo", user);
}
DeletionAsUserContext.Builder builder = new DeletionAsUserContext
.Builder()
.setUser(user)
.setSubDir(subDir);
if (basedirs != null) {
builder.setBasedirs(basedirs.toArray(new Path[basedirs.size()]));
}
super.deleteAsUser(builder.build());
assertFalse(lfs.util().exists(subDir));
}
}
@Test
public void testAbsDelete() throws Exception {
Random r = new Random();
long seed = r.nextLong();
r.setSeed(seed);
System.out.println("SEED: " + seed);
List<Path> dirs = buildDirs(r, base, 20);
createDirs(new Path("."), dirs);
FakeDefaultContainerExecutor exec = new FakeDefaultContainerExecutor();
Configuration conf = new Configuration();
exec.setConf(conf);
DeletionService del = new DeletionService(exec);
del.init(conf);
del.start();
try {
for (Path p : dirs) {
del.delete((Long.parseLong(p.getName()) % 2) == 0 ? null : "dingo",
p, null);
}
int msecToWait = 20 * 1000;
for (Path p : dirs) {
while (msecToWait > 0 && lfs.util().exists(p)) {
Thread.sleep(100);
msecToWait -= 100;
}
assertFalse(lfs.util().exists(p));
}
} finally {
del.stop();
}
}
@Test
public void testRelativeDelete() throws Exception {
Random r = new Random();
long seed = r.nextLong();
r.setSeed(seed);
System.out.println("SEED: " + seed);
List<Path> baseDirs = buildDirs(r, base, 4);
createDirs(new Path("."), baseDirs);
List<Path> content = buildDirs(r, new Path("."), 10);
for (Path b : baseDirs) {
createDirs(b, content);
}
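    // Content paths are relative: the DeletionService resolves each one
    // against every basedir passed to delete(), so each path must vanish
    // from all four base directories.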
DeletionService del =
new DeletionService(new FakeDefaultContainerExecutor());
try {
del.init(new Configuration());
del.start();
for (Path p : content) {
assertTrue(lfs.util().exists(new Path(baseDirs.get(0), p)));
del.delete((Long.parseLong(p.getName()) % 2) == 0 ? null : "dingo",
p, baseDirs.toArray(new Path[4]));
}
int msecToWait = 20 * 1000;
for (Path p : baseDirs) {
for (Path q : content) {
Path fp = new Path(p, q);
while (msecToWait > 0 && lfs.util().exists(fp)) {
Thread.sleep(100);
msecToWait -= 100;
}
assertFalse(lfs.util().exists(fp));
}
}
} finally {
del.stop();
}
}
@Test
public void testNoDelete() throws Exception {
Random r = new Random();
long seed = r.nextLong();
r.setSeed(seed);
System.out.println("SEED: " + seed);
List<Path> dirs = buildDirs(r, base, 20);
createDirs(new Path("."), dirs);
FakeDefaultContainerExecutor exec = new FakeDefaultContainerExecutor();
Configuration conf = new Configuration();
conf.setInt(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC, -1);
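    // With a negative debug delay the service must not delete anything;
    // the assertions below verify that every path survives.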
exec.setConf(conf);
DeletionService del = new DeletionService(exec);
try {
del.init(conf);
del.start();
for (Path p : dirs) {
del.delete((Long.parseLong(p.getName()) % 2) == 0 ? null : "dingo", p,
null);
}
int msecToWait = 20 * 1000;
for (Path p : dirs) {
while (msecToWait > 0 && lfs.util().exists(p)) {
Thread.sleep(100);
msecToWait -= 100;
}
assertTrue(lfs.util().exists(p));
}
} finally {
del.stop();
}
}
@Test
public void testStopWithDelayedTasks() throws Exception {
DeletionService del = new DeletionService(Mockito.mock(ContainerExecutor.class));
Configuration conf = new YarnConfiguration();
conf.setInt(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC, 60);
try {
del.init(conf);
del.start();
del.delete("dingo", new Path("/does/not/exist"));
} finally {
del.stop();
}
assertTrue(del.isTerminated());
}
@Test (timeout=60000)
public void testFileDeletionTaskDependency() throws Exception {
FakeDefaultContainerExecutor exec = new FakeDefaultContainerExecutor();
Configuration conf = new Configuration();
exec.setConf(conf);
DeletionService del = new DeletionService(exec);
del.init(conf);
del.start();
try {
Random r = new Random();
long seed = r.nextLong();
r.setSeed(seed);
System.out.println("SEED: " + seed);
List<Path> dirs = buildDirs(r, base, 2);
createDirs(new Path("."), dirs);
      // First we will try to delete subdirectories that are present. This
      // should then trigger the parent directory to be deleted.
List<Path> subDirs = buildDirs(r, dirs.get(0), 2);
FileDeletionTask dependentDeletionTask =
del.createFileDeletionTask(null, dirs.get(0), new Path[] {});
List<FileDeletionTask> deletionTasks = new ArrayList<FileDeletionTask>();
for (Path subDir : subDirs) {
FileDeletionTask deletionTask =
del.createFileDeletionTask(null, null, new Path[] { subDir });
deletionTask.addFileDeletionTaskDependency(dependentDeletionTask);
deletionTasks.add(deletionTask);
}
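      // Each subdirectory task registers the parent-directory task as its
      // dependent, so the DeletionService runs the parent task only once
      // every child task has reported success. Sketch of the wiring, using
      // the same factory methods this test already uses:
      //   FileDeletionTask parent = del.createFileDeletionTask(user, dir, new Path[] {});
      //   FileDeletionTask child = del.createFileDeletionTask(user, null, new Path[] { sub });
      //   child.addFileDeletionTaskDependency(parent);
      //   del.scheduleFileDeletionTask(child);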
for (FileDeletionTask task : deletionTasks) {
del.scheduleFileDeletionTask(task);
}
int msecToWait = 20 * 1000;
while (msecToWait > 0 && (lfs.util().exists(dirs.get(0)))) {
Thread.sleep(100);
msecToWait -= 100;
}
assertFalse(lfs.util().exists(dirs.get(0)));
      // Now we will try to delete subdirectories; we will mark one of the
      // deletion tasks as a failure, and then the parent directory should
      // not be deleted.
subDirs = buildDirs(r, dirs.get(1), 2);
subDirs.add(new Path(dirs.get(1), "absentFile"));
dependentDeletionTask =
del.createFileDeletionTask(null, dirs.get(1), new Path[] {});
deletionTasks = new ArrayList<FileDeletionTask>();
for (Path subDir : subDirs) {
FileDeletionTask deletionTask =
del.createFileDeletionTask(null, null, new Path[] { subDir });
deletionTask.addFileDeletionTaskDependency(dependentDeletionTask);
deletionTasks.add(deletionTask);
}
      // mark the task for the absent file as a failure.
deletionTasks.get(2).setSuccess(false);
for (FileDeletionTask task : deletionTasks) {
del.scheduleFileDeletionTask(task);
}
msecToWait = 20 * 1000;
while (msecToWait > 0
&& (lfs.util().exists(subDirs.get(0)) || lfs.util().exists(
subDirs.get(1)))) {
Thread.sleep(100);
msecToWait -= 100;
}
assertTrue(lfs.util().exists(dirs.get(1)));
} finally {
del.stop();
}
}
@Test
public void testRecovery() throws Exception {
Random r = new Random();
long seed = r.nextLong();
r.setSeed(seed);
System.out.println("SEED: " + seed);
List<Path> baseDirs = buildDirs(r, base, 4);
createDirs(new Path("."), baseDirs);
List<Path> content = buildDirs(r, new Path("."), 10);
for (Path b : baseDirs) {
createDirs(b, content);
}
Configuration conf = new YarnConfiguration();
conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, true);
conf.setInt(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC, 1);
NMMemoryStateStoreService stateStore = new NMMemoryStateStoreService();
stateStore.init(conf);
stateStore.start();
DeletionService del =
new DeletionService(new FakeDefaultContainerExecutor(), stateStore);
try {
del.init(conf);
del.start();
for (Path p : content) {
assertTrue(lfs.util().exists(new Path(baseDirs.get(0), p)));
del.delete((Long.parseLong(p.getName()) % 2) == 0 ? null : "dingo",
p, baseDirs.toArray(new Path[4]));
}
// restart the deletion service
del.stop();
del = new DeletionService(new FakeDefaultContainerExecutor(),
stateStore);
del.init(conf);
del.start();
// verify paths are still eventually deleted
int msecToWait = 10 * 1000;
for (Path p : baseDirs) {
for (Path q : content) {
Path fp = new Path(p, q);
while (msecToWait > 0 && lfs.util().exists(fp)) {
Thread.sleep(100);
msecToWait -= 100;
}
assertFalse(lfs.util().exists(fp));
}
}
} finally {
del.close();
stateStore.close();
}
}
}
| 11,626 | 31.387187 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.junit.Test;
public class TestNodeManager {
public static final class InvalidContainerExecutor extends
DefaultContainerExecutor {
@Override
public void init() throws IOException {
throw new IOException("dummy executor init called");
}
}
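  // NodeManager.init is expected to surface the executor's init failure as a
  // YarnRuntimeException instead of coming up with a broken executor.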
@Test
public void testContainerExecutorInitCall() {
NodeManager nm = new NodeManager();
YarnConfiguration conf = new YarnConfiguration();
conf.setClass(YarnConfiguration.NM_CONTAINER_EXECUTOR,
InvalidContainerExecutor.class,
ContainerExecutor.class);
try {
nm.init(conf);
fail("Init should fail");
} catch (YarnRuntimeException e) {
//PASS
      assertTrue(e.getCause().getMessage().contains("dummy executor init called"));
} finally {
nm.stop();
}
}
}
| 1,832 | 30.603448 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.io.PrintWriter;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.security.PrivilegedAction;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.junit.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.yarn.api.ContainerManagementProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.URL;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.apache.hadoop.yarn.security.NMTokenIdentifier;
import org.apache.hadoop.yarn.server.api.records.MasterKey;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.TestContainerManager;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
public class TestNodeManagerShutdown {
static final File basedir =
new File("target", TestNodeManagerShutdown.class.getName());
static final File tmpDir = new File(basedir, "tmpDir");
static final File logsDir = new File(basedir, "logs");
static final File remoteLogsDir = new File(basedir, "remotelogs");
static final File nmLocalDir = new File(basedir, "nm0");
static final File processStartFile = new File(tmpDir, "start_file.txt")
.getAbsoluteFile();
static final RecordFactory recordFactory = RecordFactoryProvider
.getRecordFactory(null);
static final String user = "nobody";
private FileContext localFS;
private ContainerId cId;
private NodeManager nm;
@Before
public void setup() throws UnsupportedFileSystemException {
localFS = FileContext.getLocalFSFileContext();
tmpDir.mkdirs();
logsDir.mkdirs();
remoteLogsDir.mkdirs();
nmLocalDir.mkdirs();
// Construct the Container-id
cId = createContainerId();
}
@After
public void tearDown() throws IOException, InterruptedException {
if (nm != null) {
nm.stop();
}
localFS.delete(new Path(basedir.getPath()), true);
}
@Test
public void testStateStoreRemovalOnDecommission() throws IOException {
final File recoveryDir = new File(basedir, "nm-recovery");
nm = new TestNodeManager();
YarnConfiguration conf = createNMConfig();
conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, true);
conf.set(YarnConfiguration.NM_RECOVERY_DIR, recoveryDir.getAbsolutePath());
// verify state store is not removed on normal shutdown
nm.init(conf);
nm.start();
Assert.assertTrue(recoveryDir.exists());
Assert.assertTrue(recoveryDir.isDirectory());
nm.stop();
nm = null;
Assert.assertTrue(recoveryDir.exists());
Assert.assertTrue(recoveryDir.isDirectory());
// verify state store is removed on decommissioned shutdown
nm = new TestNodeManager();
nm.init(conf);
nm.start();
Assert.assertTrue(recoveryDir.exists());
Assert.assertTrue(recoveryDir.isDirectory());
nm.getNMContext().setDecommissioned(true);
nm.stop();
nm = null;
Assert.assertFalse(recoveryDir.exists());
}
@Test
public void testKillContainersOnShutdown() throws IOException,
YarnException {
nm = new TestNodeManager();
nm.init(createNMConfig());
nm.start();
startContainer(nm, cId, localFS, tmpDir, processStartFile);
    final int MAX_TRIES = 20;
int numTries = 0;
while (!processStartFile.exists() && numTries < MAX_TRIES) {
try {
Thread.sleep(500);
      } catch (InterruptedException ex) {
        ex.printStackTrace();
      }
numTries++;
}
nm.stop();
// Now verify the contents of the file. Script generates a message when it
// receives a sigterm so we look for that. We cannot perform this check on
// Windows, because the process is not notified when killed by winutils.
// There is no way for the process to trap and respond. Instead, we can
// verify that the job object with ID matching container ID no longer exists.
if (Shell.WINDOWS) {
Assert.assertFalse("Process is still alive!",
DefaultContainerExecutor.containerIsAlive(cId.toString()));
} else {
BufferedReader reader =
new BufferedReader(new FileReader(processStartFile));
boolean foundSigTermMessage = false;
while (true) {
String line = reader.readLine();
if (line == null) {
break;
}
if (line.contains("SIGTERM")) {
foundSigTermMessage = true;
break;
}
}
Assert.assertTrue("Did not find sigterm message", foundSigTermMessage);
reader.close();
}
}
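  /**
   * Launches one container over the real ContainerManagementProtocol RPC:
   * localizes the generated script, builds an NM token for the caller,
   * starts the container, and verifies that it reports RUNNING.
   */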
public static void startContainer(NodeManager nm, ContainerId cId,
FileContext localFS, File scriptFileDir, File processStartFile)
throws IOException, YarnException {
File scriptFile =
createUnhaltingScriptFile(cId, scriptFileDir, processStartFile);
ContainerLaunchContext containerLaunchContext =
recordFactory.newRecordInstance(ContainerLaunchContext.class);
NodeId nodeId = BuilderUtils.newNodeId(InetAddress.getByName("localhost")
.getCanonicalHostName(), 12345);
URL localResourceUri =
ConverterUtils.getYarnUrlFromPath(localFS
.makeQualified(new Path(scriptFile.getAbsolutePath())));
LocalResource localResource =
recordFactory.newRecordInstance(LocalResource.class);
localResource.setResource(localResourceUri);
localResource.setSize(-1);
localResource.setVisibility(LocalResourceVisibility.APPLICATION);
localResource.setType(LocalResourceType.FILE);
localResource.setTimestamp(scriptFile.lastModified());
String destinationFile = "dest_file";
Map<String, LocalResource> localResources =
new HashMap<String, LocalResource>();
localResources.put(destinationFile, localResource);
containerLaunchContext.setLocalResources(localResources);
List<String> commands = Arrays.asList(Shell.getRunScriptCommand(scriptFile));
containerLaunchContext.setCommands(commands);
final InetSocketAddress containerManagerBindAddress =
NetUtils.createSocketAddrForHost("127.0.0.1", 12345);
UserGroupInformation currentUser = UserGroupInformation
.createRemoteUser(cId.toString());
org.apache.hadoop.security.token.Token<NMTokenIdentifier> nmToken =
ConverterUtils.convertFromYarn(
nm.getNMContext().getNMTokenSecretManager()
.createNMToken(cId.getApplicationAttemptId(), nodeId, user),
containerManagerBindAddress);
currentUser.addToken(nmToken);
ContainerManagementProtocol containerManager =
currentUser.doAs(new PrivilegedAction<ContainerManagementProtocol>() {
@Override
public ContainerManagementProtocol run() {
Configuration conf = new Configuration();
YarnRPC rpc = YarnRPC.create(conf);
InetSocketAddress containerManagerBindAddress =
NetUtils.createSocketAddrForHost("127.0.0.1", 12345);
return (ContainerManagementProtocol) rpc.getProxy(ContainerManagementProtocol.class,
containerManagerBindAddress, conf);
}
});
StartContainerRequest scRequest =
StartContainerRequest.newInstance(containerLaunchContext,
TestContainerManager.createContainerToken(cId, 0,
nodeId, user, nm.getNMContext().getContainerTokenSecretManager()));
List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
list.add(scRequest);
StartContainersRequest allRequests =
StartContainersRequest.newInstance(list);
containerManager.startContainers(allRequests);
List<ContainerId> containerIds = new ArrayList<ContainerId>();
containerIds.add(cId);
GetContainerStatusesRequest request =
GetContainerStatusesRequest.newInstance(containerIds);
ContainerStatus containerStatus =
containerManager.getContainerStatuses(request).getContainerStatuses().get(0);
Assert.assertEquals(ContainerState.RUNNING, containerStatus.getState());
}
public static ContainerId createContainerId() {
ApplicationId appId = ApplicationId.newInstance(0, 0);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 1);
ContainerId containerId = ContainerId.newContainerId(appAttemptId, 0);
return containerId;
}
private YarnConfiguration createNMConfig() {
YarnConfiguration conf = new YarnConfiguration();
conf.setInt(YarnConfiguration.NM_PMEM_MB, 5*1024); // 5GB
conf.set(YarnConfiguration.NM_ADDRESS, "127.0.0.1:12345");
conf.set(YarnConfiguration.NM_LOCALIZER_ADDRESS, "127.0.0.1:12346");
conf.set(YarnConfiguration.NM_LOG_DIRS, logsDir.getAbsolutePath());
conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, remoteLogsDir.getAbsolutePath());
conf.set(YarnConfiguration.NM_LOCAL_DIRS, nmLocalDir.getAbsolutePath());
conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS, 1);
return conf;
}
/**
* Creates a script to run a container that will run forever unless
* stopped by external means.
*/
private static File createUnhaltingScriptFile(ContainerId cId,
File scriptFileDir, File processStartFile) throws IOException {
File scriptFile = Shell.appendScriptExtension(scriptFileDir, "scriptFile");
PrintWriter fileWriter = new PrintWriter(scriptFile);
if (Shell.WINDOWS) {
fileWriter.println("@echo \"Running testscript for delayed kill\"");
fileWriter.println("@echo \"Writing pid to start file\"");
fileWriter.println("@echo " + cId + ">> " + processStartFile);
fileWriter.println("@pause");
} else {
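      // POSIX shell variant: the trap appends a "Got SIGTERM" marker to the
      // start file on SIGTERM, the pid is recorded, and the loop keeps the
      // process alive until it is killed externally.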
fileWriter.write("#!/bin/bash\n\n");
fileWriter.write("echo \"Running testscript for delayed kill\"\n");
fileWriter.write("hello=\"Got SIGTERM\"\n");
fileWriter.write("umask 0\n");
fileWriter.write("trap \"echo $hello >> " + processStartFile +
"\" SIGTERM\n");
fileWriter.write("echo \"Writing pid to start file\"\n");
fileWriter.write("echo $$ >> " + processStartFile + "\n");
fileWriter.write("while true; do\ndate >> /dev/null;\n done\n");
}
fileWriter.close();
return scriptFile;
}
class TestNodeManager extends NodeManager {
@Override
protected NodeStatusUpdater createNodeStatusUpdater(Context context,
Dispatcher dispatcher, NodeHealthCheckerService healthChecker) {
MockNodeStatusUpdater myNodeStatusUpdater =
new MockNodeStatusUpdater(context, dispatcher, healthChecker, metrics);
return myNodeStatusUpdater;
}
public void setMasterKey(MasterKey masterKey) {
getNMContext().getContainerTokenSecretManager().setMasterKey(masterKey);
}
}
}
| 13,098 | 39.55418 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager;
import static org.apache.hadoop.fs.CreateFlag.CREATE;
import static org.apache.hadoop.fs.CreateFlag.OVERWRITE;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.PrintWriter;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.Signal;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService;
import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerReacquisitionContext;
import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerSignalContext;
import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerStartContext;
import org.apache.hadoop.yarn.server.nodemanager.executor.DeletionAsUserContext;
import org.apache.hadoop.yarn.server.nodemanager.executor.LocalizerStartContext;
import org.apache.hadoop.yarn.server.nodemanager.util.LCEResourcesHandler;
import org.junit.After;
import org.junit.Assert;
import org.junit.Assume;
import org.junit.Before;
import org.junit.Test;
/**
* This is intended to test the LinuxContainerExecutor code, but because of some
* security restrictions this can only be done with some special setup first. <br>
* <ol>
* <li>Compile the code with container-executor.conf.dir set to the location you
* want for testing. <br>
*
* <pre>
* <code>
* > mvn clean install -Pnative -Dcontainer-executor.conf.dir=/etc/hadoop
* -DskipTests
* </code>
* </pre>
*
 * <li>Set up <code>${container-executor.conf.dir}/container-executor.cfg</code>.
 * The file needs to be owned by root and contain the proper config
 * values. <br>
*
* <pre>
* <code>
* > cat /etc/hadoop/container-executor.cfg
* yarn.nodemanager.linux-container-executor.group=mapred
* #depending on the user id of the application.submitter option
* min.user.id=1
* > sudo chown root:root /etc/hadoop/container-executor.cfg
* > sudo chmod 444 /etc/hadoop/container-executor.cfg
* </code>
* </pre>
*
* <li>Move the binary and set proper permissions on it. It needs to be owned by
* root, the group needs to be the group configured in container-executor.cfg,
 * and it needs the setuid bit set. (The build will also overwrite it, so you
 * need to move it to a place where you can keep it.) <br>
*
* <pre>
* <code>
* > cp ./hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/c/container-executor/container-executor /tmp/
* > sudo chown root:mapred /tmp/container-executor
* > sudo chmod 4050 /tmp/container-executor
* </code>
* </pre>
*
 * <li>Run the tests with execution enabled. (The user you run the tests as
 * needs to be part of the group from the config.) <br>
*
* <pre>
* <code>
* mvn test -Dtest=TestLinuxContainerExecutor -Dapplication.submitter=nobody -Dcontainer-executor.path=/tmp/container-executor
* </code>
* </pre>
*
* <li>The test suite also contains tests to test mounting of CGroups. By
* default, these tests are not run. To run them, add -Dcgroups.mount=<mount-point>
* Please note that the test does not unmount the CGroups at the end of the test,
* since that requires root permissions. <br>
*
* <li>The tests that are run are sensitive to directory permissions. All parent
* directories must be searchable by the user that the tasks are run as. If you
* wish to run the tests in a different directory, please set it using
* -Dworkspace.dir
*
* </ol>
*/
public class TestLinuxContainerExecutor {
private static final Log LOG = LogFactory
.getLog(TestLinuxContainerExecutor.class);
private static File workSpace;
static {
String basedir = System.getProperty("workspace.dir");
if(basedir == null || basedir.isEmpty()) {
basedir = "target";
}
workSpace = new File(basedir,
TestLinuxContainerExecutor.class.getName() + "-workSpace");
}
private LinuxContainerExecutor exec = null;
private String appSubmitter = null;
private LocalDirsHandlerService dirsHandler;
private Configuration conf;
private FileContext files;
@Before
public void setup() throws Exception {
files = FileContext.getLocalFSFileContext();
Path workSpacePath = new Path(workSpace.getAbsolutePath());
files.mkdir(workSpacePath, null, true);
FileUtil.chmod(workSpace.getAbsolutePath(), "777");
File localDir = new File(workSpace.getAbsoluteFile(), "localDir");
files.mkdir(new Path(localDir.getAbsolutePath()), new FsPermission("777"),
false);
File logDir = new File(workSpace.getAbsoluteFile(), "logDir");
files.mkdir(new Path(logDir.getAbsolutePath()), new FsPermission("777"),
false);
    String execPath = System.getProperty("container-executor.path");
    if (execPath != null && !execPath.isEmpty()) {
conf = new Configuration(false);
conf.setClass("fs.AbstractFileSystem.file.impl",
org.apache.hadoop.fs.local.LocalFs.class,
org.apache.hadoop.fs.AbstractFileSystem.class);
appSubmitter = System.getProperty("application.submitter");
if (appSubmitter == null || appSubmitter.isEmpty()) {
appSubmitter = "nobody";
}
conf.set(YarnConfiguration.NM_NONSECURE_MODE_LOCAL_USER_KEY, appSubmitter);
LOG.info("Setting " + YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH
+ "=" + exec_path);
conf.set(YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH, exec_path);
exec = new LinuxContainerExecutor();
exec.setConf(conf);
conf.set(YarnConfiguration.NM_LOCAL_DIRS, localDir.getAbsolutePath());
conf.set(YarnConfiguration.NM_LOG_DIRS, logDir.getAbsolutePath());
dirsHandler = new LocalDirsHandlerService();
dirsHandler.init(conf);
List<String> localDirs = dirsHandler.getLocalDirs();
for (String dir : localDirs) {
Path userDir = new Path(dir, ContainerLocalizer.USERCACHE);
files.mkdir(userDir, new FsPermission("777"), false);
// $local/filecache
Path fileDir = new Path(dir, ContainerLocalizer.FILECACHE);
files.mkdir(fileDir, new FsPermission("777"), false);
}
}
}
@After
public void tearDown() throws Exception {
FileContext.getLocalFSFileContext().delete(
new Path(workSpace.getAbsolutePath()), true);
}
private void cleanupUserAppCache(String user) throws Exception {
List<String> localDirs = dirsHandler.getLocalDirs();
for (String dir : localDirs) {
Path usercachedir = new Path(dir, ContainerLocalizer.USERCACHE);
Path userdir = new Path(usercachedir, user);
Path appcachedir = new Path(userdir, ContainerLocalizer.APPCACHE);
exec.deleteAsUser(new DeletionAsUserContext.Builder()
.setUser(user)
.setSubDir(appcachedir)
.build());
FileContext.getLocalFSFileContext().delete(usercachedir, true);
}
}
private void cleanupUserFileCache(String user) {
List<String> localDirs = dirsHandler.getLocalDirs();
for (String dir : localDirs) {
Path filecache = new Path(dir, ContainerLocalizer.FILECACHE);
Path filedir = new Path(filecache, user);
exec.deleteAsUser(new DeletionAsUserContext.Builder()
.setUser(user)
.setSubDir(filedir)
.build());
}
}
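  // Note: runAndBlock() draws the container id first and the app id second
  // from the same counter, so after a run the current value of id names the
  // app and id - 1 names the container; cleanupLogDirs() below relies on
  // that ordering.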
private void cleanupLogDirs(String user) {
List<String> logDirs = dirsHandler.getLogDirs();
for (String dir : logDirs) {
String appId = "APP_" + id;
String containerId = "CONTAINER_" + (id - 1);
Path appdir = new Path(dir, appId);
Path containerdir = new Path(appdir, containerId);
exec.deleteAsUser(new DeletionAsUserContext.Builder()
.setUser(user)
.setSubDir(containerdir)
.build());
}
}
private void cleanupAppFiles(String user) throws Exception {
cleanupUserAppCache(user);
cleanupUserFileCache(user);
cleanupLogDirs(user);
String[] files =
{ "launch_container.sh", "container_tokens", "touch-file" };
Path ws = new Path(workSpace.toURI());
for (String file : files) {
File f = new File(workSpace, file);
if (f.exists()) {
exec.deleteAsUser(new DeletionAsUserContext.Builder()
.setUser(user)
.setSubDir(new Path(file))
.setBasedirs(ws)
.build());
}
}
}
private boolean shouldRun() {
if (exec == null) {
LOG.warn("Not running test because container-executor.path is not set");
return false;
}
return true;
}
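  /**
   * Writes a temporary shell script that execs the given command. Each
   * argument is wrapped in single quotes, with backslashes and quotes
   * backslash-escaped, so the command line reaches the target program intact.
   */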
private String writeScriptFile(String... cmd) throws IOException {
File f = File.createTempFile("TestLinuxContainerExecutor", ".sh");
f.deleteOnExit();
PrintWriter p = new PrintWriter(new FileOutputStream(f));
p.println("#!/bin/sh");
p.print("exec");
for (String part : cmd) {
p.print(" '");
p.print(part.replace("\\", "\\\\").replace("'", "\\'"));
p.print("'");
}
p.println();
p.close();
return f.getAbsolutePath();
}
private int id = 0;
private synchronized int getNextId() {
id += 1;
return id;
}
private ContainerId getNextContainerId() {
ContainerId cId = mock(ContainerId.class);
String id = "CONTAINER_" + getNextId();
when(cId.toString()).thenReturn(id);
return cId;
}
private int runAndBlock(String... cmd) throws IOException {
return runAndBlock(getNextContainerId(), cmd);
}
private int runAndBlock(ContainerId cId, String... cmd) throws IOException {
String appId = "APP_" + getNextId();
Container container = mock(Container.class);
ContainerLaunchContext context = mock(ContainerLaunchContext.class);
HashMap<String, String> env = new HashMap<String, String>();
when(container.getContainerId()).thenReturn(cId);
when(container.getLaunchContext()).thenReturn(context);
when(context.getEnvironment()).thenReturn(env);
String script = writeScriptFile(cmd);
Path scriptPath = new Path(script);
Path tokensPath = new Path("/dev/null");
Path workDir = new Path(workSpace.getAbsolutePath());
Path pidFile = new Path(workDir, "pid.txt");
exec.activateContainer(cId, pidFile);
return exec.launchContainer(new ContainerStartContext.Builder()
.setContainer(container)
.setNmPrivateContainerScriptPath(scriptPath)
.setNmPrivateTokensPath(tokensPath)
.setUser(appSubmitter)
.setAppId(appId)
.setContainerWorkDir(workDir)
.setLocalDirs(dirsHandler.getLocalDirs())
.setLogDirs(dirsHandler.getLogDirs())
.build());
}
@Test
public void testContainerLocalizer() throws Exception {
Assume.assumeTrue(shouldRun());
String locId = "container_01_01";
Path nmPrivateContainerTokensPath =
dirsHandler
.getLocalPathForWrite(ResourceLocalizationService.NM_PRIVATE_DIR
+ Path.SEPARATOR
+ String.format(ContainerLocalizer.TOKEN_FILE_NAME_FMT, locId));
files.create(nmPrivateContainerTokensPath, EnumSet.of(CREATE, OVERWRITE));
Configuration config = new YarnConfiguration(conf);
InetSocketAddress nmAddr =
config.getSocketAddr(YarnConfiguration.NM_BIND_HOST,
YarnConfiguration.NM_LOCALIZER_ADDRESS,
YarnConfiguration.DEFAULT_NM_LOCALIZER_ADDRESS,
YarnConfiguration.DEFAULT_NM_LOCALIZER_PORT);
String appId = "application_01_01";
exec = new LinuxContainerExecutor() {
@Override
public void buildMainArgs(List<String> command, String user,
String appId, String locId, InetSocketAddress nmAddr,
List<String> localDirs) {
MockContainerLocalizer.buildMainArgs(command, user, appId, locId,
nmAddr, localDirs);
}
};
exec.setConf(conf);
exec.startLocalizer(new LocalizerStartContext.Builder()
.setNmPrivateContainerTokens(nmPrivateContainerTokensPath)
.setNmAddr(nmAddr)
.setUser(appSubmitter)
.setAppId(appId)
.setLocId(locId)
.setDirsHandler(dirsHandler)
.build());
String locId2 = "container_01_02";
Path nmPrivateContainerTokensPath2 =
dirsHandler
.getLocalPathForWrite(ResourceLocalizationService.NM_PRIVATE_DIR
+ Path.SEPARATOR
+ String.format(ContainerLocalizer.TOKEN_FILE_NAME_FMT, locId2));
files.create(nmPrivateContainerTokensPath2, EnumSet.of(CREATE, OVERWRITE));
exec.startLocalizer(new LocalizerStartContext.Builder()
.setNmPrivateContainerTokens(nmPrivateContainerTokensPath2)
.setNmAddr(nmAddr)
.setUser(appSubmitter)
.setAppId(appId)
.setLocId(locId2)
.setDirsHandler(dirsHandler)
.build());
cleanupUserAppCache(appSubmitter);
}
@Test
public void testContainerLaunch() throws Exception {
Assume.assumeTrue(shouldRun());
String expectedRunAsUser =
conf.get(YarnConfiguration.NM_NONSECURE_MODE_LOCAL_USER_KEY,
YarnConfiguration.DEFAULT_NM_NONSECURE_MODE_LOCAL_USER);
File touchFile = new File(workSpace, "touch-file");
int ret = runAndBlock("touch", touchFile.getAbsolutePath());
assertEquals(0, ret);
FileStatus fileStatus =
FileContext.getLocalFSFileContext().getFileStatus(
new Path(touchFile.getAbsolutePath()));
assertEquals(expectedRunAsUser, fileStatus.getOwner());
cleanupAppFiles(expectedRunAsUser);
}
@Test
public void testNonSecureRunAsSubmitter() throws Exception {
Assume.assumeTrue(shouldRun());
Assume.assumeFalse(UserGroupInformation.isSecurityEnabled());
String expectedRunAsUser = appSubmitter;
conf.set(YarnConfiguration.NM_NONSECURE_MODE_LIMIT_USERS, "false");
exec.setConf(conf);
File touchFile = new File(workSpace, "touch-file");
int ret = runAndBlock("touch", touchFile.getAbsolutePath());
assertEquals(0, ret);
FileStatus fileStatus =
FileContext.getLocalFSFileContext().getFileStatus(
new Path(touchFile.getAbsolutePath()));
assertEquals(expectedRunAsUser, fileStatus.getOwner());
cleanupAppFiles(expectedRunAsUser);
// reset conf
conf.unset(YarnConfiguration.NM_NONSECURE_MODE_LIMIT_USERS);
exec.setConf(conf);
}
@Test
public void testContainerKill() throws Exception {
Assume.assumeTrue(shouldRun());
final ContainerId sleepId = getNextContainerId();
    Thread t = new Thread() {
      @Override
      public void run() {
        try {
          runAndBlock(sleepId, "sleep", "100");
        } catch (IOException e) {
          LOG.warn("Caught exception while running sleep", e);
        }
      }
    };
t.setDaemon(true); // If it does not exit we shouldn't block the test.
t.start();
assertTrue(t.isAlive());
String pid = null;
int count = 10;
while ((pid = exec.getProcessId(sleepId)) == null && count > 0) {
LOG.info("Sleeping for 200 ms before checking for pid ");
Thread.sleep(200);
count--;
}
assertNotNull(pid);
LOG.info("Going to killing the process.");
exec.signalContainer(new ContainerSignalContext.Builder()
.setUser(appSubmitter)
.setPid(pid)
.setSignal(Signal.TERM)
.build());
LOG.info("sleeping for 100ms to let the sleep be killed");
Thread.sleep(100);
assertFalse(t.isAlive());
cleanupAppFiles(appSubmitter);
}
@Test
public void testCGroups() throws Exception {
Assume.assumeTrue(shouldRun());
String cgroupsMount = System.getProperty("cgroups.mount");
Assume.assumeTrue((cgroupsMount != null) && !cgroupsMount.isEmpty());
assertTrue("Cgroups mount point does not exist", new File(
cgroupsMount).exists());
List<String> cgroupKVs = new ArrayList<>();
String hierarchy = "hadoop-yarn";
String[] controllers = { "cpu", "net_cls" };
for (String controller : controllers) {
cgroupKVs.add(controller + "=" + cgroupsMount + "/" + controller);
assertTrue(new File(cgroupsMount, controller).exists());
}
try {
exec.mountCgroups(cgroupKVs, hierarchy);
for (String controller : controllers) {
assertTrue(controller + " cgroup not mounted", new File(
cgroupsMount + "/" + controller + "/tasks").exists());
assertTrue(controller + " cgroup hierarchy not created",
new File(cgroupsMount + "/" + controller + "/" + hierarchy).exists());
assertTrue(controller + " cgroup hierarchy created incorrectly",
new File(cgroupsMount + "/" + controller + "/" + hierarchy
+ "/tasks").exists());
}
} catch (IOException ie) {
fail("Couldn't mount cgroups " + ie.toString());
throw ie;
}
}
@Test
public void testLocalUser() throws Exception {
Assume.assumeTrue(shouldRun());
try {
// nonsecure default
Configuration conf = new YarnConfiguration();
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
"simple");
UserGroupInformation.setConfiguration(conf);
LinuxContainerExecutor lce = new LinuxContainerExecutor();
lce.setConf(conf);
Assert.assertEquals(
YarnConfiguration.DEFAULT_NM_NONSECURE_MODE_LOCAL_USER,
lce.getRunAsUser("foo"));
// nonsecure custom setting
conf.set(YarnConfiguration.NM_NONSECURE_MODE_LOCAL_USER_KEY, "bar");
lce = new LinuxContainerExecutor();
lce.setConf(conf);
Assert.assertEquals("bar", lce.getRunAsUser("foo"));
// nonsecure without limits
conf.set(YarnConfiguration.NM_NONSECURE_MODE_LOCAL_USER_KEY, "bar");
conf.set(YarnConfiguration.NM_NONSECURE_MODE_LIMIT_USERS, "false");
lce = new LinuxContainerExecutor();
lce.setConf(conf);
Assert.assertEquals("foo", lce.getRunAsUser("foo"));
// secure
conf = new YarnConfiguration();
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
"kerberos");
UserGroupInformation.setConfiguration(conf);
lce = new LinuxContainerExecutor();
lce.setConf(conf);
Assert.assertEquals("foo", lce.getRunAsUser("foo"));
} finally {
Configuration conf = new YarnConfiguration();
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
"simple");
UserGroupInformation.setConfiguration(conf);
}
}
@Test
public void testNonsecureUsernamePattern() throws Exception {
Assume.assumeTrue(shouldRun());
try {
// nonsecure default
Configuration conf = new YarnConfiguration();
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
"simple");
UserGroupInformation.setConfiguration(conf);
LinuxContainerExecutor lce = new LinuxContainerExecutor();
lce.setConf(conf);
lce.verifyUsernamePattern("foo");
try {
lce.verifyUsernamePattern("foo/x");
fail();
} catch (IllegalArgumentException ex) {
// NOP
} catch (Throwable ex) {
fail(ex.toString());
}
// nonsecure custom setting
conf.set(YarnConfiguration.NM_NONSECURE_MODE_USER_PATTERN_KEY, "foo");
lce = new LinuxContainerExecutor();
lce.setConf(conf);
lce.verifyUsernamePattern("foo");
try {
lce.verifyUsernamePattern("bar");
fail();
} catch (IllegalArgumentException ex) {
// NOP
} catch (Throwable ex) {
fail(ex.toString());
}
// secure, pattern matching does not kick in.
conf = new YarnConfiguration();
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
"kerberos");
UserGroupInformation.setConfiguration(conf);
lce = new LinuxContainerExecutor();
lce.setConf(conf);
lce.verifyUsernamePattern("foo");
lce.verifyUsernamePattern("foo/w");
} finally {
Configuration conf = new YarnConfiguration();
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
"simple");
UserGroupInformation.setConfiguration(conf);
}
}
@Test(timeout = 10000)
public void testPostExecuteAfterReacquisition() throws Exception {
Assume.assumeTrue(shouldRun());
// make up some bogus container ID
ApplicationId appId = ApplicationId.newInstance(12345, 67890);
ApplicationAttemptId attemptId =
ApplicationAttemptId.newInstance(appId, 54321);
ContainerId cid = ContainerId.newContainerId(attemptId, 9876);
Configuration conf = new YarnConfiguration();
conf.setClass(YarnConfiguration.NM_LINUX_CONTAINER_RESOURCES_HANDLER,
TestResourceHandler.class, LCEResourcesHandler.class);
LinuxContainerExecutor lce = new LinuxContainerExecutor();
lce.setConf(conf);
try {
lce.init();
} catch (IOException e) {
// expected if LCE isn't setup right, but not necessary for this test
}
lce.reacquireContainer(new ContainerReacquisitionContext.Builder()
.setUser("foouser")
.setContainerId(cid)
.build());
assertTrue("postExec not called after reacquisition",
TestResourceHandler.postExecContainers.contains(cid));
}
private static class TestResourceHandler implements LCEResourcesHandler {
static Set<ContainerId> postExecContainers = new HashSet<ContainerId>();
@Override
public void setConf(Configuration conf) {
}
@Override
public Configuration getConf() {
return null;
}
@Override
public void init(LinuxContainerExecutor lce) throws IOException {
}
@Override
public void preExecute(ContainerId containerId, Resource containerResource)
throws IOException {
}
@Override
public void postExecute(ContainerId containerId) {
postExecContainers.add(containerId);
}
@Override
public String getResourcesOption(ContainerId containerId) {
return null;
}
}
}
| 24,199 | 35.011905 | 146 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdaterForLabels.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Set;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.ServiceOperations;
import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
import org.apache.hadoop.yarn.nodelabels.NodeLabelTestBase;
import org.apache.hadoop.yarn.server.api.ResourceTracker;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerResponse;
import org.apache.hadoop.yarn.server.api.records.MasterKey;
import org.apache.hadoop.yarn.server.api.records.NodeAction;
import org.apache.hadoop.yarn.server.api.records.NodeStatus;
import org.apache.hadoop.yarn.server.api.records.impl.pb.MasterKeyPBImpl;
import org.apache.hadoop.yarn.server.nodemanager.nodelabels.NodeLabelsProvider;
import org.apache.hadoop.yarn.server.utils.YarnServerBuilderUtils;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
public class TestNodeStatusUpdaterForLabels extends NodeLabelTestBase {
private static final RecordFactory recordFactory = RecordFactoryProvider
.getRecordFactory(null);
private NodeManager nm;
protected DummyNodeLabelsProvider dummyLabelsProviderRef;
@Before
public void setup() {
dummyLabelsProviderRef = new DummyNodeLabelsProvider();
}
@After
public void tearDown() {
if (null != nm) {
ServiceOperations.stop(nm);
}
}
private class ResourceTrackerForLabels implements ResourceTracker {
int heartbeatID = 0;
Set<NodeLabel> labels;
private boolean receivedNMHeartbeat = false;
private boolean receivedNMRegister = false;
private MasterKey createMasterKey() {
MasterKey masterKey = new MasterKeyPBImpl();
masterKey.setKeyId(123);
      masterKey.setBytes(ByteBuffer.wrap(new byte[] { (byte) 123 }));
return masterKey;
}
@Override
public RegisterNodeManagerResponse registerNodeManager(
RegisterNodeManagerRequest request) throws YarnException, IOException {
labels = request.getNodeLabels();
RegisterNodeManagerResponse response =
recordFactory.newRecordInstance(RegisterNodeManagerResponse.class);
response.setNodeAction(NodeAction.NORMAL);
response.setContainerTokenMasterKey(createMasterKey());
response.setNMTokenMasterKey(createMasterKey());
response.setAreNodeLabelsAcceptedByRM(labels != null);
synchronized (ResourceTrackerForLabels.class) {
receivedNMRegister = true;
ResourceTrackerForLabels.class.notifyAll();
}
return response;
}
public void waitTillHeartbeat() {
if (receivedNMHeartbeat) {
return;
}
int i = 500;
while (!receivedNMHeartbeat && i > 0) {
synchronized (ResourceTrackerForLabels.class) {
if (!receivedNMHeartbeat) {
try {
System.out
.println("In ResourceTrackerForLabels waiting for heartbeat : "
+ System.currentTimeMillis());
              ResourceTrackerForLabels.class.wait(500L);
              // To avoid a race (sendOutofBandHeartBeat may fire before the
              // NodeStatusUpdater thread has gone to sleep), wait and then
              // resend the heartbeat.
              nm.getNodeStatusUpdater().sendOutofBandHeartBeat();
              ResourceTrackerForLabels.class.wait(500L);
i--;
} catch (InterruptedException e) {
Assert.fail("Exception caught while waiting for Heartbeat");
e.printStackTrace();
}
}
}
}
if (!receivedNMHeartbeat) {
Assert.fail("Heartbeat dint receive even after waiting");
}
}
public void waitTillRegister() {
if (receivedNMRegister) {
return;
}
while (!receivedNMRegister) {
synchronized (ResourceTrackerForLabels.class) {
try {
ResourceTrackerForLabels.class.wait();
} catch (InterruptedException e) {
Assert.fail("Exception caught while waiting for register");
e.printStackTrace();
}
}
}
}
    /**
     * Resets the flag that records whether an NM heartbeat was received.
     */
public void resetNMHeartbeatReceiveFlag() {
synchronized (ResourceTrackerForLabels.class) {
receivedNMHeartbeat = false;
}
}
@Override
public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request)
throws YarnException, IOException {
System.out.println("RTS receive heartbeat : "
+ System.currentTimeMillis());
labels = request.getNodeLabels();
NodeStatus nodeStatus = request.getNodeStatus();
nodeStatus.setResponseId(heartbeatID++);
NodeHeartbeatResponse nhResponse =
YarnServerBuilderUtils.newNodeHeartbeatResponse(heartbeatID,
NodeAction.NORMAL, null, null, null, null, 1000L);
// to ensure that heartbeats are sent only when required.
nhResponse.setNextHeartBeatInterval(Long.MAX_VALUE);
nhResponse.setAreNodeLabelsAcceptedByRM(labels != null);
synchronized (ResourceTrackerForLabels.class) {
receivedNMHeartbeat = true;
ResourceTrackerForLabels.class.notifyAll();
}
return nhResponse;
}
@Override
public UnRegisterNodeManagerResponse unRegisterNodeManager(
UnRegisterNodeManagerRequest request) throws YarnException, IOException {
return null;
}
}
public static class DummyNodeLabelsProvider extends NodeLabelsProvider {
@SuppressWarnings("unchecked")
private Set<NodeLabel> nodeLabels = CommonNodeLabelsManager.EMPTY_NODELABEL_SET;
public DummyNodeLabelsProvider() {
super(DummyNodeLabelsProvider.class.getName());
}
@Override
public synchronized Set<NodeLabel> getNodeLabels() {
return nodeLabels;
}
synchronized void setNodeLabels(Set<NodeLabel> nodeLabels) {
this.nodeLabels = nodeLabels;
}
}
private YarnConfiguration createNMConfigForDistributeNodeLabels() {
YarnConfiguration conf = new YarnConfiguration();
conf.set(YarnConfiguration.NODELABEL_CONFIGURATION_TYPE,
YarnConfiguration.DISTRIBUTED_NODELABEL_CONFIGURATION_TYPE);
return conf;
}
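  // Exercises the label-reporting contract end to end: labels ride on the
  // register call and on heartbeats that follow a change, an unchanged set
  // is reported as null, and a null provider result is sent as the empty
  // label set.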
@Test
public void testNodeStatusUpdaterForNodeLabels() throws InterruptedException,
IOException {
final ResourceTrackerForLabels resourceTracker =
new ResourceTrackerForLabels();
nm = new NodeManager() {
@Override
protected NodeLabelsProvider createNodeLabelsProvider(
Configuration conf) throws IOException {
return dummyLabelsProviderRef;
}
@Override
protected NodeStatusUpdater createNodeStatusUpdater(Context context,
Dispatcher dispatcher, NodeHealthCheckerService healthChecker,
NodeLabelsProvider labelsProvider) {
return new NodeStatusUpdaterImpl(context, dispatcher, healthChecker,
metrics, labelsProvider) {
@Override
protected ResourceTracker getRMClient() {
return resourceTracker;
}
@Override
protected void stopRMProxy() {
return;
}
};
}
};
YarnConfiguration conf = createNMConfigForDistributeNodeLabels();
nm.init(conf);
resourceTracker.resetNMHeartbeatReceiveFlag();
nm.start();
resourceTracker.waitTillRegister();
assertNLCollectionEquals(resourceTracker.labels,
dummyLabelsProviderRef
.getNodeLabels());
    resourceTracker.waitTillHeartbeat(); // wait till the first heartbeat
resourceTracker.resetNMHeartbeatReceiveFlag();
// heartbeat with updated labels
dummyLabelsProviderRef.setNodeLabels(toNodeLabelSet("P"));
nm.getNodeStatusUpdater().sendOutofBandHeartBeat();
resourceTracker.waitTillHeartbeat();
assertNLCollectionEquals(resourceTracker.labels,
dummyLabelsProviderRef
.getNodeLabels());
resourceTracker.resetNMHeartbeatReceiveFlag();
// heartbeat without updating labels
nm.getNodeStatusUpdater().sendOutofBandHeartBeat();
resourceTracker.waitTillHeartbeat();
resourceTracker.resetNMHeartbeatReceiveFlag();
assertNull(
"If no change in labels then null should be sent as part of request",
resourceTracker.labels);
// provider return with null labels
dummyLabelsProviderRef.setNodeLabels(null);
nm.getNodeStatusUpdater().sendOutofBandHeartBeat();
resourceTracker.waitTillHeartbeat();
assertTrue("If provider sends null then empty labels should be sent",
resourceTracker.labels.isEmpty());
resourceTracker.resetNMHeartbeatReceiveFlag();
nm.stop();
}
}
| 10,483 | 34.78157 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeHealthService.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.PrintWriter;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.NodeHealthScriptRunner;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.server.api.records.NodeHealthStatus;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.spy;
public class TestNodeHealthService {
private static volatile Log LOG = LogFactory
.getLog(TestNodeHealthService.class);
protected static File testRootDir = new File("target",
TestNodeHealthService.class.getName() + "-localDir").getAbsoluteFile();
final static File nodeHealthConfigFile = new File(testRootDir,
"modified-mapred-site.xml");
private File nodeHealthscriptFile = new File(testRootDir,
Shell.appendScriptExtension("failingscript"));
@Before
public void setup() {
testRootDir.mkdirs();
}
@After
public void tearDown() throws Exception {
if (testRootDir.exists()) {
FileContext.getLocalFSFileContext().delete(
new Path(testRootDir.getAbsolutePath()), true);
}
}
private void writeNodeHealthScriptFile(String scriptStr, boolean setExecutable)
throws IOException {
PrintWriter pw = null;
try {
FileUtil.setWritable(nodeHealthscriptFile, true);
FileUtil.setReadable(nodeHealthscriptFile, true);
pw = new PrintWriter(new FileOutputStream(nodeHealthscriptFile));
pw.println(scriptStr);
pw.flush();
    } finally {
      if (pw != null) {
        pw.close();
      }
    }
FileUtil.setExecutable(nodeHealthscriptFile, setExecutable);
}
private Configuration getConfForNodeHealthScript() {
Configuration conf = new Configuration();
conf.set(YarnConfiguration.NM_HEALTH_CHECK_SCRIPT_PATH,
nodeHealthscriptFile.getAbsolutePath());
conf.setLong(YarnConfiguration.NM_HEALTH_CHECK_INTERVAL_MS, 500);
conf.setLong(
YarnConfiguration.NM_HEALTH_CHECK_SCRIPT_TIMEOUT_MS, 1000);
return conf;
}
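  // The script wired in above follows the standard health-script convention:
  // any stdout line starting with "ERROR" marks the node unhealthy. An
  // illustrative failing script (this test stubs the runner with Mockito
  // instead of executing one):
  //
  //   #!/bin/bash
  //   echo "ERROR: local disks are full"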
private void setHealthStatus(NodeHealthStatus healthStatus, boolean isHealthy,
String healthReport, long lastHealthReportTime) {
healthStatus.setHealthReport(healthReport);
healthStatus.setIsNodeHealthy(isHealthy);
healthStatus.setLastHealthReportTime(lastHealthReportTime);
}
@Test
public void testNodeHealthService() throws Exception {
RecordFactory factory = RecordFactoryProvider.getRecordFactory(null);
NodeHealthStatus healthStatus =
factory.newRecordInstance(NodeHealthStatus.class);
Configuration conf = getConfForNodeHealthScript();
conf.writeXml(new FileOutputStream(nodeHealthConfigFile));
conf.addResource(nodeHealthConfigFile.getName());
writeNodeHealthScriptFile("", true);
LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService();
NodeHealthScriptRunner nodeHealthScriptRunner =
spy(NodeManager.getNodeHealthScriptRunner(conf));
NodeHealthCheckerService nodeHealthChecker = new NodeHealthCheckerService(
nodeHealthScriptRunner, dirsHandler);
nodeHealthChecker.init(conf);
doReturn(true).when(nodeHealthScriptRunner).isHealthy();
doReturn("").when(nodeHealthScriptRunner).getHealthReport();
setHealthStatus(healthStatus, nodeHealthChecker.isHealthy(),
nodeHealthChecker.getHealthReport(),
nodeHealthChecker.getLastHealthReportTime());
LOG.info("Checking initial healthy condition");
// Check proper report conditions.
Assert.assertTrue("Node health status reported unhealthy", healthStatus
.getIsNodeHealthy());
Assert.assertTrue("Node health status reported unhealthy", healthStatus
.getHealthReport().equals(nodeHealthChecker.getHealthReport()));
doReturn(false).when(nodeHealthScriptRunner).isHealthy();
// update health status
setHealthStatus(healthStatus, nodeHealthChecker.isHealthy(),
nodeHealthChecker.getHealthReport(),
nodeHealthChecker.getLastHealthReportTime());
LOG.info("Checking Healthy--->Unhealthy");
Assert.assertFalse("Node health status reported healthy", healthStatus
.getIsNodeHealthy());
Assert.assertTrue("Node health status reported healthy", healthStatus
.getHealthReport().equals(nodeHealthChecker.getHealthReport()));
doReturn(true).when(nodeHealthScriptRunner).isHealthy();
setHealthStatus(healthStatus, nodeHealthChecker.isHealthy(),
nodeHealthChecker.getHealthReport(),
nodeHealthChecker.getLastHealthReportTime());
LOG.info("Checking UnHealthy--->healthy");
// Check proper report conditions.
Assert.assertTrue("Node health status reported unhealthy", healthStatus
.getIsNodeHealthy());
Assert.assertTrue("Node health status reported unhealthy", healthStatus
.getHealthReport().equals(nodeHealthChecker.getHealthReport()));
// Healthy to timeout transition.
doReturn(false).when(nodeHealthScriptRunner).isHealthy();
doReturn(NodeHealthScriptRunner.NODE_HEALTH_SCRIPT_TIMED_OUT_MSG)
.when(nodeHealthScriptRunner).getHealthReport();
setHealthStatus(healthStatus, nodeHealthChecker.isHealthy(),
nodeHealthChecker.getHealthReport(),
nodeHealthChecker.getLastHealthReportTime());
LOG.info("Checking Healthy--->timeout");
Assert.assertFalse("Node health status reported healthy even after timeout",
healthStatus.getIsNodeHealthy());
Assert.assertTrue("Node script time out message not propogated",
healthStatus.getHealthReport().equals(
NodeHealthScriptRunner.NODE_HEALTH_SCRIPT_TIMED_OUT_MSG
+ NodeHealthCheckerService.SEPARATOR
+ nodeHealthChecker.getDiskHandler().getDisksHealthReport(false)));
}
}
| 7,093 | 40.00578 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerManagerWithLCE.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager;
import java.io.File;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.TestContainerManager;
import org.junit.After;
public class TestContainerManagerWithLCE extends TestContainerManager {
private static final Log LOG = LogFactory
.getLog(TestContainerManagerWithLCE.class);
public TestContainerManagerWithLCE() throws UnsupportedFileSystemException {
super();
}
static {
localDir =
new File("target",
TestContainerManagerWithLCE.class.getName() + "-localDir")
.getAbsoluteFile();
tmpDir = new File("target",
TestContainerManagerWithLCE.class.getName() + "-tmpDir");
}
@Override
public void setup() throws IOException {
// Don't run the test if the binary is not available.
if (!shouldRunTest()) {
LOG.info("LCE binary path is not passed. Not running the test");
return;
}
super.setup();
localFS.setPermission(new Path(localDir.getCanonicalPath()),
new FsPermission(
(short) 0777));
localFS.setPermission(new Path(tmpDir.getCanonicalPath()),
new FsPermission(
(short) 0777));
}
@After
@Override
public void tearDown() throws IOException, InterruptedException {
if (shouldRunTest()) {
super.tearDown();
}
}
@Override
public void testContainerSetup() throws Exception, InterruptedException,
YarnException {
// Don't run the test if the binary is not available.
if (!shouldRunTest()) {
LOG.info("LCE binary path is not passed. Not running the test");
return;
}
LOG.info("Running testContainerSetup");
super.testContainerSetup();
}
@Override
public void testContainerManagerInitialization() throws IOException {
// Don't run the test if the binary is not available.
if (!shouldRunTest()) {
LOG.info("LCE binary path is not passed. Not running the test");
return;
}
LOG.info("Running testContainerManagerInitialization");
super.testContainerManagerInitialization();
}
@Override
public void testContainerLaunchAndStop() throws IOException,
InterruptedException, YarnException {
// Don't run the test if the binary is not available.
if (!shouldRunTest()) {
LOG.info("LCE binary path is not passed. Not running the test");
return;
}
LOG.info("Running testContainerLaunchAndStop");
super.testContainerLaunchAndStop();
}
@Override
public void testContainerLaunchAndExitSuccess() throws IOException,
InterruptedException, YarnException {
// Don't run the test if the binary is not available.
if (!shouldRunTest()) {
LOG.info("LCE binary path is not passed. Not running the test");
return;
}
LOG.info("Running testContainerLaunchAndExitSuccess");
super.testContainerLaunchAndExitSuccess();
}
@Override
public void testContainerLaunchAndExitFailure() throws IOException,
InterruptedException, YarnException {
// Don't run the test if the binary is not available.
if (!shouldRunTest()) {
LOG.info("LCE binary path is not passed. Not running the test");
return;
}
LOG.info("Running testContainerLaunchAndExitFailure");
super.testContainerLaunchAndExitFailure();
}
@Override
public void testLocalFilesCleanup() throws InterruptedException,
IOException, YarnException {
// Don't run the test if the binary is not available.
if (!shouldRunTest()) {
LOG.info("LCE binary path is not passed. Not running the test");
return;
}
LOG.info("Running testLocalFilesCleanup");
super.testLocalFilesCleanup();
}
@Override
public void testContainerLaunchFromPreviousRM() throws InterruptedException,
IOException, YarnException {
// Don't run the test if the binary is not available.
if (!shouldRunTest()) {
LOG.info("LCE binary path is not passed. Not running the test");
return;
}
LOG.info("Running testContainerLaunchFromPreviousRM");
super.testContainerLaunchFromPreviousRM();
}
@Override
public void testMultipleContainersLaunch() throws Exception {
// Don't run the test if the binary is not available.
if (!shouldRunTest()) {
LOG.info("LCE binary path is not passed. Not running the test");
return;
}
LOG.info("Running testContainerLaunchFromPreviousRM");
super.testMultipleContainersLaunch();
}
@Override
public void testMultipleContainersStopAndGetStatus() throws Exception {
// Don't run the test if the binary is not available.
if (!shouldRunTest()) {
LOG.info("LCE binary path is not passed. Not running the test");
return;
}
LOG.info("Running testContainerLaunchFromPreviousRM");
super.testMultipleContainersStopAndGetStatus();
}
@Override
public void testStartContainerFailureWithUnknownAuxService() throws Exception {
// Don't run the test if the binary is not available.
if (!shouldRunTest()) {
LOG.info("LCE binary path is not passed. Not running the test");
return;
}
LOG.info("Running testContainerLaunchFromPreviousRM");
super.testStartContainerFailureWithUnknownAuxService();
}
private boolean shouldRunTest() {
return System
.getProperty(YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH) != null;
}
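  // The LCE variants are therefore skipped unless the setuid
  // container-executor binary is supplied as a JVM system property; an
  // illustrative invocation (the binary path is an assumption, adjust to the
  // local build):
  //
  //   mvn test -Dtest=TestContainerManagerWithLCE \
  //       -Dyarn.nodemanager.linux-container-executor.path=/usr/local/bin/container-executor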
@Override
protected ContainerExecutor createContainerExecutor() {
super.conf.set(YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH, System
.getProperty(YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH));
LinuxContainerExecutor linuxContainerExecutor =
new LinuxContainerExecutor();
linuxContainerExecutor.setConf(super.conf);
return linuxContainerExecutor;
}
}
| 6,950 | 32.57971 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerExecutor.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager;
import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.nodemanager.util.NodeManagerHardwareUtils;
import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
import org.junit.Assert;
import org.junit.Test;
import static org.junit.Assert.*;
import static org.junit.Assume.assumeTrue;
public class TestContainerExecutor {
private ContainerExecutor containerExecutor = new DefaultContainerExecutor();
@Test (timeout = 5000)
public void testRunCommandNoPriority() throws Exception {
Configuration conf = new Configuration();
String[] command = containerExecutor.getRunCommand("echo", "group1", "user", null, conf);
assertTrue("first command should be the run command for the platform",
command[0].equals(Shell.WINUTILS) || command[0].equals("bash"));
}
@Test (timeout = 5000)
public void testRunCommandwithPriority() throws Exception {
Configuration conf = new Configuration();
conf.setInt(YarnConfiguration.NM_CONTAINER_EXECUTOR_SCHED_PRIORITY, 2);
String[] command = containerExecutor.getRunCommand("echo", "group1", "user", null, conf);
if (Shell.WINDOWS) {
      // Windows does not currently support setting a scheduling priority
assertEquals("first command should be the run command for the platform",
Shell.WINUTILS, command[0]);
} else {
assertEquals("first command should be nice", "nice", command[0]);
assertEquals("second command should be -n", "-n", command[1]);
assertEquals("third command should be the priority", Integer.toString(2),
command[2]);
}
// test with negative number
conf.setInt(YarnConfiguration.NM_CONTAINER_EXECUTOR_SCHED_PRIORITY, -5);
command = containerExecutor.getRunCommand("echo", "group1", "user", null, conf);
if (Shell.WINDOWS) {
      // Windows does not currently support setting a scheduling priority
assertEquals("first command should be the run command for the platform",
Shell.WINUTILS, command[0]);
} else {
assertEquals("first command should be nice", "nice", command[0]);
assertEquals("second command should be -n", "-n", command[1]);
assertEquals("third command should be the priority", Integer.toString(-5),
command[2]);
}
}
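  // For reference, on a POSIX platform the assertions above correspond to a
  // launch command of roughly this shape (the script path is illustrative):
  //
  //   nice -n 2 bash /path/to/launch_container.sh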
@Test (timeout = 5000)
public void testRunCommandWithNoResources() {
// Windows only test
assumeTrue(Shell.WINDOWS);
Configuration conf = new Configuration();
String[] command = containerExecutor.getRunCommand("echo", "group1", null, null,
conf, Resource.newInstance(1024, 1));
// Assert the cpu and memory limits are set correctly in the command
String[] expected = { Shell.WINUTILS, "task", "create", "-m", "-1", "-c",
"-1", "group1", "cmd /c " + "echo" };
Assert.assertTrue(Arrays.equals(expected, command));
}
@Test (timeout = 5000)
public void testRunCommandWithMemoryOnlyResources() {
// Windows only test
assumeTrue(Shell.WINDOWS);
Configuration conf = new Configuration();
conf.set(YarnConfiguration.NM_WINDOWS_CONTAINER_MEMORY_LIMIT_ENABLED, "true");
String[] command = containerExecutor.getRunCommand("echo", "group1", null, null,
conf, Resource.newInstance(1024, 1));
// Assert the cpu and memory limits are set correctly in the command
String[] expected = { Shell.WINUTILS, "task", "create", "-m", "1024", "-c",
"-1", "group1", "cmd /c " + "echo" };
Assert.assertTrue(Arrays.equals(expected, command));
}
@Test (timeout = 5000)
public void testRunCommandWithCpuAndMemoryResources() {
// Windows only test
assumeTrue(Shell.WINDOWS);
int containerCores = 1;
Configuration conf = new Configuration();
conf.set(YarnConfiguration.NM_WINDOWS_CONTAINER_CPU_LIMIT_ENABLED, "true");
conf.set(YarnConfiguration.NM_WINDOWS_CONTAINER_MEMORY_LIMIT_ENABLED, "true");
String[] command =
containerExecutor.getRunCommand("echo", "group1", null, null, conf,
Resource.newInstance(1024, 1));
int nodeVCores = NodeManagerHardwareUtils.getVCores(conf);
Assert.assertEquals(YarnConfiguration.DEFAULT_NM_VCORES, nodeVCores);
int cpuRate = Math.min(10000, (containerCores * 10000) / nodeVCores);
// Assert the cpu and memory limits are set correctly in the command
String[] expected =
{Shell.WINUTILS, "task", "create", "-m", "1024", "-c",
String.valueOf(cpuRate), "group1", "cmd /c " + "echo" };
Assert.assertEquals(Arrays.toString(expected), Arrays.toString(command));
conf.setBoolean(YarnConfiguration.NM_ENABLE_HARDWARE_CAPABILITY_DETECTION,
true);
int nodeCPUs = NodeManagerHardwareUtils.getNodeCPUs(conf);
float yarnCPUs = NodeManagerHardwareUtils.getContainersCPUs(conf);
nodeVCores = NodeManagerHardwareUtils.getVCores(conf);
Assert.assertEquals(nodeCPUs, (int) yarnCPUs);
Assert.assertEquals(nodeCPUs, nodeVCores);
command =
containerExecutor.getRunCommand("echo", "group1", null, null, conf,
Resource.newInstance(1024, 1));
cpuRate = Math.min(10000, (containerCores * 10000) / nodeVCores);
expected[6] = String.valueOf(cpuRate);
Assert.assertEquals(Arrays.toString(expected), Arrays.toString(command));
int yarnCpuLimit = 80;
conf.setInt(YarnConfiguration.NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT,
yarnCpuLimit);
yarnCPUs = NodeManagerHardwareUtils.getContainersCPUs(conf);
nodeVCores = NodeManagerHardwareUtils.getVCores(conf);
Assert.assertEquals(nodeCPUs * 0.8, yarnCPUs, 0.01);
if (nodeCPUs == 1) {
Assert.assertEquals(1, nodeVCores);
} else {
Assert.assertEquals((int) (nodeCPUs * 0.8), nodeVCores);
}
command =
containerExecutor.getRunCommand("echo", "group1", null, null, conf,
Resource.newInstance(1024, 1));
// we should get 100 * (1/nodeVcores) of 80% of CPU
int containerPerc = (yarnCpuLimit * containerCores) / nodeVCores;
cpuRate = Math.min(10000, 100 * containerPerc);
expected[6] = String.valueOf(cpuRate);
Assert.assertEquals(Arrays.toString(expected), Arrays.toString(command));
}
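  // Worked example of the cpu-rate arithmetic asserted above: with the
  // default 8 node vcores and a 1-vcore container,
  // cpuRate = min(10000, 1 * 10000 / 8) = 1250, i.e. winutils caps the task
  // at 12.5% of the machine. Under the 80% YARN CPU limit with, say, 4
  // usable vcores, containerPerc = 80 * 1 / 4 = 20 and
  // cpuRate = min(10000, 100 * 20) = 2000, i.e. 20%.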
}
| 7,143 | 42.560976 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/MockNodeStatusUpdater.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager;
import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.server.api.ResourceTracker;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerResponse;
import org.apache.hadoop.yarn.server.api.records.MasterKey;
import org.apache.hadoop.yarn.server.api.records.NodeStatus;
import org.apache.hadoop.yarn.server.api.records.impl.pb.MasterKeyPBImpl;
import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics;
import org.apache.hadoop.yarn.server.utils.YarnServerBuilderUtils;
/**
 * This class allows a node manager to run without communicating with a
* real RM.
*/
public class MockNodeStatusUpdater extends NodeStatusUpdaterImpl {
static final Log LOG = LogFactory.getLog(MockNodeStatusUpdater.class);
private static final RecordFactory recordFactory = RecordFactoryProvider
.getRecordFactory(null);
private ResourceTracker resourceTracker;
public MockNodeStatusUpdater(Context context, Dispatcher dispatcher,
NodeHealthCheckerService healthChecker, NodeManagerMetrics metrics) {
super(context, dispatcher, healthChecker, metrics);
resourceTracker = createResourceTracker();
}
protected ResourceTracker createResourceTracker() {
return new MockResourceTracker();
}
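  // Subclasses customize the fake RM by overriding this factory; a minimal
  // sketch (the subclass is hypothetical, not used elsewhere):
  //
  //   class FlakyNodeStatusUpdater extends MockNodeStatusUpdater {
  //     FlakyNodeStatusUpdater(Context c, Dispatcher d,
  //         NodeHealthCheckerService h, NodeManagerMetrics m) {
  //       super(c, d, h, m);
  //     }
  //     @Override
  //     protected ResourceTracker createResourceTracker() {
  //       return new MockResourceTracker() {
  //         @Override
  //         public NodeHeartbeatResponse nodeHeartbeat(
  //             NodeHeartbeatRequest request)
  //             throws YarnException, IOException {
  //           // e.g. fail the first call to exercise retry handling
  //           return super.nodeHeartbeat(request);
  //         }
  //       };
  //     }
  //   }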
@Override
protected ResourceTracker getRMClient() {
return resourceTracker;
}
@Override
protected void stopRMProxy() {
return;
}
protected static class MockResourceTracker implements ResourceTracker {
private int heartBeatID;
@Override
public RegisterNodeManagerResponse registerNodeManager(
RegisterNodeManagerRequest request) throws YarnException,
IOException {
RegisterNodeManagerResponse response = recordFactory
.newRecordInstance(RegisterNodeManagerResponse.class);
MasterKey masterKey = new MasterKeyPBImpl();
masterKey.setKeyId(123);
      masterKey.setBytes(ByteBuffer.wrap(new byte[] { (byte) 123 }));
response.setContainerTokenMasterKey(masterKey);
response.setNMTokenMasterKey(masterKey);
return response;
}
@Override
public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request)
throws YarnException, IOException {
NodeStatus nodeStatus = request.getNodeStatus();
LOG.info("Got heartbeat number " + heartBeatID);
nodeStatus.setResponseId(heartBeatID++);
NodeHeartbeatResponse nhResponse = YarnServerBuilderUtils
.newNodeHeartbeatResponse(heartBeatID, null, null,
null, null, null, 1000L);
return nhResponse;
}
@Override
public UnRegisterNodeManagerResponse unRegisterNodeManager(
UnRegisterNodeManagerRequest request) throws YarnException, IOException {
return recordFactory
.newRecordInstance(UnRegisterNodeManagerResponse.class);
}
}
}
| 4,474 | 38.254386 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/DummyContainerManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager;
import static org.junit.Assert.fail;
import java.util.Collection;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
import org.apache.hadoop.yarn.security.NMTokenIdentifier;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEvent;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEventType;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationInitedEvent;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEvent;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEventType;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerExitEvent;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerResourceLocalizedEvent;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainersLauncher;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainersLauncherEvent;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalResourceRequest;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ApplicationLocalizationEvent;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ContainerLocalizationEvent;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ContainerLocalizationRequestEvent;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizationEvent;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.LogHandler;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerEvent;
import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics;
import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
public class DummyContainerManager extends ContainerManagerImpl {
private static final Log LOG = LogFactory
.getLog(DummyContainerManager.class);
public DummyContainerManager(Context context, ContainerExecutor exec,
DeletionService deletionContext, NodeStatusUpdater nodeStatusUpdater,
NodeManagerMetrics metrics,
ApplicationACLsManager applicationACLsManager,
LocalDirsHandlerService dirsHandler) {
super(context, exec, deletionContext, nodeStatusUpdater, metrics,
applicationACLsManager, dirsHandler);
}
@Override
@SuppressWarnings("unchecked")
protected ResourceLocalizationService createResourceLocalizationService(
ContainerExecutor exec, DeletionService deletionContext, Context context) {
return new ResourceLocalizationService(super.dispatcher, exec,
deletionContext, super.dirsHandler, context) {
@Override
public void handle(LocalizationEvent event) {
switch (event.getType()) {
case INIT_APPLICATION_RESOURCES:
Application app =
((ApplicationLocalizationEvent) event).getApplication();
// Simulate event from ApplicationLocalization.
dispatcher.getEventHandler().handle(new ApplicationInitedEvent(
app.getAppId()));
break;
case INIT_CONTAINER_RESOURCES:
ContainerLocalizationRequestEvent rsrcReqs =
(ContainerLocalizationRequestEvent) event;
// simulate localization of all requested resources
for (Collection<LocalResourceRequest> rc : rsrcReqs
.getRequestedResources().values()) {
for (LocalResourceRequest req : rc) {
LOG.info("DEBUG: " + req + ":"
+ rsrcReqs.getContainer().getContainerId());
dispatcher.getEventHandler().handle(
new ContainerResourceLocalizedEvent(rsrcReqs.getContainer()
.getContainerId(), req, new Path("file:///local"
+ req.getPath().toUri().getPath())));
}
}
break;
case CLEANUP_CONTAINER_RESOURCES:
Container container =
((ContainerLocalizationEvent) event).getContainer();
// TODO: delete the container dir
this.dispatcher.getEventHandler().handle(
new ContainerEvent(container.getContainerId(),
ContainerEventType.CONTAINER_RESOURCES_CLEANEDUP));
break;
case DESTROY_APPLICATION_RESOURCES:
Application application =
((ApplicationLocalizationEvent) event).getApplication();
// decrement reference counts of all resources associated with this
// app
this.dispatcher.getEventHandler().handle(
new ApplicationEvent(application.getAppId(),
ApplicationEventType.APPLICATION_RESOURCES_CLEANEDUP));
break;
default:
fail("Unexpected event: " + event.getType());
}
}
};
}
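  // The INIT_CONTAINER_RESOURCES branch above skips real downloads: each
  // requested resource is immediately reported as localized under a
  // synthetic file:///local prefix. For example (URI illustrative), a
  // request for hdfs://nn:8020/app/job.jar is acknowledged with the local
  // path file:///local/app/job.jar.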
@Override
protected UserGroupInformation getRemoteUgi() throws YarnException {
ApplicationId appId = ApplicationId.newInstance(0, 0);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 1);
UserGroupInformation ugi =
UserGroupInformation.createRemoteUser(appAttemptId.toString());
ugi.addTokenIdentifier(new NMTokenIdentifier(appAttemptId, getContext()
.getNodeId(), "testuser", getContext().getNMTokenSecretManager().getCurrentKey()
.getKeyId()));
return ugi;
}
@Override
@SuppressWarnings("unchecked")
protected ContainersLauncher createContainersLauncher(Context context,
ContainerExecutor exec) {
return new ContainersLauncher(context, super.dispatcher, exec,
super.dirsHandler, this) {
@Override
public void handle(ContainersLauncherEvent event) {
Container container = event.getContainer();
ContainerId containerId = container.getContainerId();
switch (event.getType()) {
case LAUNCH_CONTAINER:
dispatcher.getEventHandler().handle(
new ContainerEvent(containerId,
ContainerEventType.CONTAINER_LAUNCHED));
break;
case CLEANUP_CONTAINER:
dispatcher.getEventHandler().handle(
new ContainerExitEvent(containerId,
ContainerEventType.CONTAINER_KILLED_ON_REQUEST, 0,
"Container exited with exit code 0."));
break;
}
}
};
}
@Override
protected LogHandler createLogHandler(Configuration conf,
Context context, DeletionService deletionService) {
return new LogHandler() {
@Override
public void handle(LogHandlerEvent event) {
switch (event.getType()) {
case APPLICATION_STARTED:
break;
case CONTAINER_FINISHED:
break;
case APPLICATION_FINISHED:
break;
default:
// Ignore
}
}
};
}
@Override
public void setBlockNewContainerRequests(boolean blockNewContainerRequests) {
// do nothing
}
@Override
protected void authorizeStartRequest(NMTokenIdentifier nmTokenIdentifier,
ContainerTokenIdentifier containerTokenIdentifier) throws YarnException {
// do nothing
}
@Override
protected void authorizeGetAndStopContainerRequest(ContainerId containerId,
Container container, boolean stopRequest, NMTokenIdentifier identifier) throws YarnException {
// do nothing
}
}
| 9,245 | 43.451923 | 116 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StopContainersRequest;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.server.api.ResourceTracker;
import org.apache.hadoop.yarn.server.nodemanager.NodeManager.NMContext;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.TestContainerManager;
import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics;
import org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService;
import org.apache.hadoop.yarn.server.nodemanager.security.NMContainerTokenSecretManager;
import org.apache.hadoop.yarn.server.nodemanager.security.NMTokenSecretManagerInNM;
import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
import org.junit.Test;
public class TestEventFlow {
private static final RecordFactory recordFactory =
RecordFactoryProvider.getRecordFactory(null);
private static File localDir = new File("target",
TestEventFlow.class.getName() + "-localDir").getAbsoluteFile();
private static File localLogDir = new File("target",
TestEventFlow.class.getName() + "-localLogDir").getAbsoluteFile();
private static File remoteLogDir = new File("target",
TestEventFlow.class.getName() + "-remoteLogDir").getAbsoluteFile();
private static final long SIMULATED_RM_IDENTIFIER = 1234;
@Test
public void testSuccessfulContainerLaunch() throws InterruptedException,
IOException, YarnException {
FileContext localFS = FileContext.getLocalFSFileContext();
localFS.delete(new Path(localDir.getAbsolutePath()), true);
localFS.delete(new Path(localLogDir.getAbsolutePath()), true);
localFS.delete(new Path(remoteLogDir.getAbsolutePath()), true);
localDir.mkdir();
localLogDir.mkdir();
remoteLogDir.mkdir();
YarnConfiguration conf = new YarnConfiguration();
Context context = new NMContext(new NMContainerTokenSecretManager(conf),
new NMTokenSecretManagerInNM(), null, null,
new NMNullStateStoreService()) {
@Override
public int getHttpPort() {
return 1234;
}
};
conf.set(YarnConfiguration.NM_LOCAL_DIRS, localDir.getAbsolutePath());
conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDir.getAbsolutePath());
conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
remoteLogDir.getAbsolutePath());
ContainerExecutor exec = new DefaultContainerExecutor();
exec.setConf(conf);
DeletionService del = new DeletionService(exec);
Dispatcher dispatcher = new AsyncDispatcher();
LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService();
NodeHealthCheckerService healthChecker = new NodeHealthCheckerService(
NodeManager.getNodeHealthScriptRunner(conf), dirsHandler);
healthChecker.init(conf);
NodeManagerMetrics metrics = NodeManagerMetrics.create();
NodeStatusUpdater nodeStatusUpdater =
new NodeStatusUpdaterImpl(context, dispatcher, healthChecker, metrics) {
@Override
protected ResourceTracker getRMClient() {
return new LocalRMInterface();
          }
@Override
protected void stopRMProxy() {
return;
}
@Override
protected void startStatusUpdater() {
return; // Don't start any updating thread.
}
@Override
public long getRMIdentifier() {
return SIMULATED_RM_IDENTIFIER;
}
};
DummyContainerManager containerManager =
new DummyContainerManager(context, exec, del, nodeStatusUpdater,
metrics, new ApplicationACLsManager(conf), dirsHandler);
nodeStatusUpdater.init(conf);
((NMContext)context).setContainerManager(containerManager);
nodeStatusUpdater.start();
containerManager.init(conf);
containerManager.start();
ContainerLaunchContext launchContext =
recordFactory.newRecordInstance(ContainerLaunchContext.class);
ApplicationId applicationId = ApplicationId.newInstance(0, 0);
ApplicationAttemptId applicationAttemptId =
ApplicationAttemptId.newInstance(applicationId, 0);
ContainerId cID = ContainerId.newContainerId(applicationAttemptId, 0);
String user = "testing";
StartContainerRequest scRequest =
StartContainerRequest.newInstance(launchContext,
TestContainerManager.createContainerToken(cID,
SIMULATED_RM_IDENTIFIER, context.getNodeId(), user,
context.getContainerTokenSecretManager()));
List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
list.add(scRequest);
StartContainersRequest allRequests =
StartContainersRequest.newInstance(list);
containerManager.startContainers(allRequests);
BaseContainerManagerTest.waitForContainerState(containerManager, cID,
ContainerState.RUNNING);
List<ContainerId> containerIds = new ArrayList<ContainerId>();
containerIds.add(cID);
StopContainersRequest stopRequest =
StopContainersRequest.newInstance(containerIds);
containerManager.stopContainers(stopRequest);
BaseContainerManagerTest.waitForContainerState(containerManager, cID,
ContainerState.COMPLETE);
containerManager.stop();
}
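  // BaseContainerManagerTest.waitForContainerState polls the container
  // status until the expected externally visible state shows up; a minimal
  // sketch of that idiom, assuming no timeout handling is needed:
  //
  //   GetContainerStatusesRequest req =
  //       GetContainerStatusesRequest.newInstance(Arrays.asList(cID));
  //   while (containerManager.getContainerStatuses(req)
  //       .getContainerStatuses().get(0).getState() != ContainerState.COMPLETE) {
  //     Thread.sleep(100);
  //   }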
}
| 7,054 | 40.25731 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager;
import static org.apache.hadoop.yarn.server.utils.YarnServerBuilderUtils.newNodeHeartbeatResponse;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.io.EOFException;
import java.io.File;
import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.io.retry.RetryProxy;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenIdentifier;
import org.apache.hadoop.service.Service.STATE;
import org.apache.hadoop.service.ServiceOperations;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.client.RMProxy;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeHeartbeatResponseProto;
import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
import org.apache.hadoop.yarn.server.api.ResourceTracker;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.NodeHeartbeatResponsePBImpl;
import org.apache.hadoop.yarn.server.api.records.MasterKey;
import org.apache.hadoop.yarn.server.api.records.NodeAction;
import org.apache.hadoop.yarn.server.api.records.NodeStatus;
import org.apache.hadoop.yarn.server.api.records.impl.pb.MasterKeyPBImpl;
import org.apache.hadoop.yarn.server.nodemanager.NodeManager.NMContext;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationState;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerImpl;
import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics;
import org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService;
import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService;
import org.apache.hadoop.yarn.server.nodemanager.security.NMContainerTokenSecretManager;
import org.apache.hadoop.yarn.server.nodemanager.security.NMTokenSecretManagerInNM;
import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.apache.hadoop.yarn.server.utils.YarnServerBuilderUtils;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
@SuppressWarnings("rawtypes")
public class TestNodeStatusUpdater {
// temp fix until metrics system can auto-detect itself running in unit test:
static {
DefaultMetricsSystem.setMiniClusterMode(true);
}
static final Log LOG = LogFactory.getLog(TestNodeStatusUpdater.class);
static final File basedir =
new File("target", TestNodeStatusUpdater.class.getName());
static final File nmLocalDir = new File(basedir, "nm0");
static final File tmpDir = new File(basedir, "tmpDir");
static final File remoteLogsDir = new File(basedir, "remotelogs");
static final File logsDir = new File(basedir, "logs");
private static final RecordFactory recordFactory = RecordFactoryProvider
.getRecordFactory(null);
volatile int heartBeatID = 0;
volatile Throwable nmStartError = null;
private final List<NodeId> registeredNodes = new ArrayList<NodeId>();
private boolean triggered = false;
private Configuration conf;
private NodeManager nm;
private AtomicBoolean assertionFailedInThread = new AtomicBoolean(false);
@Before
public void setUp() {
nmLocalDir.mkdirs();
tmpDir.mkdirs();
logsDir.mkdirs();
remoteLogsDir.mkdirs();
conf = createNMConfig();
}
@After
public void tearDown() {
this.registeredNodes.clear();
heartBeatID = 0;
ServiceOperations.stop(nm);
assertionFailedInThread.set(false);
DefaultMetricsSystem.shutdown();
}
public static MasterKey createMasterKey() {
MasterKey masterKey = new MasterKeyPBImpl();
masterKey.setKeyId(123);
    masterKey.setBytes(ByteBuffer.wrap(new byte[] { (byte) 123 }));
return masterKey;
}
private class MyResourceTracker implements ResourceTracker {
private final Context context;
public MyResourceTracker(Context context) {
this.context = context;
}
@Override
public RegisterNodeManagerResponse registerNodeManager(
RegisterNodeManagerRequest request) throws YarnException,
IOException {
NodeId nodeId = request.getNodeId();
Resource resource = request.getResource();
LOG.info("Registering " + nodeId.toString());
// NOTE: this really should be checking against the config value
InetSocketAddress expected = NetUtils.getConnectAddress(
conf.getSocketAddr(YarnConfiguration.NM_ADDRESS, null, -1));
Assert.assertEquals(NetUtils.getHostPortString(expected), nodeId.toString());
Assert.assertEquals(5 * 1024, resource.getMemory());
registeredNodes.add(nodeId);
RegisterNodeManagerResponse response = recordFactory
.newRecordInstance(RegisterNodeManagerResponse.class);
response.setContainerTokenMasterKey(createMasterKey());
response.setNMTokenMasterKey(createMasterKey());
return response;
}
private Map<ApplicationId, List<ContainerStatus>> getAppToContainerStatusMap(
List<ContainerStatus> containers) {
Map<ApplicationId, List<ContainerStatus>> map =
new HashMap<ApplicationId, List<ContainerStatus>>();
for (ContainerStatus cs : containers) {
ApplicationId applicationId =
cs.getContainerId().getApplicationAttemptId().getApplicationId();
List<ContainerStatus> appContainers = map.get(applicationId);
if (appContainers == null) {
appContainers = new ArrayList<ContainerStatus>();
map.put(applicationId, appContainers);
}
appContainers.add(cs);
}
return map;
}
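    // Flattened container statuses from a heartbeat are regrouped by owning
    // ApplicationId here, so the per-heartbeat branches below can assert
    // both the application count and the per-app container count, e.g. at
    // heartBeatID == 3 map.get(appId1) and map.get(appId2) each hold exactly
    // one status.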
@Override
public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request)
throws YarnException, IOException {
NodeStatus nodeStatus = request.getNodeStatus();
LOG.info("Got heartbeat number " + heartBeatID);
NodeManagerMetrics mockMetrics = mock(NodeManagerMetrics.class);
Dispatcher mockDispatcher = mock(Dispatcher.class);
EventHandler mockEventHandler = mock(EventHandler.class);
when(mockDispatcher.getEventHandler()).thenReturn(mockEventHandler);
NMStateStoreService stateStore = new NMNullStateStoreService();
nodeStatus.setResponseId(heartBeatID++);
Map<ApplicationId, List<ContainerStatus>> appToContainers =
getAppToContainerStatusMap(nodeStatus.getContainersStatuses());
ApplicationId appId1 = ApplicationId.newInstance(0, 1);
ApplicationId appId2 = ApplicationId.newInstance(0, 2);
if (heartBeatID == 1) {
Assert.assertEquals(0, nodeStatus.getContainersStatuses().size());
// Give a container to the NM.
ApplicationAttemptId appAttemptID =
ApplicationAttemptId.newInstance(appId1, 0);
ContainerId firstContainerID =
ContainerId.newContainerId(appAttemptID, heartBeatID);
ContainerLaunchContext launchContext = recordFactory
.newRecordInstance(ContainerLaunchContext.class);
Resource resource = BuilderUtils.newResource(2, 1);
long currentTime = System.currentTimeMillis();
String user = "testUser";
ContainerTokenIdentifier containerToken = BuilderUtils
.newContainerTokenIdentifier(BuilderUtils.newContainerToken(
firstContainerID, InetAddress.getByName("localhost")
.getCanonicalHostName(), 1234, user, resource,
currentTime + 10000, 123, "password".getBytes(), currentTime));
Container container = new ContainerImpl(conf, mockDispatcher,
stateStore, launchContext, null, mockMetrics, containerToken);
this.context.getContainers().put(firstContainerID, container);
} else if (heartBeatID == 2) {
// Checks on the RM end
Assert.assertEquals("Number of applications should only be one!", 1,
nodeStatus.getContainersStatuses().size());
Assert.assertEquals("Number of container for the app should be one!",
1, appToContainers.get(appId1).size());
// Checks on the NM end
ConcurrentMap<ContainerId, Container> activeContainers =
this.context.getContainers();
Assert.assertEquals(1, activeContainers.size());
// Give another container to the NM.
ApplicationAttemptId appAttemptID =
ApplicationAttemptId.newInstance(appId2, 0);
ContainerId secondContainerID =
ContainerId.newContainerId(appAttemptID, heartBeatID);
ContainerLaunchContext launchContext = recordFactory
.newRecordInstance(ContainerLaunchContext.class);
long currentTime = System.currentTimeMillis();
String user = "testUser";
Resource resource = BuilderUtils.newResource(3, 1);
ContainerTokenIdentifier containerToken = BuilderUtils
.newContainerTokenIdentifier(BuilderUtils.newContainerToken(
secondContainerID, InetAddress.getByName("localhost")
.getCanonicalHostName(), 1234, user, resource,
currentTime + 10000, 123, "password".getBytes(), currentTime));
Container container = new ContainerImpl(conf, mockDispatcher,
stateStore, launchContext, null, mockMetrics, containerToken);
this.context.getContainers().put(secondContainerID, container);
} else if (heartBeatID == 3) {
// Checks on the RM end
Assert.assertEquals("Number of applications should have two!", 2,
appToContainers.size());
Assert.assertEquals("Number of container for the app-1 should be only one!",
1, appToContainers.get(appId1).size());
Assert.assertEquals("Number of container for the app-2 should be only one!",
1, appToContainers.get(appId2).size());
// Checks on the NM end
ConcurrentMap<ContainerId, Container> activeContainers =
this.context.getContainers();
Assert.assertEquals(2, activeContainers.size());
}
NodeHeartbeatResponse nhResponse = YarnServerBuilderUtils.
newNodeHeartbeatResponse(heartBeatID, null, null, null, null, null,
1000L);
return nhResponse;
}
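    // Heartbeat script driven by heartBeatID, for reference:
    //   #1: NM reports no containers; the tracker plants a container of app 1
    //   #2: NM reports that container; the tracker plants one of app 2
    //   #3: NM reports both; the tracker checks one container per application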
@Override
public UnRegisterNodeManagerResponse unRegisterNodeManager(
UnRegisterNodeManagerRequest request) throws YarnException, IOException {
return recordFactory
.newRecordInstance(UnRegisterNodeManagerResponse.class);
}
}
private class MyNodeStatusUpdater extends NodeStatusUpdaterImpl {
public ResourceTracker resourceTracker;
private Context context;
public MyNodeStatusUpdater(Context context, Dispatcher dispatcher,
NodeHealthCheckerService healthChecker, NodeManagerMetrics metrics) {
super(context, dispatcher, healthChecker, metrics);
this.context = context;
resourceTracker = new MyResourceTracker(this.context);
}
@Override
protected ResourceTracker getRMClient() {
return resourceTracker;
}
@Override
protected void stopRMProxy() {
return;
}
}
  // Test that NodeStatusUpdater sends the right container statuses each time
  // it heartbeats.
private class MyNodeStatusUpdater2 extends NodeStatusUpdaterImpl {
public ResourceTracker resourceTracker;
public MyNodeStatusUpdater2(Context context, Dispatcher dispatcher,
NodeHealthCheckerService healthChecker, NodeManagerMetrics metrics) {
super(context, dispatcher, healthChecker, metrics);
resourceTracker = new MyResourceTracker4(context);
}
@Override
protected ResourceTracker getRMClient() {
return resourceTracker;
}
@Override
protected void stopRMProxy() {
return;
}
}
private class MyNodeStatusUpdater3 extends NodeStatusUpdaterImpl {
public ResourceTracker resourceTracker;
private Context context;
public MyNodeStatusUpdater3(Context context, Dispatcher dispatcher,
NodeHealthCheckerService healthChecker, NodeManagerMetrics metrics) {
super(context, dispatcher, healthChecker, metrics);
this.context = context;
this.resourceTracker = new MyResourceTracker3(this.context);
}
@Override
protected ResourceTracker getRMClient() {
return resourceTracker;
}
@Override
protected void stopRMProxy() {
return;
}
@Override
protected boolean isTokenKeepAliveEnabled(Configuration conf) {
return true;
}
}
private class MyNodeStatusUpdater4 extends NodeStatusUpdaterImpl {
private final long rmStartIntervalMS;
private final boolean rmNeverStart;
public ResourceTracker resourceTracker;
public MyNodeStatusUpdater4(Context context, Dispatcher dispatcher,
NodeHealthCheckerService healthChecker, NodeManagerMetrics metrics,
long rmStartIntervalMS, boolean rmNeverStart) {
super(context, dispatcher, healthChecker, metrics);
this.rmStartIntervalMS = rmStartIntervalMS;
this.rmNeverStart = rmNeverStart;
}
@Override
protected void serviceStart() throws Exception {
//record the startup time
super.serviceStart();
}
@Override
protected ResourceTracker getRMClient() throws IOException {
RetryPolicy retryPolicy = RMProxy.createRetryPolicy(conf);
resourceTracker =
(ResourceTracker) RetryProxy.create(ResourceTracker.class,
new MyResourceTracker6(rmStartIntervalMS, rmNeverStart),
retryPolicy);
return resourceTracker;
}
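    // The tracker is wrapped in a JDK dynamic proxy whose retry policy comes
    // from the client-side RM configuration; a sketch of the same idiom with
    // the standard connect-retry knobs (values illustrative):
    //
    //   conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS,
    //       20000);
    //   conf.setLong(
    //       YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS, 1000);
    //   RetryPolicy policy = RMProxy.createRetryPolicy(conf);
    //   ResourceTracker client = (ResourceTracker) RetryProxy.create(
    //       ResourceTracker.class, realTracker, policy);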
private boolean isTriggered() {
return triggered;
}
@Override
protected void stopRMProxy() {
return;
}
}
private class MyNodeStatusUpdater5 extends NodeStatusUpdaterImpl {
private ResourceTracker resourceTracker;
private Configuration conf;
public MyNodeStatusUpdater5(Context context, Dispatcher dispatcher,
NodeHealthCheckerService healthChecker, NodeManagerMetrics metrics, Configuration conf) {
super(context, dispatcher, healthChecker, metrics);
resourceTracker = new MyResourceTracker5();
this.conf = conf;
}
@Override
protected ResourceTracker getRMClient() {
RetryPolicy retryPolicy = RMProxy.createRetryPolicy(conf);
return (ResourceTracker) RetryProxy.create(ResourceTracker.class,
resourceTracker, retryPolicy);
}
@Override
protected void stopRMProxy() {
return;
}
}
private class MyNodeManager extends NodeManager {
private MyNodeStatusUpdater3 nodeStatusUpdater;
@Override
protected NodeStatusUpdater createNodeStatusUpdater(Context context,
Dispatcher dispatcher, NodeHealthCheckerService healthChecker) {
this.nodeStatusUpdater =
new MyNodeStatusUpdater3(context, dispatcher, healthChecker, metrics);
return this.nodeStatusUpdater;
}
public MyNodeStatusUpdater3 getNodeStatusUpdater() {
return this.nodeStatusUpdater;
}
}
private class MyNodeManager2 extends NodeManager {
public boolean isStopped = false;
private NodeStatusUpdater nodeStatusUpdater;
private CyclicBarrier syncBarrier;
private Configuration conf;
public MyNodeManager2 (CyclicBarrier syncBarrier, Configuration conf) {
this.syncBarrier = syncBarrier;
this.conf = conf;
}
@Override
protected NodeStatusUpdater createNodeStatusUpdater(Context context,
Dispatcher dispatcher, NodeHealthCheckerService healthChecker) {
nodeStatusUpdater =
new MyNodeStatusUpdater5(context, dispatcher, healthChecker,
metrics, conf);
return nodeStatusUpdater;
}
@Override
protected void serviceStop() throws Exception {
System.out.println("Called stooppppp");
super.serviceStop();
isStopped = true;
ConcurrentMap<ApplicationId, Application> applications =
getNMContext().getApplications();
// ensure that applications are empty
if(!applications.isEmpty()) {
assertionFailedInThread.set(true);
}
syncBarrier.await(10000, TimeUnit.MILLISECONDS);
}
}
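  // The barrier above gives the test thread a rendezvous point with the NM
  // stop path; the test side of the handshake looks roughly like this
  // (sketch, not a verbatim excerpt of any test below):
  //
  //   CyclicBarrier syncBarrier = new CyclicBarrier(2);
  //   nm = new MyNodeManager2(syncBarrier, conf);
  //   nm.init(conf);
  //   nm.start();
  //   // trigger an RM-side SHUTDOWN action, then wait for serviceStop()
  //   syncBarrier.await(10000, TimeUnit.MILLISECONDS);
  //   Assert.assertFalse(assertionFailedInThread.get());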
private class MyResourceTracker2 implements ResourceTracker {
public NodeAction heartBeatNodeAction = NodeAction.NORMAL;
public NodeAction registerNodeAction = NodeAction.NORMAL;
public String shutDownMessage = "";
public String rmVersion = "3.0.1";
@Override
public RegisterNodeManagerResponse registerNodeManager(
RegisterNodeManagerRequest request) throws YarnException,
IOException {
RegisterNodeManagerResponse response = recordFactory
.newRecordInstance(RegisterNodeManagerResponse.class);
      response.setNodeAction(registerNodeAction);
response.setContainerTokenMasterKey(createMasterKey());
response.setNMTokenMasterKey(createMasterKey());
response.setDiagnosticsMessage(shutDownMessage);
response.setRMVersion(rmVersion);
return response;
}
@Override
public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request)
throws YarnException, IOException {
NodeStatus nodeStatus = request.getNodeStatus();
nodeStatus.setResponseId(heartBeatID++);
NodeHeartbeatResponse nhResponse = YarnServerBuilderUtils.
newNodeHeartbeatResponse(heartBeatID, heartBeatNodeAction, null,
null, null, null, 1000L);
nhResponse.setDiagnosticsMessage(shutDownMessage);
return nhResponse;
}
@Override
public UnRegisterNodeManagerResponse unRegisterNodeManager(
UnRegisterNodeManagerRequest request) throws YarnException, IOException {
return recordFactory
.newRecordInstance(UnRegisterNodeManagerResponse.class);
}
}
private class MyResourceTracker3 implements ResourceTracker {
public NodeAction heartBeatNodeAction = NodeAction.NORMAL;
public NodeAction registerNodeAction = NodeAction.NORMAL;
private Map<ApplicationId, List<Long>> keepAliveRequests =
new HashMap<ApplicationId, List<Long>>();
private ApplicationId appId = BuilderUtils.newApplicationId(1, 1);
private final Context context;
MyResourceTracker3(Context context) {
this.context = context;
}
@Override
public RegisterNodeManagerResponse registerNodeManager(
RegisterNodeManagerRequest request) throws YarnException,
IOException {
RegisterNodeManagerResponse response =
recordFactory.newRecordInstance(RegisterNodeManagerResponse.class);
response.setNodeAction(registerNodeAction);
response.setContainerTokenMasterKey(createMasterKey());
response.setNMTokenMasterKey(createMasterKey());
return response;
}
@Override
public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request)
throws YarnException, IOException {
LOG.info("Got heartBeatId: [" + heartBeatID +"]");
NodeStatus nodeStatus = request.getNodeStatus();
nodeStatus.setResponseId(heartBeatID++);
NodeHeartbeatResponse nhResponse = YarnServerBuilderUtils.
newNodeHeartbeatResponse(heartBeatID, heartBeatNodeAction, null,
null, null, null, 1000L);
if (nodeStatus.getKeepAliveApplications() != null
&& nodeStatus.getKeepAliveApplications().size() > 0) {
for (ApplicationId appId : nodeStatus.getKeepAliveApplications()) {
List<Long> list = keepAliveRequests.get(appId);
if (list == null) {
list = new LinkedList<Long>();
keepAliveRequests.put(appId, list);
}
list.add(System.currentTimeMillis());
}
}
if (heartBeatID == 2) {
LOG.info("Sending FINISH_APP for application: [" + appId + "]");
this.context.getApplications().put(appId, mock(Application.class));
nhResponse.addAllApplicationsToCleanup(Collections.singletonList(appId));
}
return nhResponse;
}
@Override
public UnRegisterNodeManagerResponse unRegisterNodeManager(
UnRegisterNodeManagerRequest request) throws YarnException, IOException {
return recordFactory
.newRecordInstance(UnRegisterNodeManagerResponse.class);
}
}
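  // MyResourceTracker3 scripts two RM behaviours for
  // testApplicationKeepAlive: it timestamps every keep-alive request per
  // application, and on the second heartbeat it asks the NM to clean up
  // appId via the response's applications-to-cleanup list.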
// Test NodeStatusUpdater sends the right container statuses each time it
// heart beats.
private Credentials expectedCredentials = new Credentials();
private class MyResourceTracker4 implements ResourceTracker {
public NodeAction registerNodeAction = NodeAction.NORMAL;
public NodeAction heartBeatNodeAction = NodeAction.NORMAL;
private Context context;
private final ContainerStatus containerStatus2 =
createContainerStatus(2, ContainerState.RUNNING);
private final ContainerStatus containerStatus3 =
createContainerStatus(3, ContainerState.COMPLETE);
private final ContainerStatus containerStatus4 =
createContainerStatus(4, ContainerState.RUNNING);
private final ContainerStatus containerStatus5 =
createContainerStatus(5, ContainerState.COMPLETE);
public MyResourceTracker4(Context context) {
// create app Credentials
org.apache.hadoop.security.token.Token<DelegationTokenIdentifier> token1 =
new org.apache.hadoop.security.token.Token<DelegationTokenIdentifier>();
token1.setKind(new Text("kind1"));
expectedCredentials.addToken(new Text("token1"), token1);
this.context = context;
}
@Override
public RegisterNodeManagerResponse registerNodeManager(
RegisterNodeManagerRequest request) throws YarnException, IOException {
RegisterNodeManagerResponse response =
recordFactory.newRecordInstance(RegisterNodeManagerResponse.class);
response.setNodeAction(registerNodeAction);
response.setContainerTokenMasterKey(createMasterKey());
response.setNMTokenMasterKey(createMasterKey());
return response;
}
@Override
public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request)
throws YarnException, IOException {
      List<ContainerId> finishedContainersPulledByAM =
          new ArrayList<ContainerId>();
try {
if (heartBeatID == 0) {
Assert.assertEquals(0, request.getNodeStatus().getContainersStatuses()
.size());
Assert.assertEquals(0, context.getContainers().size());
} else if (heartBeatID == 1) {
List<ContainerStatus> statuses =
request.getNodeStatus().getContainersStatuses();
Assert.assertEquals(2, statuses.size());
Assert.assertEquals(2, context.getContainers().size());
boolean container2Exist = false, container3Exist = false;
for (ContainerStatus status : statuses) {
if (status.getContainerId().equals(
containerStatus2.getContainerId())) {
Assert.assertTrue(status.getState().equals(
containerStatus2.getState()));
container2Exist = true;
}
if (status.getContainerId().equals(
containerStatus3.getContainerId())) {
Assert.assertTrue(status.getState().equals(
containerStatus3.getState()));
container3Exist = true;
}
}
Assert.assertTrue(container2Exist && container3Exist);
// should throw exception that can be retried by the
// nodeStatusUpdaterRunnable, otherwise nm just shuts down and the
// test passes.
throw new YarnRuntimeException("Lost the heartbeat response");
} else if (heartBeatID == 2 || heartBeatID == 3) {
List<ContainerStatus> statuses =
request.getNodeStatus().getContainersStatuses();
if (heartBeatID == 2) {
// NM should send completed containers again, since the last
// heartbeat is lost.
Assert.assertEquals(4, statuses.size());
} else {
// NM should not send completed containers again, since the last
// heartbeat is successful.
Assert.assertEquals(2, statuses.size());
}
Assert.assertEquals(4, context.getContainers().size());
boolean container2Exist = false, container3Exist = false,
container4Exist = false, container5Exist = false;
for (ContainerStatus status : statuses) {
if (status.getContainerId().equals(
containerStatus2.getContainerId())) {
Assert.assertTrue(status.getState().equals(
containerStatus2.getState()));
container2Exist = true;
}
if (status.getContainerId().equals(
containerStatus3.getContainerId())) {
Assert.assertTrue(status.getState().equals(
containerStatus3.getState()));
container3Exist = true;
}
if (status.getContainerId().equals(
containerStatus4.getContainerId())) {
Assert.assertTrue(status.getState().equals(
containerStatus4.getState()));
container4Exist = true;
}
if (status.getContainerId().equals(
containerStatus5.getContainerId())) {
Assert.assertTrue(status.getState().equals(
containerStatus5.getState()));
container5Exist = true;
}
}
if (heartBeatID == 2) {
Assert.assertTrue(container2Exist && container3Exist
&& container4Exist && container5Exist);
} else {
            // NM does not send completed containers again
Assert.assertTrue(container2Exist && !container3Exist
&& container4Exist && !container5Exist);
}
if (heartBeatID == 3) {
finishedContainersPulledByAM.add(containerStatus3.getContainerId());
}
} else if (heartBeatID == 4) {
List<ContainerStatus> statuses =
request.getNodeStatus().getContainersStatuses();
Assert.assertEquals(2, statuses.size());
// Container 3 is acked by AM, hence removed from context
Assert.assertEquals(3, context.getContainers().size());
boolean container3Exist = false;
for (ContainerStatus status : statuses) {
if (status.getContainerId().equals(
containerStatus3.getContainerId())) {
container3Exist = true;
}
}
Assert.assertFalse(container3Exist);
}
} catch (AssertionError error) {
error.printStackTrace();
assertionFailedInThread.set(true);
} finally {
heartBeatID++;
}
NodeStatus nodeStatus = request.getNodeStatus();
nodeStatus.setResponseId(heartBeatID);
NodeHeartbeatResponse nhResponse =
YarnServerBuilderUtils.newNodeHeartbeatResponse(heartBeatID,
heartBeatNodeAction, null, null, null, null, 1000L);
nhResponse.addContainersToBeRemovedFromNM(finishedContainersPulledByAM);
Map<ApplicationId, ByteBuffer> appCredentials =
new HashMap<ApplicationId, ByteBuffer>();
DataOutputBuffer dob = new DataOutputBuffer();
expectedCredentials.writeTokenStorageToStream(dob);
ByteBuffer byteBuffer1 =
ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
appCredentials.put(ApplicationId.newInstance(1234, 1), byteBuffer1);
nhResponse.setSystemCredentialsForApps(appCredentials);
return nhResponse;
}
@Override
public UnRegisterNodeManagerResponse unRegisterNodeManager(
UnRegisterNodeManagerRequest request) throws YarnException, IOException {
return recordFactory
.newRecordInstance(UnRegisterNodeManagerResponse.class);
}
}
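  // Illustrative sketch (not used by the tests): reading back the
  // credential bytes that MyResourceTracker4 ships in its heartbeat
  // response. Assumes the buffer was produced by
  // Credentials#writeTokenStorageToStream as in nodeHeartbeat() above.
  @SuppressWarnings("unused")
  private static Credentials readCredentialsSketch(ByteBuffer buffer)
      throws IOException {
    org.apache.hadoop.io.DataInputByteBuffer dib =
        new org.apache.hadoop.io.DataInputByteBuffer();
    dib.reset(buffer.duplicate());
    Credentials credentials = new Credentials();
    credentials.readTokenStorageStream(dib);
    return credentials;
  }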
private class MyResourceTracker5 implements ResourceTracker {
public NodeAction registerNodeAction = NodeAction.NORMAL;
@Override
public RegisterNodeManagerResponse registerNodeManager(
RegisterNodeManagerRequest request) throws YarnException,
IOException {
RegisterNodeManagerResponse response = recordFactory
.newRecordInstance(RegisterNodeManagerResponse.class);
      response.setNodeAction(registerNodeAction);
response.setContainerTokenMasterKey(createMasterKey());
response.setNMTokenMasterKey(createMasterKey());
return response;
}
@Override
public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request)
throws YarnException, IOException {
heartBeatID++;
      if (heartBeatID == 1) {
        // EOFException should be retried as well.
        throw new EOFException("NodeHeartbeat exception");
      } else {
        throw new java.net.ConnectException(
            "NodeHeartbeat exception");
      }
}
@Override
public UnRegisterNodeManagerResponse unRegisterNodeManager(
UnRegisterNodeManagerRequest request) throws YarnException, IOException {
return recordFactory
.newRecordInstance(UnRegisterNodeManagerResponse.class);
}
}
private class MyResourceTracker6 implements ResourceTracker {
private long rmStartIntervalMS;
private boolean rmNeverStart;
private final long waitStartTime;
public MyResourceTracker6(long rmStartIntervalMS, boolean rmNeverStart) {
this.rmStartIntervalMS = rmStartIntervalMS;
this.rmNeverStart = rmNeverStart;
this.waitStartTime = System.currentTimeMillis();
}
@Override
    public RegisterNodeManagerResponse registerNodeManager(
        RegisterNodeManagerRequest request) throws YarnException,
        IOException {
if (System.currentTimeMillis() - waitStartTime <= rmStartIntervalMS
|| rmNeverStart) {
throw new java.net.ConnectException("Faking RM start failure as start "
+ "delay timer has not expired.");
} else {
NodeId nodeId = request.getNodeId();
Resource resource = request.getResource();
LOG.info("Registering " + nodeId.toString());
// NOTE: this really should be checking against the config value
InetSocketAddress expected = NetUtils.getConnectAddress(
conf.getSocketAddr(YarnConfiguration.NM_ADDRESS, null, -1));
Assert.assertEquals(NetUtils.getHostPortString(expected),
nodeId.toString());
Assert.assertEquals(5 * 1024, resource.getMemory());
registeredNodes.add(nodeId);
RegisterNodeManagerResponse response = recordFactory
.newRecordInstance(RegisterNodeManagerResponse.class);
triggered = true;
return response;
}
}
@Override
public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request)
throws YarnException, IOException {
NodeStatus nodeStatus = request.getNodeStatus();
nodeStatus.setResponseId(heartBeatID++);
NodeHeartbeatResponse nhResponse = YarnServerBuilderUtils.
newNodeHeartbeatResponse(heartBeatID, NodeAction.NORMAL, null,
null, null, null, 1000L);
return nhResponse;
}
@Override
public UnRegisterNodeManagerResponse unRegisterNodeManager(
UnRegisterNodeManagerRequest request) throws YarnException, IOException {
return recordFactory
.newRecordInstance(UnRegisterNodeManagerResponse.class);
}
}
@Before
public void clearError() {
nmStartError = null;
}
@After
public void deleteBaseDir() throws IOException {
FileContext lfs = FileContext.getLocalFSFileContext();
lfs.delete(new Path(basedir.getPath()), true);
}
@Test(timeout = 90000)
public void testRecentlyFinishedContainers() throws Exception {
NodeManager nm = new NodeManager();
YarnConfiguration conf = new YarnConfiguration();
conf.set(
NodeStatusUpdaterImpl.YARN_NODEMANAGER_DURATION_TO_TRACK_STOPPED_CONTAINERS,
"10000");
nm.init(conf);
NodeStatusUpdaterImpl nodeStatusUpdater =
(NodeStatusUpdaterImpl) nm.getNodeStatusUpdater();
ApplicationId appId = ApplicationId.newInstance(0, 0);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 0);
ContainerId cId = ContainerId.newContainerId(appAttemptId, 0);
nm.getNMContext().getApplications().putIfAbsent(appId,
mock(Application.class));
nm.getNMContext().getContainers().putIfAbsent(cId, mock(Container.class));
nodeStatusUpdater.addCompletedContainer(cId);
Assert.assertTrue(nodeStatusUpdater.isContainerRecentlyStopped(cId));
nm.getNMContext().getContainers().remove(cId);
long time1 = System.currentTimeMillis();
int waitInterval = 15;
while (waitInterval-- > 0
&& nodeStatusUpdater.isContainerRecentlyStopped(cId)) {
nodeStatusUpdater.removeVeryOldStoppedContainersFromCache();
Thread.sleep(1000);
}
long time2 = System.currentTimeMillis();
    // By this time the container should have been removed from the cache;
    // verify it.
Assert.assertFalse(nodeStatusUpdater.isContainerRecentlyStopped(cId));
Assert.assertTrue((time2 - time1) >= 10000 && (time2 - time1) <= 250000);
}
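  // The loop above exercises NodeStatusUpdaterImpl's stopped-container
  // cache: with the tracking duration set to 10000 ms the completed
  // container must stay "recently stopped" for at least that long and be
  // evicted by removeVeryOldStoppedContainersFromCache() soon afterwards,
  // which is what the (time2 - time1) bounds assert.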
@Test(timeout = 90000)
public void testRemovePreviousCompletedContainersFromContext() throws Exception {
NodeManager nm = new NodeManager();
YarnConfiguration conf = new YarnConfiguration();
conf.set(
NodeStatusUpdaterImpl
.YARN_NODEMANAGER_DURATION_TO_TRACK_STOPPED_CONTAINERS,
"10000");
nm.init(conf);
NodeStatusUpdaterImpl nodeStatusUpdater =
(NodeStatusUpdaterImpl) nm.getNodeStatusUpdater();
ApplicationId appId = ApplicationId.newInstance(0, 0);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 0);
ContainerId cId = ContainerId.newContainerId(appAttemptId, 1);
Token containerToken =
BuilderUtils.newContainerToken(cId, "anyHost", 1234, "anyUser",
BuilderUtils.newResource(1024, 1), 0, 123,
"password".getBytes(), 0);
Container anyCompletedContainer = new ContainerImpl(conf, null,
null, null, null, null,
BuilderUtils.newContainerTokenIdentifier(containerToken)) {
@Override
public ContainerState getCurrentState() {
return ContainerState.COMPLETE;
}
@Override
public org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState getContainerState() {
return org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState.DONE;
}
};
ContainerId runningContainerId =
ContainerId.newContainerId(appAttemptId, 3);
Token runningContainerToken =
BuilderUtils.newContainerToken(runningContainerId, "anyHost",
1234, "anyUser", BuilderUtils.newResource(1024, 1), 0, 123,
"password".getBytes(), 0);
Container runningContainer =
new ContainerImpl(conf, null, null, null, null, null,
BuilderUtils.newContainerTokenIdentifier(runningContainerToken)) {
@Override
public ContainerState getCurrentState() {
return ContainerState.RUNNING;
}
@Override
public org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState getContainerState() {
return org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState.RUNNING;
}
};
nm.getNMContext().getApplications().putIfAbsent(appId,
mock(Application.class));
nm.getNMContext().getContainers().put(cId, anyCompletedContainer);
nm.getNMContext().getContainers()
.put(runningContainerId, runningContainer);
Assert.assertEquals(2, nodeStatusUpdater.getContainerStatuses().size());
List<ContainerId> ackedContainers = new ArrayList<ContainerId>();
ackedContainers.add(cId);
ackedContainers.add(runningContainerId);
nodeStatusUpdater.removeOrTrackCompletedContainersFromContext(ackedContainers);
Set<ContainerId> containerIdSet = new HashSet<ContainerId>();
List<ContainerStatus> containerStatuses = nodeStatusUpdater.getContainerStatuses();
for (ContainerStatus status : containerStatuses) {
containerIdSet.add(status.getContainerId());
}
Assert.assertEquals(1, containerStatuses.size());
// completed container is removed;
Assert.assertFalse(containerIdSet.contains(cId));
// running container is not removed;
Assert.assertTrue(containerIdSet.contains(runningContainerId));
}
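  // removeOrTrackCompletedContainersFromContext() only drops containers
  // in the COMPLETE state, so acking the running container is a no-op;
  // that is why it still appears in the statuses checked above.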
@Test
public void testCleanedupApplicationContainerCleanup() throws IOException {
NodeManager nm = new NodeManager();
YarnConfiguration conf = new YarnConfiguration();
conf.set(NodeStatusUpdaterImpl
.YARN_NODEMANAGER_DURATION_TO_TRACK_STOPPED_CONTAINERS,
"1000000");
nm.init(conf);
NodeStatusUpdaterImpl nodeStatusUpdater =
(NodeStatusUpdaterImpl) nm.getNodeStatusUpdater();
ApplicationId appId = ApplicationId.newInstance(0, 0);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 0);
ContainerId cId = ContainerId.newContainerId(appAttemptId, 1);
Token containerToken =
BuilderUtils.newContainerToken(cId, "anyHost", 1234, "anyUser",
BuilderUtils.newResource(1024, 1), 0, 123,
"password".getBytes(), 0);
Container anyCompletedContainer = new ContainerImpl(conf, null,
null, null, null, null,
BuilderUtils.newContainerTokenIdentifier(containerToken)) {
@Override
public ContainerState getCurrentState() {
return ContainerState.COMPLETE;
}
};
Application application = mock(Application.class);
when(application.getApplicationState()).thenReturn(ApplicationState.RUNNING);
nm.getNMContext().getApplications().putIfAbsent(appId, application);
nm.getNMContext().getContainers().put(cId, anyCompletedContainer);
Assert.assertEquals(1, nodeStatusUpdater.getContainerStatuses().size());
when(application.getApplicationState()).thenReturn(
ApplicationState.FINISHING_CONTAINERS_WAIT);
    // The completed container is retained and may be fetched repeatedly
    // without being dropped, guarding against a lost heartbeat.
    Assert.assertEquals(1, nodeStatusUpdater.getContainerStatuses().size());
    Assert.assertEquals(1, nodeStatusUpdater.getContainerStatuses().size());
nm.getNMContext().getContainers().put(cId, anyCompletedContainer);
nm.getNMContext().getApplications().remove(appId);
    // Even after the application is removed from the context, the
    // completed container is still reported, again in case the previous
    // heartbeat was lost.
    Assert.assertEquals(1, nodeStatusUpdater.getContainerStatuses().size());
    Assert.assertEquals(1, nodeStatusUpdater.getContainerStatuses().size());
}
@Test
public void testNMRegistration() throws InterruptedException {
nm = new NodeManager() {
@Override
protected NodeStatusUpdater createNodeStatusUpdater(Context context,
Dispatcher dispatcher, NodeHealthCheckerService healthChecker) {
return new MyNodeStatusUpdater(context, dispatcher, healthChecker,
metrics);
}
};
YarnConfiguration conf = createNMConfig();
nm.init(conf);
// verify that the last service is the nodeStatusUpdater (ie registration
// with RM)
Object[] services = nm.getServices().toArray();
Object lastService = services[services.length-1];
Assert.assertTrue("last service is NOT the node status updater",
lastService instanceof NodeStatusUpdater);
new Thread() {
public void run() {
try {
nm.start();
} catch (Throwable e) {
TestNodeStatusUpdater.this.nmStartError = e;
throw new YarnRuntimeException(e);
}
}
}.start();
System.out.println(" ----- thread already started.."
+ nm.getServiceState());
int waitCount = 0;
while (nm.getServiceState() == STATE.INITED && waitCount++ != 50) {
LOG.info("Waiting for NM to start..");
if (nmStartError != null) {
LOG.error("Error during startup. ", nmStartError);
Assert.fail(nmStartError.getCause().getMessage());
}
Thread.sleep(2000);
}
if (nm.getServiceState() != STATE.STARTED) {
// NM could have failed.
Assert.fail("NodeManager failed to start");
}
waitCount = 0;
while (heartBeatID <= 3 && waitCount++ != 200) {
Thread.sleep(1000);
}
Assert.assertFalse(heartBeatID <= 3);
Assert.assertEquals("Number of registered NMs is wrong!!", 1,
this.registeredNodes.size());
nm.stop();
}
@Test
public void testStopReentrant() throws Exception {
final AtomicInteger numCleanups = new AtomicInteger(0);
nm = new NodeManager() {
@Override
protected NodeStatusUpdater createNodeStatusUpdater(Context context,
Dispatcher dispatcher, NodeHealthCheckerService healthChecker) {
MyNodeStatusUpdater myNodeStatusUpdater = new MyNodeStatusUpdater(
context, dispatcher, healthChecker, metrics);
MyResourceTracker2 myResourceTracker2 = new MyResourceTracker2();
myResourceTracker2.heartBeatNodeAction = NodeAction.SHUTDOWN;
myNodeStatusUpdater.resourceTracker = myResourceTracker2;
return myNodeStatusUpdater;
}
@Override
protected ContainerManagerImpl createContainerManager(Context context,
ContainerExecutor exec, DeletionService del,
NodeStatusUpdater nodeStatusUpdater,
ApplicationACLsManager aclsManager,
LocalDirsHandlerService dirsHandler) {
return new ContainerManagerImpl(context, exec, del, nodeStatusUpdater,
metrics, aclsManager, dirsHandler) {
@Override
public void cleanUpApplicationsOnNMShutDown() {
super.cleanUpApplicationsOnNMShutDown();
numCleanups.incrementAndGet();
}
};
}
};
YarnConfiguration conf = createNMConfig();
nm.init(conf);
nm.start();
int waitCount = 0;
while (heartBeatID < 1 && waitCount++ != 200) {
Thread.sleep(500);
}
Assert.assertFalse(heartBeatID < 1);
// Meanwhile call stop directly as the shutdown hook would
nm.stop();
// NM takes a while to reach the STOPPED state.
waitCount = 0;
while (nm.getServiceState() != STATE.STOPPED && waitCount++ != 20) {
LOG.info("Waiting for NM to stop..");
Thread.sleep(1000);
}
Assert.assertEquals(STATE.STOPPED, nm.getServiceState());
Assert.assertEquals(numCleanups.get(), 1);
}
@Test
  public void testNodeDecommission() throws Exception {
nm = getNodeManager(NodeAction.SHUTDOWN);
YarnConfiguration conf = createNMConfig();
nm.init(conf);
Assert.assertEquals(STATE.INITED, nm.getServiceState());
nm.start();
int waitCount = 0;
while (heartBeatID < 1 && waitCount++ != 200) {
Thread.sleep(500);
}
Assert.assertFalse(heartBeatID < 1);
Assert.assertTrue(nm.getNMContext().getDecommissioned());
// NM takes a while to reach the STOPPED state.
waitCount = 0;
while (nm.getServiceState() != STATE.STOPPED && waitCount++ != 20) {
LOG.info("Waiting for NM to stop..");
Thread.sleep(1000);
}
Assert.assertEquals(STATE.STOPPED, nm.getServiceState());
}
private abstract class NodeManagerWithCustomNodeStatusUpdater extends NodeManager {
private NodeStatusUpdater updater;
private NodeManagerWithCustomNodeStatusUpdater() {
}
@Override
protected NodeStatusUpdater createNodeStatusUpdater(Context context,
Dispatcher dispatcher,
NodeHealthCheckerService healthChecker) {
updater = createUpdater(context, dispatcher, healthChecker);
return updater;
}
public NodeStatusUpdater getUpdater() {
return updater;
}
abstract NodeStatusUpdater createUpdater(Context context,
Dispatcher dispatcher,
NodeHealthCheckerService healthChecker);
}
@Test
public void testNMShutdownForRegistrationFailure() throws Exception {
nm = new NodeManagerWithCustomNodeStatusUpdater() {
@Override
protected NodeStatusUpdater createUpdater(Context context,
Dispatcher dispatcher, NodeHealthCheckerService healthChecker) {
MyNodeStatusUpdater nodeStatusUpdater = new MyNodeStatusUpdater(
context, dispatcher, healthChecker, metrics);
MyResourceTracker2 myResourceTracker2 = new MyResourceTracker2();
myResourceTracker2.registerNodeAction = NodeAction.SHUTDOWN;
myResourceTracker2.shutDownMessage = "RM Shutting Down Node";
nodeStatusUpdater.resourceTracker = myResourceTracker2;
return nodeStatusUpdater;
}
};
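    // Note: "Recieved" (sic) below matches the misspelled message emitted
    // by NodeStatusUpdaterImpl; the contains() check in
    // verifyNodeStartFailure() depends on it staying byte-for-byte.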
verifyNodeStartFailure(
"Recieved SHUTDOWN signal from Resourcemanager, "
+ "Registration of NodeManager failed, "
+ "Message from ResourceManager: RM Shutting Down Node");
}
@Test (timeout = 150000)
public void testNMConnectionToRM() throws Exception {
final long delta = 50000;
final long connectionWaitMs = 5000;
final long connectionRetryIntervalMs = 1000;
    // After rmStartIntervalMS elapses, the mock RM starts accepting
    // registrations.
    final long rmStartIntervalMS = 2 * 1000;
conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS,
connectionWaitMs);
conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS,
connectionRetryIntervalMs);
    // Case 1: the NM tries to connect to the RM several times but
    // ultimately fails.
NodeManagerWithCustomNodeStatusUpdater nmWithUpdater;
nm = nmWithUpdater = new NodeManagerWithCustomNodeStatusUpdater() {
@Override
protected NodeStatusUpdater createUpdater(Context context,
Dispatcher dispatcher, NodeHealthCheckerService healthChecker) {
NodeStatusUpdater nodeStatusUpdater = new MyNodeStatusUpdater4(
context, dispatcher, healthChecker, metrics,
rmStartIntervalMS, true);
return nodeStatusUpdater;
}
};
nm.init(conf);
long waitStartTime = System.currentTimeMillis();
try {
nm.start();
Assert.fail("NM should have failed to start due to RM connect failure");
} catch(Exception e) {
long t = System.currentTimeMillis();
long duration = t - waitStartTime;
boolean waitTimeValid = (duration >= connectionWaitMs)
&& (duration < (connectionWaitMs + delta));
if(!waitTimeValid) {
//either the exception was too early, or it had a different cause.
//reject with the inner stack trace
throw new Exception("NM should have tried re-connecting to RM during " +
"period of at least " + connectionWaitMs + " ms, but " +
"stopped retrying within " + (connectionWaitMs + delta) +
" ms: " + e, e);
}
}
    // Case 2: the NM's first several connection attempts fail, but it
    // eventually connects successfully.
nm = nmWithUpdater = new NodeManagerWithCustomNodeStatusUpdater() {
@Override
protected NodeStatusUpdater createUpdater(Context context,
Dispatcher dispatcher, NodeHealthCheckerService healthChecker) {
NodeStatusUpdater nodeStatusUpdater = new MyNodeStatusUpdater4(
context, dispatcher, healthChecker, metrics, rmStartIntervalMS,
false);
return nodeStatusUpdater;
}
};
nm.init(conf);
NodeStatusUpdater updater = nmWithUpdater.getUpdater();
Assert.assertNotNull("Updater not yet created ", updater);
waitStartTime = System.currentTimeMillis();
try {
nm.start();
} catch (Exception ex){
LOG.error("NM should have started successfully " +
"after connecting to RM.", ex);
throw ex;
}
long duration = System.currentTimeMillis() - waitStartTime;
MyNodeStatusUpdater4 myUpdater = (MyNodeStatusUpdater4) updater;
Assert.assertTrue("NM started before updater triggered",
myUpdater.isTriggered());
Assert.assertTrue("NM should have connected to RM after "
+"the start interval of " + rmStartIntervalMS
+": actual " + duration
+ " " + myUpdater,
(duration >= rmStartIntervalMS));
Assert.assertTrue("NM should have connected to RM less than "
+ (rmStartIntervalMS + delta)
+" milliseconds of RM starting up: actual " + duration
+ " " + myUpdater,
(duration < (rmStartIntervalMS + delta)));
}
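  // Timing arithmetic behind testNMConnectionToRM: with
  // RESOURCEMANAGER_CONNECT_MAX_WAIT_MS = 5000 and a 1000 ms retry
  // interval, the never-starting RM case must keep the NM retrying for at
  // least 5000 ms and fail within 5000 + delta ms; in the second case the
  // mock RM comes up after rmStartIntervalMS = 2000 ms, so a successful
  // start should take between 2000 and 2000 + delta ms.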
  /**
   * Verifies that if the NM fails to start its ContainerManager RPC
   * server, it never registers with the RM. Without this behaviour the
   * NM would report to the RM even though its services had not started
   * properly; the RM would then consider the NM alive and only retire it
   * after the NM_EXPIRY interval. See MAPREDUCE-2749.
   */
@Test
public void testNoRegistrationWhenNMServicesFail() throws Exception {
nm = new NodeManager() {
@Override
protected NodeStatusUpdater createNodeStatusUpdater(Context context,
Dispatcher dispatcher, NodeHealthCheckerService healthChecker) {
return new MyNodeStatusUpdater(context, dispatcher, healthChecker,
metrics);
}
@Override
protected ContainerManagerImpl createContainerManager(Context context,
ContainerExecutor exec, DeletionService del,
NodeStatusUpdater nodeStatusUpdater,
ApplicationACLsManager aclsManager,
LocalDirsHandlerService diskhandler) {
return new ContainerManagerImpl(context, exec, del, nodeStatusUpdater,
metrics, aclsManager, diskhandler) {
@Override
protected void serviceStart() {
// Simulating failure of starting RPC server
throw new YarnRuntimeException("Starting of RPC Server failed");
}
};
}
};
verifyNodeStartFailure("Starting of RPC Server failed");
}
@Test
public void testApplicationKeepAlive() throws Exception {
MyNodeManager nm = new MyNodeManager();
try {
YarnConfiguration conf = createNMConfig();
conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, true);
conf.setLong(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS,
        4000L);
nm.init(conf);
nm.start();
      // From heartbeat 2 onwards the RM asks the NM to clean up the app.
while (heartBeatID < 12) {
        Thread.sleep(1000L);
}
MyResourceTracker3 rt =
(MyResourceTracker3) nm.getNodeStatusUpdater().getRMClient();
rt.context.getApplications().remove(rt.appId);
Assert.assertEquals(1, rt.keepAliveRequests.size());
int numKeepAliveRequests = rt.keepAliveRequests.get(rt.appId).size();
LOG.info("Number of Keep Alive Requests: [" + numKeepAliveRequests + "]");
Assert.assertTrue(numKeepAliveRequests == 2 || numKeepAliveRequests == 3);
while (heartBeatID < 20) {
        Thread.sleep(1000L);
}
int numKeepAliveRequests2 = rt.keepAliveRequests.get(rt.appId).size();
Assert.assertEquals(numKeepAliveRequests, numKeepAliveRequests2);
} finally {
      if (nm.getServiceState() == STATE.STARTED) {
        nm.stop();
      }
}
}
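  // Keep-alive arithmetic for the test above: with log aggregation
  // enabled and RM_NM_EXPIRY_INTERVAL_MS = 4000 ms, the NM sends
  // keep-alives for the finished application at an interval derived from
  // that expiry window, so roughly 2-3 requests are expected across the
  // first ~10 one-second heartbeats; once the app leaves the context the
  // count stays flat, which the second assertion checks.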
/**
* Test completed containerStatus get back up when heart beat lost, and will
* be sent via next heart beat.
*/
@Test(timeout = 200000)
public void testCompletedContainerStatusBackup() throws Exception {
nm = new NodeManager() {
@Override
protected NodeStatusUpdater createNodeStatusUpdater(Context context,
Dispatcher dispatcher, NodeHealthCheckerService healthChecker) {
MyNodeStatusUpdater2 myNodeStatusUpdater =
new MyNodeStatusUpdater2(context, dispatcher, healthChecker,
metrics);
return myNodeStatusUpdater;
}
@Override
protected NMContext createNMContext(
NMContainerTokenSecretManager containerTokenSecretManager,
NMTokenSecretManagerInNM nmTokenSecretManager,
NMStateStoreService store) {
return new MyNMContext(containerTokenSecretManager,
nmTokenSecretManager);
}
};
YarnConfiguration conf = createNMConfig();
nm.init(conf);
nm.start();
int waitCount = 0;
while (heartBeatID <= 4 && waitCount++ != 20) {
Thread.sleep(500);
}
if (heartBeatID <= 4) {
Assert.fail("Failed to get all heartbeats in time, " +
"heartbeatID:" + heartBeatID);
}
if(assertionFailedInThread.get()) {
Assert.fail("ContainerStatus Backup failed");
}
Assert.assertNotNull(nm.getNMContext().getSystemCredentialsForApps()
.get(ApplicationId.newInstance(1234, 1)).getToken(new Text("token1")));
nm.stop();
}
@Test(timeout = 200000)
public void testNodeStatusUpdaterRetryAndNMShutdown()
throws Exception {
    final long connectionWaitMs = 1000;
final long connectionRetryIntervalMs = 1000;
YarnConfiguration conf = createNMConfig();
    conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS,
        connectionWaitMs);
conf.setLong(YarnConfiguration
.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS,
connectionRetryIntervalMs);
conf.setLong(YarnConfiguration.NM_SLEEP_DELAY_BEFORE_SIGKILL_MS, 5000);
conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS, 1);
CyclicBarrier syncBarrier = new CyclicBarrier(2);
nm = new MyNodeManager2(syncBarrier, conf);
nm.init(conf);
nm.start();
// start a container
ContainerId cId = TestNodeManagerShutdown.createContainerId();
FileContext localFS = FileContext.getLocalFSFileContext();
TestNodeManagerShutdown.startContainer(nm, cId, localFS, nmLocalDir,
new File("start_file.txt"));
try {
syncBarrier.await(10000, TimeUnit.MILLISECONDS);
    } catch (Exception e) {
      // Ignored: the barrier may time out if the NM already stopped.
    }
Assert.assertFalse("Containers not cleaned up when NM stopped",
assertionFailedInThread.get());
Assert.assertTrue(((MyNodeManager2) nm).isStopped);
Assert.assertTrue("calculate heartBeatCount based on" +
" connectionWaitSecs and RetryIntervalSecs", heartBeatID == 2);
}
@Test
public void testRMVersionLessThanMinimum() throws InterruptedException {
final AtomicInteger numCleanups = new AtomicInteger(0);
YarnConfiguration conf = createNMConfig();
conf.set(YarnConfiguration.NM_RESOURCEMANAGER_MINIMUM_VERSION, "3.0.0");
nm = new NodeManager() {
@Override
protected NodeStatusUpdater createNodeStatusUpdater(Context context,
Dispatcher dispatcher, NodeHealthCheckerService healthChecker) {
MyNodeStatusUpdater myNodeStatusUpdater = new MyNodeStatusUpdater(
context, dispatcher, healthChecker, metrics);
MyResourceTracker2 myResourceTracker2 = new MyResourceTracker2();
myResourceTracker2.heartBeatNodeAction = NodeAction.NORMAL;
myResourceTracker2.rmVersion = "3.0.0";
myNodeStatusUpdater.resourceTracker = myResourceTracker2;
return myNodeStatusUpdater;
}
@Override
protected ContainerManagerImpl createContainerManager(Context context,
ContainerExecutor exec, DeletionService del,
NodeStatusUpdater nodeStatusUpdater,
ApplicationACLsManager aclsManager,
LocalDirsHandlerService dirsHandler) {
return new ContainerManagerImpl(context, exec, del, nodeStatusUpdater,
metrics, aclsManager, dirsHandler) {
@Override
public void cleanUpApplicationsOnNMShutDown() {
super.cleanUpApplicationsOnNMShutDown();
numCleanups.incrementAndGet();
}
};
}
};
nm.init(conf);
nm.start();
// NM takes a while to reach the STARTED state.
int waitCount = 0;
while (nm.getServiceState() != STATE.STARTED && waitCount++ != 20) {
LOG.info("Waiting for NM to stop..");
Thread.sleep(1000);
}
Assert.assertTrue(nm.getServiceState() == STATE.STARTED);
nm.stop();
}
@Test
  public void testConcurrentAccessToSystemCredentials() {
final Map<ApplicationId, ByteBuffer> testCredentials = new HashMap<>();
ByteBuffer byteBuffer = ByteBuffer.wrap(new byte[300]);
ApplicationId applicationId = ApplicationId.newInstance(123456, 120);
testCredentials.put(applicationId, byteBuffer);
final List<Throwable> exceptions = Collections.synchronizedList(new
ArrayList<Throwable>());
final int NUM_THREADS = 10;
final CountDownLatch allDone = new CountDownLatch(NUM_THREADS);
final ExecutorService threadPool = Executors.newFixedThreadPool(
NUM_THREADS);
final AtomicBoolean stop = new AtomicBoolean(false);
try {
for (int i = 0; i < NUM_THREADS; i++) {
threadPool.submit(new Runnable() {
@Override
public void run() {
try {
for (int i = 0; i < 100 && !stop.get(); i++) {
NodeHeartbeatResponse nodeHeartBeatResponse =
newNodeHeartbeatResponse(0, NodeAction.NORMAL,
null, null, null, null, 0);
nodeHeartBeatResponse.setSystemCredentialsForApps(
testCredentials);
NodeHeartbeatResponseProto proto =
((NodeHeartbeatResponsePBImpl)nodeHeartBeatResponse)
.getProto();
Assert.assertNotNull(proto);
}
} catch (Throwable t) {
exceptions.add(t);
stop.set(true);
} finally {
allDone.countDown();
}
}
});
}
int testTimeout = 2;
Assert.assertTrue("Timeout waiting for more than " + testTimeout + " " +
"seconds",
allDone.await(testTimeout, TimeUnit.SECONDS));
} catch (InterruptedException ie) {
exceptions.add(ie);
} finally {
threadPool.shutdownNow();
}
Assert.assertTrue("Test failed with exception(s)" + exceptions,
exceptions.isEmpty());
}
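  // The test above hammers NodeHeartbeatResponsePBImpl.getProto() from
  // several threads while every response shares the same credentials
  // map; any race in the PBImpl's builder surfaces as a Throwable in the
  // synchronized list and fails the final assertion.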
  // Adds new container info into the NM context each time the node
  // heartbeats.
private class MyNMContext extends NMContext {
public MyNMContext(
NMContainerTokenSecretManager containerTokenSecretManager,
NMTokenSecretManagerInNM nmTokenSecretManager) {
super(containerTokenSecretManager, nmTokenSecretManager, null, null,
new NMNullStateStoreService());
}
@Override
public ConcurrentMap<ContainerId, Container> getContainers() {
if (heartBeatID == 0) {
return containers;
} else if (heartBeatID == 1) {
ContainerStatus containerStatus2 =
createContainerStatus(2, ContainerState.RUNNING);
putMockContainer(containerStatus2);
ContainerStatus containerStatus3 =
createContainerStatus(3, ContainerState.COMPLETE);
putMockContainer(containerStatus3);
return containers;
} else if (heartBeatID == 2) {
ContainerStatus containerStatus4 =
createContainerStatus(4, ContainerState.RUNNING);
putMockContainer(containerStatus4);
ContainerStatus containerStatus5 =
createContainerStatus(5, ContainerState.COMPLETE);
putMockContainer(containerStatus5);
return containers;
} else if (heartBeatID == 3 || heartBeatID == 4) {
return containers;
} else {
containers.clear();
return containers;
}
}
private void putMockContainer(ContainerStatus containerStatus) {
Container container = getMockContainer(containerStatus);
containers.put(containerStatus.getContainerId(), container);
applications.putIfAbsent(containerStatus.getContainerId()
.getApplicationAttemptId().getApplicationId(),
mock(Application.class));
}
}
public static ContainerStatus createContainerStatus(int id,
ContainerState containerState) {
ApplicationId applicationId = ApplicationId.newInstance(0, 1);
ApplicationAttemptId applicationAttemptId =
ApplicationAttemptId.newInstance(applicationId, 1);
    ContainerId containerId =
        ContainerId.newContainerId(applicationAttemptId, id);
    ContainerStatus containerStatus =
        BuilderUtils.newContainerStatus(containerId, containerState,
"test_containerStatus: id=" + id + ", containerState: "
+ containerState, 0);
return containerStatus;
}
public static Container getMockContainer(ContainerStatus containerStatus) {
ContainerImpl container = mock(ContainerImpl.class);
when(container.cloneAndGetContainerStatus()).thenReturn(containerStatus);
when(container.getCurrentState()).thenReturn(containerStatus.getState());
when(container.getContainerId()).thenReturn(
containerStatus.getContainerId());
if (containerStatus.getState().equals(ContainerState.COMPLETE)) {
when(container.getContainerState())
.thenReturn(org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState.DONE);
} else if (containerStatus.getState().equals(ContainerState.RUNNING)) {
when(container.getContainerState())
.thenReturn(org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState.RUNNING);
}
return container;
}
private void verifyNodeStartFailure(String errMessage) throws Exception {
Assert.assertNotNull("nm is null", nm);
YarnConfiguration conf = createNMConfig();
nm.init(conf);
try {
nm.start();
Assert.fail("NM should have failed to start. Didn't get exception!!");
} catch (Exception e) {
//the version in trunk looked in the cause for equality
// and assumed failures were nested.
//this version assumes that error strings propagate to the base and
//use a contains() test only. It should be less brittle
if(!e.getMessage().contains(errMessage)) {
throw e;
}
}
// the service should be stopped
Assert.assertEquals("NM state is wrong!", STATE.STOPPED, nm
.getServiceState());
Assert.assertEquals("Number of registered nodes is wrong!", 0,
this.registeredNodes.size());
}
private YarnConfiguration createNMConfig() {
YarnConfiguration conf = new YarnConfiguration();
String localhostAddress = null;
try {
localhostAddress = InetAddress.getByName("localhost").getCanonicalHostName();
} catch (UnknownHostException e) {
Assert.fail("Unable to get localhost address: " + e.getMessage());
}
conf.setInt(YarnConfiguration.NM_PMEM_MB, 5 * 1024); // 5GB
conf.set(YarnConfiguration.NM_ADDRESS, localhostAddress + ":12345");
conf.set(YarnConfiguration.NM_LOCALIZER_ADDRESS, localhostAddress + ":12346");
conf.set(YarnConfiguration.NM_LOG_DIRS, logsDir.getAbsolutePath());
conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
remoteLogsDir.getAbsolutePath());
conf.set(YarnConfiguration.NM_LOCAL_DIRS, nmLocalDir.getAbsolutePath());
conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS, 1);
return conf;
}
private NodeManager getNodeManager(final NodeAction nodeHeartBeatAction) {
return new NodeManager() {
@Override
protected NodeStatusUpdater createNodeStatusUpdater(Context context,
Dispatcher dispatcher, NodeHealthCheckerService healthChecker) {
MyNodeStatusUpdater myNodeStatusUpdater = new MyNodeStatusUpdater(
context, dispatcher, healthChecker, metrics);
MyResourceTracker2 myResourceTracker2 = new MyResourceTracker2();
myResourceTracker2.heartBeatNodeAction = nodeHeartBeatAction;
myNodeStatusUpdater.resourceTracker = myResourceTracker2;
return myNodeStatusUpdater;
}
};
}
}
| 68211 | 38.890058 | 122 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDockerContainerExecutorWithMocks.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assume.assumeTrue;
import static org.mockito.Mockito.RETURNS_DEEP_STUBS;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.io.LineNumberReader;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerStartContext;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
/**
* Mock tests for docker container executor
*/
public class TestDockerContainerExecutorWithMocks {
private static final Log LOG = LogFactory
.getLog(TestDockerContainerExecutorWithMocks.class);
public static final String DOCKER_LAUNCH_COMMAND = "/bin/true";
private DockerContainerExecutor dockerContainerExecutor = null;
private LocalDirsHandlerService dirsHandler;
private Path workDir;
private FileContext lfs;
private String yarnImage;
@Before
public void setup() {
assumeTrue(Shell.LINUX);
File f = new File("./src/test/resources/mock-container-executor");
    if (!FileUtil.canExecute(f)) {
FileUtil.setExecutable(f, true);
}
String executorPath = f.getAbsolutePath();
Configuration conf = new Configuration();
yarnImage = "yarnImage";
long time = System.currentTimeMillis();
conf.set(YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH, executorPath);
conf.set(YarnConfiguration.NM_LOCAL_DIRS, "/tmp/nm-local-dir" + time);
conf.set(YarnConfiguration.NM_LOG_DIRS, "/tmp/userlogs" + time);
conf.set(YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_IMAGE_NAME,
yarnImage);
conf.set(YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_EXEC_NAME,
DOCKER_LAUNCH_COMMAND);
dockerContainerExecutor = new DockerContainerExecutor();
dirsHandler = new LocalDirsHandlerService();
dirsHandler.init(conf);
dockerContainerExecutor.setConf(conf);
lfs = null;
try {
lfs = FileContext.getLocalFSFileContext();
workDir = new Path("/tmp/temp-"+ System.currentTimeMillis());
lfs.mkdir(workDir, FsPermission.getDirDefault(), true);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
@After
public void tearDown() {
try {
if (lfs != null) {
lfs.delete(workDir, true);
}
} catch (IOException e) {
throw new RuntimeException(e);
}
}
@Test(expected = IllegalStateException.class)
//Test that DockerContainerExecutor doesn't successfully init on a secure
//cluster
public void testContainerInitSecure() throws IOException {
dockerContainerExecutor.getConf().set(
CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
dockerContainerExecutor.init();
}
@Test(expected = IllegalArgumentException.class)
  //Test that when the image name is empty, the container launch throws an
  //IllegalArgumentException
public void testContainerLaunchNullImage() throws IOException {
String appSubmitter = "nobody";
String appId = "APP_ID";
String containerId = "CONTAINER_ID";
String testImage = "";
Container container = mock(Container.class, RETURNS_DEEP_STUBS);
ContainerId cId = mock(ContainerId.class, RETURNS_DEEP_STUBS);
ContainerLaunchContext context = mock(ContainerLaunchContext.class);
HashMap<String, String> env = new HashMap<String,String>();
when(container.getContainerId()).thenReturn(cId);
when(container.getLaunchContext()).thenReturn(context);
when(cId.getApplicationAttemptId().getApplicationId().toString())
.thenReturn(appId);
when(cId.toString()).thenReturn(containerId);
when(context.getEnvironment()).thenReturn(env);
env.put(
YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_IMAGE_NAME, testImage);
dockerContainerExecutor.getConf().set(
YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_IMAGE_NAME, testImage);
Path scriptPath = new Path("file:///bin/echo");
Path tokensPath = new Path("file:///dev/null");
Path pidFile = new Path(workDir, "pid.txt");
dockerContainerExecutor.activateContainer(cId, pidFile);
dockerContainerExecutor.launchContainer(new ContainerStartContext.Builder()
.setContainer(container)
.setNmPrivateContainerScriptPath(scriptPath)
.setNmPrivateTokensPath(tokensPath)
.setUser(appSubmitter)
.setAppId(appId)
.setContainerWorkDir(workDir)
.setLocalDirs(dirsHandler.getLocalDirs())
.setLogDirs(dirsHandler.getLogDirs())
.build());
}
@Test(expected = IllegalArgumentException.class)
//Test that when the image name is invalid, the container launch throws an
//IllegalArgumentException
public void testContainerLaunchInvalidImage() throws IOException {
String appSubmitter = "nobody";
String appId = "APP_ID";
String containerId = "CONTAINER_ID";
String testImage = "testrepo.com/test-image rm -rf $HADOOP_PREFIX/*";
Container container = mock(Container.class, RETURNS_DEEP_STUBS);
ContainerId cId = mock(ContainerId.class, RETURNS_DEEP_STUBS);
ContainerLaunchContext context = mock(ContainerLaunchContext.class);
HashMap<String, String> env = new HashMap<String,String>();
when(container.getContainerId()).thenReturn(cId);
when(container.getLaunchContext()).thenReturn(context);
when(cId.getApplicationAttemptId().getApplicationId().toString())
.thenReturn(appId);
when(cId.toString()).thenReturn(containerId);
when(context.getEnvironment()).thenReturn(env);
env.put(
YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_IMAGE_NAME, testImage);
dockerContainerExecutor.getConf().set(
YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_IMAGE_NAME, testImage);
Path scriptPath = new Path("file:///bin/echo");
Path tokensPath = new Path("file:///dev/null");
Path pidFile = new Path(workDir, "pid.txt");
dockerContainerExecutor.activateContainer(cId, pidFile);
dockerContainerExecutor.launchContainer(
new ContainerStartContext.Builder()
.setContainer(container)
.setNmPrivateContainerScriptPath(scriptPath)
.setNmPrivateTokensPath(tokensPath)
.setUser(appSubmitter)
.setAppId(appId)
.setContainerWorkDir(workDir)
.setLocalDirs(dirsHandler.getLocalDirs())
.setLogDirs(dirsHandler.getLogDirs())
.build());
}
@Test
//Test that a container launch correctly wrote the session script with the
//commands we expected
public void testContainerLaunch() throws IOException {
String appSubmitter = "nobody";
String appId = "APP_ID";
String containerId = "CONTAINER_ID";
String testImage = "\"sequenceiq/hadoop-docker:2.4.1\"";
Container container = mock(Container.class, RETURNS_DEEP_STUBS);
ContainerId cId = mock(ContainerId.class, RETURNS_DEEP_STUBS);
ContainerLaunchContext context = mock(ContainerLaunchContext.class);
HashMap<String, String> env = new HashMap<String,String>();
when(container.getContainerId()).thenReturn(cId);
when(container.getLaunchContext()).thenReturn(context);
when(cId.getApplicationAttemptId().getApplicationId().toString())
.thenReturn(appId);
when(cId.toString()).thenReturn(containerId);
when(context.getEnvironment()).thenReturn(env);
env.put(
YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_IMAGE_NAME, testImage);
Path scriptPath = new Path("file:///bin/echo");
Path tokensPath = new Path("file:///dev/null");
Path pidFile = new Path(workDir, "pid");
dockerContainerExecutor.activateContainer(cId, pidFile);
int ret = dockerContainerExecutor.launchContainer(
new ContainerStartContext.Builder()
.setContainer(container)
.setNmPrivateContainerScriptPath(scriptPath)
.setNmPrivateTokensPath(tokensPath)
.setUser(appSubmitter)
.setAppId(appId)
.setContainerWorkDir(workDir)
.setLocalDirs(dirsHandler.getLocalDirs())
.setLogDirs(dirsHandler.getLogDirs())
.build());
assertEquals(0, ret);
//get the script
Path sessionScriptPath = new Path(workDir,
Shell.appendScriptExtension(
DockerContainerExecutor.DOCKER_CONTAINER_EXECUTOR_SESSION_SCRIPT));
LineNumberReader lnr = new LineNumberReader(new FileReader(
sessionScriptPath.toString()));
boolean cmdFound = false;
List<String> localDirs = dirsToMount(dirsHandler.getLocalDirs());
List<String> logDirs = dirsToMount(dirsHandler.getLogDirs());
List<String> workDirMount = dirsToMount(Collections.singletonList(
workDir.toUri().getPath()));
List<String> expectedCommands = new ArrayList<String>(Arrays.asList(
DOCKER_LAUNCH_COMMAND, "run", "--rm", "--net=host", "--name",
containerId));
expectedCommands.addAll(localDirs);
expectedCommands.addAll(logDirs);
expectedCommands.addAll(workDirMount);
String shellScript = workDir + "/launch_container.sh";
expectedCommands.addAll(Arrays.asList(testImage.replaceAll("['\"]", ""),
"bash","\"" + shellScript + "\""));
String expectedPidString =
"echo `/bin/true inspect --format {{.State.Pid}} " + containerId+"` > "+
pidFile.toString() + ".tmp";
boolean pidSetterFound = false;
    while (lnr.ready()) {
      String line = lnr.readLine();
      LOG.debug("line: " + line);
      if (line.startsWith(DOCKER_LAUNCH_COMMAND)) {
        List<String> command = new ArrayList<String>();
        for (String s : line.split("\\s+")) {
          command.add(s.trim());
        }
assertEquals(expectedCommands, command);
cmdFound = true;
} else if (line.startsWith("echo")) {
assertEquals(expectedPidString, line);
pidSetterFound = true;
}
    }
    lnr.close();
    assertTrue(cmdFound);
    assertTrue(pidSetterFound);
}
private List<String> dirsToMount(List<String> dirs) {
List<String> localDirs = new ArrayList<String>();
for(String dir: dirs){
localDirs.add("-v");
localDirs.add(dir + ":" + dir);
}
return localDirs;
}
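  // For reference, the session script checked by testContainerLaunch is
  // expected to contain a single docker invocation shaped roughly like:
  //   /bin/true run --rm --net=host --name CONTAINER_ID \
  //     -v <localDir>:<localDir> ... -v <logDir>:<logDir> \
  //     -v <workDir>:<workDir> sequenceiq/hadoop-docker:2.4.1 \
  //     bash "<workDir>/launch_container.sh"
  // where each -v pair is a docker bind mount produced by dirsToMount().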
}
| 11844 | 37.70915 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/MockContainerLocalizer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager;
import java.net.InetSocketAddress;
import java.util.List;
public class MockContainerLocalizer {
public static void buildMainArgs(List<String> command,
String user, String appId, String locId,
InetSocketAddress nmAddr, List<String> localDirs) {
command.add(MockContainerLocalizer.class.getName());
command.add(user);
command.add(appId);
command.add(locId);
command.add(nmAddr.getHostName());
command.add(Integer.toString(nmAddr.getPort()));
for(String dir : localDirs) {
command.add(dir);
}
}
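  // The argv assembled above has the shape:
  //   MockContainerLocalizer <user> <appId> <locId> <nmHost> <nmPort> <localDir>...
  // mirroring the argument order the real ContainerLocalizer expects.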
public static void main(String[] argv) throws Throwable {
//DO Nothing
}
}
| 1476 | 34.166667 | 74 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDirectoryCollection.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager;
import java.io.File;
import java.io.IOException;
import java.util.List;
import java.util.ListIterator;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.nodemanager.DirectoryCollection.DirsChangeListener;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestDirectoryCollection {
private static final File testDir = new File("target",
TestDirectoryCollection.class.getName()).getAbsoluteFile();
private static final File testFile = new File(testDir, "testfile");
private Configuration conf;
private FileContext localFs;
@Before
public void setupForTests() throws IOException {
conf = new Configuration();
localFs = FileContext.getLocalFSFileContext(conf);
testDir.mkdirs();
testFile.createNewFile();
}
@After
public void teardown() {
FileUtil.fullyDelete(testDir);
}
@Test
public void testConcurrentAccess() throws IOException {
// Initialize DirectoryCollection with a file instead of a directory
String[] dirs = {testFile.getPath()};
DirectoryCollection dc =
new DirectoryCollection(dirs, conf.getFloat(
YarnConfiguration.NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE,
YarnConfiguration.DEFAULT_NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE));
    // Create an iterator before checkDirs is called to reliably test this
    // case
List<String> list = dc.getGoodDirs();
ListIterator<String> li = list.listIterator();
    // If the backing collection were non-concurrent, the mutation made by
    // checkDirs() would invalidate the iterator and a
    // ConcurrentModificationException would be thrown on its next use.
Assert.assertTrue("checkDirs did not remove test file from directory list",
dc.checkDirs());
// Verify no ConcurrentModification is thrown
li.next();
}
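  // Minimal sketch of the snapshot-iterator property the test above
  // relies on, assuming getGoodDirs() is backed by a copy-on-write list:
  // an iterator created before a mutation keeps traversing the old
  // snapshot instead of throwing ConcurrentModificationException.
  @SuppressWarnings("unused")
  private static void snapshotIteratorSketch() {
    List<String> dirs = new java.util.concurrent.CopyOnWriteArrayList<String>(
        java.util.Arrays.asList("a", "b"));
    java.util.Iterator<String> it = dirs.iterator();
    dirs.remove("b");            // mutate after the iterator was created
    while (it.hasNext()) {
      it.next();                 // still sees the original two-element snapshot
    }
  }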
@Test
public void testCreateDirectories() throws IOException {
conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
String dirA = new File(testDir, "dirA").getPath();
String dirB = new File(dirA, "dirB").getPath();
String dirC = new File(testDir, "dirC").getPath();
Path pathC = new Path(dirC);
FsPermission permDirC = new FsPermission((short)0710);
localFs.mkdir(pathC, null, true);
localFs.setPermission(pathC, permDirC);
String[] dirs = { dirA, dirB, dirC };
DirectoryCollection dc =
new DirectoryCollection(dirs, conf.getFloat(
YarnConfiguration.NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE,
YarnConfiguration.DEFAULT_NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE));
FsPermission defaultPerm = FsPermission.getDefault()
.applyUMask(new FsPermission((short)FsPermission.DEFAULT_UMASK));
boolean createResult = dc.createNonExistentDirs(localFs, defaultPerm);
Assert.assertTrue(createResult);
FileStatus status = localFs.getFileStatus(new Path(dirA));
Assert.assertEquals("local dir parent not created with proper permissions",
defaultPerm, status.getPermission());
status = localFs.getFileStatus(new Path(dirB));
Assert.assertEquals("local dir not created with proper permissions",
defaultPerm, status.getPermission());
status = localFs.getFileStatus(pathC);
Assert.assertEquals("existing local directory permissions modified",
permDirC, status.getPermission());
}
@Test
public void testDiskSpaceUtilizationLimit() throws IOException {
String dirA = new File(testDir, "dirA").getPath();
String[] dirs = { dirA };
DirectoryCollection dc = new DirectoryCollection(dirs, 0.0F);
dc.checkDirs();
Assert.assertEquals(0, dc.getGoodDirs().size());
Assert.assertEquals(1, dc.getFailedDirs().size());
Assert.assertEquals(1, dc.getFullDirs().size());
// no good dirs
Assert.assertEquals(0, dc.getGoodDirsDiskUtilizationPercentage());
dc = new DirectoryCollection(dirs, 100.0F);
int utilizedSpacePerc =
(int) ((testDir.getTotalSpace() - testDir.getUsableSpace()) * 100 /
testDir.getTotalSpace());
dc.checkDirs();
Assert.assertEquals(1, dc.getGoodDirs().size());
Assert.assertEquals(0, dc.getFailedDirs().size());
Assert.assertEquals(0, dc.getFullDirs().size());
Assert.assertEquals(utilizedSpacePerc,
dc.getGoodDirsDiskUtilizationPercentage());
dc = new DirectoryCollection(dirs, testDir.getTotalSpace() / (1024 * 1024));
dc.checkDirs();
Assert.assertEquals(0, dc.getGoodDirs().size());
Assert.assertEquals(1, dc.getFailedDirs().size());
Assert.assertEquals(1, dc.getFullDirs().size());
// no good dirs
Assert.assertEquals(0, dc.getGoodDirsDiskUtilizationPercentage());
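    // A 100% utilization cutoff combined with a 0 MB space cutoff also passes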
dc = new DirectoryCollection(dirs, 100.0F, 0);
utilizedSpacePerc =
(int)((testDir.getTotalSpace() - testDir.getUsableSpace()) * 100 /
testDir.getTotalSpace());
dc.checkDirs();
Assert.assertEquals(1, dc.getGoodDirs().size());
Assert.assertEquals(0, dc.getFailedDirs().size());
Assert.assertEquals(0, dc.getFullDirs().size());
Assert.assertEquals(utilizedSpacePerc,
dc.getGoodDirsDiskUtilizationPercentage());
}
@Test
public void testDiskLimitsCutoffSetters() throws IOException {
String[] dirs = { "dir" };
DirectoryCollection dc = new DirectoryCollection(dirs, 0.0F, 100);
float testValue = 57.5F;
float delta = 0.1F;
dc.setDiskUtilizationPercentageCutoff(testValue);
Assert.assertEquals(testValue, dc.getDiskUtilizationPercentageCutoff(),
delta);
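    // Percentage cutoffs outside the [0, 100] range should be clamped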
testValue = -57.5F;
dc.setDiskUtilizationPercentageCutoff(testValue);
Assert.assertEquals(0.0F, dc.getDiskUtilizationPercentageCutoff(), delta);
testValue = 157.5F;
dc.setDiskUtilizationPercentageCutoff(testValue);
Assert.assertEquals(100.0F, dc.getDiskUtilizationPercentageCutoff(), delta);
long spaceValue = 57;
dc.setDiskUtilizationSpaceCutoff(spaceValue);
Assert.assertEquals(spaceValue, dc.getDiskUtilizationSpaceCutoff());
spaceValue = -57;
dc.setDiskUtilizationSpaceCutoff(spaceValue);
Assert.assertEquals(0, dc.getDiskUtilizationSpaceCutoff());
}
@Test
public void testFailedDisksBecomingGoodAgain() throws Exception {
String dirA = new File(testDir, "dirA").getPath();
String[] dirs = { dirA };
DirectoryCollection dc = new DirectoryCollection(dirs, 0.0F);
dc.checkDirs();
Assert.assertEquals(0, dc.getGoodDirs().size());
Assert.assertEquals(1, dc.getFailedDirs().size());
Assert.assertEquals(1, dc.getFullDirs().size());
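    // Raising the utilization cutoff to 100% should bring the dir back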
dc.setDiskUtilizationPercentageCutoff(100.0F);
dc.checkDirs();
Assert.assertEquals(1, dc.getGoodDirs().size());
Assert.assertEquals(0, dc.getFailedDirs().size());
Assert.assertEquals(0, dc.getFullDirs().size());
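    // Now verify that a dir failing due to bad permissions recovers once
    // the permissions are fixed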
conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
String dirB = new File(testDir, "dirB").getPath();
Path pathB = new Path(dirB);
FsPermission permDirB = new FsPermission((short) 0400);
localFs.mkdir(pathB, null, true);
localFs.setPermission(pathB, permDirB);
String[] dirs2 = { dirB };
dc = new DirectoryCollection(dirs2, 100.0F);
dc.checkDirs();
Assert.assertEquals(0, dc.getGoodDirs().size());
Assert.assertEquals(1, dc.getFailedDirs().size());
Assert.assertEquals(0, dc.getFullDirs().size());
permDirB = new FsPermission((short) 0700);
localFs.setPermission(pathB, permDirB);
dc.checkDirs();
Assert.assertEquals(1, dc.getGoodDirs().size());
Assert.assertEquals(0, dc.getFailedDirs().size());
Assert.assertEquals(0, dc.getFullDirs().size());
}
@Test
public void testConstructors() {
String[] dirs = { "dir" };
float delta = 0.1F;
DirectoryCollection dc = new DirectoryCollection(dirs);
Assert.assertEquals(100.0F, dc.getDiskUtilizationPercentageCutoff(), delta);
Assert.assertEquals(0, dc.getDiskUtilizationSpaceCutoff());
dc = new DirectoryCollection(dirs, 57.5F);
Assert.assertEquals(57.5F, dc.getDiskUtilizationPercentageCutoff(), delta);
Assert.assertEquals(0, dc.getDiskUtilizationSpaceCutoff());
dc = new DirectoryCollection(dirs, 57);
Assert.assertEquals(100.0F, dc.getDiskUtilizationPercentageCutoff(), delta);
Assert.assertEquals(57, dc.getDiskUtilizationSpaceCutoff());
dc = new DirectoryCollection(dirs, 57.5F, 67);
Assert.assertEquals(57.5F, dc.getDiskUtilizationPercentageCutoff(), delta);
Assert.assertEquals(67, dc.getDiskUtilizationSpaceCutoff());
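    // Out-of-range cutoff values should be clamped at construction time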
dc = new DirectoryCollection(dirs, -57.5F, -67);
Assert.assertEquals(0.0F, dc.getDiskUtilizationPercentageCutoff(), delta);
Assert.assertEquals(0, dc.getDiskUtilizationSpaceCutoff());
dc = new DirectoryCollection(dirs, 157.5F, -67);
Assert.assertEquals(100.0F, dc.getDiskUtilizationPercentageCutoff(), delta);
Assert.assertEquals(0, dc.getDiskUtilizationSpaceCutoff());
}
@Test
public void testDirsChangeListener() {
DirsChangeListenerTest listener1 = new DirsChangeListenerTest();
DirsChangeListenerTest listener2 = new DirsChangeListenerTest();
DirsChangeListenerTest listener3 = new DirsChangeListenerTest();
String dirA = new File(testDir, "dirA").getPath();
String[] dirs = { dirA };
DirectoryCollection dc = new DirectoryCollection(dirs, 0.0F);
Assert.assertEquals(1, dc.getGoodDirs().size());
    Assert.assertEquals(0, listener1.num);
    Assert.assertEquals(0, listener2.num);
    Assert.assertEquals(0, listener3.num);
dc.registerDirsChangeListener(listener1);
dc.registerDirsChangeListener(listener2);
dc.registerDirsChangeListener(listener3);
    Assert.assertEquals(1, listener1.num);
    Assert.assertEquals(1, listener2.num);
    Assert.assertEquals(1, listener3.num);
dc.deregisterDirsChangeListener(listener3);
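    // checkDirs() fails the dir (0% cutoff) and notifies only the listeners
    // still registered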
dc.checkDirs();
Assert.assertEquals(0, dc.getGoodDirs().size());
    Assert.assertEquals(2, listener1.num);
    Assert.assertEquals(2, listener2.num);
    Assert.assertEquals(1, listener3.num);
dc.deregisterDirsChangeListener(listener2);
dc.setDiskUtilizationPercentageCutoff(100.0F);
dc.checkDirs();
Assert.assertEquals(1, dc.getGoodDirs().size());
    Assert.assertEquals(3, listener1.num);
    Assert.assertEquals(2, listener2.num);
    Assert.assertEquals(1, listener3.num);
}
static class DirsChangeListenerTest implements DirsChangeListener {
public int num = 0;
public DirsChangeListenerTest() {
}
@Override
public void onDirsChanged() {
num++;
}
}
}
| 11,744 | 37.009709 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/security/TestNMTokenSecretManagerInNM.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager.security;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.security.NMTokenIdentifier;
import org.apache.hadoop.yarn.server.api.records.MasterKey;
import org.apache.hadoop.yarn.server.nodemanager.recovery.NMMemoryStateStoreService;
import org.apache.hadoop.yarn.server.security.BaseNMTokenSecretManager;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.junit.Test;
public class TestNMTokenSecretManagerInNM {
@Test
public void testRecovery() throws IOException {
YarnConfiguration conf = new YarnConfiguration();
conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, true);
final NodeId nodeId = NodeId.newInstance("somehost", 1234);
final ApplicationAttemptId attempt1 =
ApplicationAttemptId.newInstance(ApplicationId.newInstance(1, 1), 1);
final ApplicationAttemptId attempt2 =
ApplicationAttemptId.newInstance(ApplicationId.newInstance(2, 2), 2);
NMTokenKeyGeneratorForTest keygen = new NMTokenKeyGeneratorForTest();
NMMemoryStateStoreService stateStore = new NMMemoryStateStoreService();
stateStore.init(conf);
stateStore.start();
NMTokenSecretManagerInNM secretMgr =
new NMTokenSecretManagerInNM(stateStore);
secretMgr.setNodeId(nodeId);
MasterKey currentKey = keygen.generateKey();
secretMgr.setMasterKey(currentKey);
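    // create NM tokens for two attempts under the current master key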
NMTokenIdentifier attemptToken1 =
getNMTokenId(secretMgr.createNMToken(attempt1, nodeId, "user1"));
NMTokenIdentifier attemptToken2 =
getNMTokenId(secretMgr.createNMToken(attempt2, nodeId, "user2"));
secretMgr.appAttemptStartContainer(attemptToken1);
secretMgr.appAttemptStartContainer(attemptToken2);
assertTrue(secretMgr.isAppAttemptNMTokenKeyPresent(attempt1));
assertTrue(secretMgr.isAppAttemptNMTokenKeyPresent(attempt2));
assertNotNull(secretMgr.retrievePassword(attemptToken1));
assertNotNull(secretMgr.retrievePassword(attemptToken2));
// restart and verify key is still there and token still valid
secretMgr = new NMTokenSecretManagerInNM(stateStore);
secretMgr.recover();
secretMgr.setNodeId(nodeId);
assertEquals(currentKey, secretMgr.getCurrentKey());
assertTrue(secretMgr.isAppAttemptNMTokenKeyPresent(attempt1));
assertTrue(secretMgr.isAppAttemptNMTokenKeyPresent(attempt2));
assertNotNull(secretMgr.retrievePassword(attemptToken1));
assertNotNull(secretMgr.retrievePassword(attemptToken2));
// roll master key and remove an app
currentKey = keygen.generateKey();
secretMgr.setMasterKey(currentKey);
secretMgr.appFinished(attempt1.getApplicationId());
// restart and verify attempt1 key is still valid due to prev key persist
secretMgr = new NMTokenSecretManagerInNM(stateStore);
secretMgr.recover();
secretMgr.setNodeId(nodeId);
assertEquals(currentKey, secretMgr.getCurrentKey());
assertFalse(secretMgr.isAppAttemptNMTokenKeyPresent(attempt1));
assertTrue(secretMgr.isAppAttemptNMTokenKeyPresent(attempt2));
assertNotNull(secretMgr.retrievePassword(attemptToken1));
assertNotNull(secretMgr.retrievePassword(attemptToken2));
// roll master key again, restart, and verify attempt1 key is bad but
// attempt2 is still good due to app key persist
currentKey = keygen.generateKey();
secretMgr.setMasterKey(currentKey);
secretMgr = new NMTokenSecretManagerInNM(stateStore);
secretMgr.recover();
secretMgr.setNodeId(nodeId);
assertEquals(currentKey, secretMgr.getCurrentKey());
assertFalse(secretMgr.isAppAttemptNMTokenKeyPresent(attempt1));
assertTrue(secretMgr.isAppAttemptNMTokenKeyPresent(attempt2));
try {
secretMgr.retrievePassword(attemptToken1);
fail("attempt token should not still be valid");
} catch (InvalidToken e) {
// expected
}
assertNotNull(secretMgr.retrievePassword(attemptToken2));
// remove last attempt, restart, verify both tokens are now bad
secretMgr.appFinished(attempt2.getApplicationId());
secretMgr = new NMTokenSecretManagerInNM(stateStore);
secretMgr.recover();
secretMgr.setNodeId(nodeId);
assertEquals(currentKey, secretMgr.getCurrentKey());
assertFalse(secretMgr.isAppAttemptNMTokenKeyPresent(attempt1));
assertFalse(secretMgr.isAppAttemptNMTokenKeyPresent(attempt2));
try {
secretMgr.retrievePassword(attemptToken1);
fail("attempt token should not still be valid");
} catch (InvalidToken e) {
// expected
}
try {
secretMgr.retrievePassword(attemptToken2);
fail("attempt token should not still be valid");
} catch (InvalidToken e) {
// expected
}
stateStore.close();
}
private NMTokenIdentifier getNMTokenId(
org.apache.hadoop.yarn.api.records.Token token) throws IOException {
Token<NMTokenIdentifier> convertedToken =
ConverterUtils.convertFromYarn(token, (Text) null);
return convertedToken.decodeIdentifier();
}
private static class NMTokenKeyGeneratorForTest extends
BaseNMTokenSecretManager {
public MasterKey generateKey() {
return createNewMasterKey().getMasterKey();
}
}
}
| 6,570 | 41.393548 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/security/TestNMContainerTokenSecretManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager.security;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
import org.apache.hadoop.yarn.server.api.records.MasterKey;
import org.apache.hadoop.yarn.server.nodemanager.recovery.NMMemoryStateStoreService;
import org.apache.hadoop.yarn.server.security.BaseContainerTokenSecretManager;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.junit.Test;
public class TestNMContainerTokenSecretManager {
@Test
public void testRecovery() throws IOException {
YarnConfiguration conf = new YarnConfiguration();
conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, true);
final NodeId nodeId = NodeId.newInstance("somehost", 1234);
final ContainerId cid1 = BuilderUtils.newContainerId(1, 1, 1, 1);
final ContainerId cid2 = BuilderUtils.newContainerId(2, 2, 2, 2);
ContainerTokenKeyGeneratorForTest keygen =
new ContainerTokenKeyGeneratorForTest(conf);
NMMemoryStateStoreService stateStore = new NMMemoryStateStoreService();
stateStore.init(conf);
stateStore.start();
NMContainerTokenSecretManager secretMgr =
new NMContainerTokenSecretManager(conf, stateStore);
secretMgr.setNodeId(nodeId);
MasterKey currentKey = keygen.generateKey();
secretMgr.setMasterKey(currentKey);
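    // create container tokens for two containers under the current key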
ContainerTokenIdentifier tokenId1 =
createContainerTokenId(cid1, nodeId, "user1", secretMgr);
ContainerTokenIdentifier tokenId2 =
createContainerTokenId(cid2, nodeId, "user2", secretMgr);
assertNotNull(secretMgr.retrievePassword(tokenId1));
assertNotNull(secretMgr.retrievePassword(tokenId2));
// restart and verify tokens still valid
secretMgr = new NMContainerTokenSecretManager(conf, stateStore);
secretMgr.setNodeId(nodeId);
secretMgr.recover();
assertEquals(currentKey, secretMgr.getCurrentKey());
assertTrue(secretMgr.isValidStartContainerRequest(tokenId1));
assertTrue(secretMgr.isValidStartContainerRequest(tokenId2));
assertNotNull(secretMgr.retrievePassword(tokenId1));
assertNotNull(secretMgr.retrievePassword(tokenId2));
    // start a container and roll the master key
secretMgr.startContainerSuccessful(tokenId2);
currentKey = keygen.generateKey();
secretMgr.setMasterKey(currentKey);
    // restart and verify tokens are still accepted due to the persisted
    // previous key; tokenId2 can no longer start a container as it already has
secretMgr = new NMContainerTokenSecretManager(conf, stateStore);
secretMgr.setNodeId(nodeId);
secretMgr.recover();
assertEquals(currentKey, secretMgr.getCurrentKey());
assertTrue(secretMgr.isValidStartContainerRequest(tokenId1));
assertFalse(secretMgr.isValidStartContainerRequest(tokenId2));
assertNotNull(secretMgr.retrievePassword(tokenId1));
assertNotNull(secretMgr.retrievePassword(tokenId2));
    // roll master key again, restart, and verify the tokens are no longer valid
currentKey = keygen.generateKey();
secretMgr.setMasterKey(currentKey);
secretMgr = new NMContainerTokenSecretManager(conf, stateStore);
secretMgr.setNodeId(nodeId);
secretMgr.recover();
assertEquals(currentKey, secretMgr.getCurrentKey());
assertTrue(secretMgr.isValidStartContainerRequest(tokenId1));
assertFalse(secretMgr.isValidStartContainerRequest(tokenId2));
try {
secretMgr.retrievePassword(tokenId1);
fail("token should not be valid");
} catch (InvalidToken e) {
// expected
}
try {
secretMgr.retrievePassword(tokenId2);
fail("token should not be valid");
} catch (InvalidToken e) {
// expected
}
stateStore.close();
}
private static ContainerTokenIdentifier createContainerTokenId(
ContainerId cid, NodeId nodeId, String user,
NMContainerTokenSecretManager secretMgr) throws IOException {
long rmid = cid.getApplicationAttemptId().getApplicationId()
.getClusterTimestamp();
ContainerTokenIdentifier ctid = new ContainerTokenIdentifier(cid,
nodeId.toString(), user, BuilderUtils.newResource(1024, 1),
System.currentTimeMillis() + 100000L,
secretMgr.getCurrentKey().getKeyId(), rmid,
Priority.newInstance(0), 0);
Token token = BuilderUtils.newContainerToken(nodeId,
secretMgr.createPassword(ctid), ctid);
return BuilderUtils.newContainerTokenIdentifier(token);
}
private static class ContainerTokenKeyGeneratorForTest extends
BaseContainerTokenSecretManager {
public ContainerTokenKeyGeneratorForTest(Configuration conf) {
super(conf);
}
public MasterKey generateKey() {
return createNewMasterKey().getMasterKey();
}
}
}
| 6,042 | 40.675862 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/MockApp.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager.webapp;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEvent;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationState;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
public class MockApp implements Application {
final String user;
final ApplicationId appId;
Map<ContainerId, Container> containers = new HashMap<ContainerId, Container>();
ApplicationState appState;
Application app;
public MockApp(int uniqId) {
this("mockUser", 1234, uniqId);
}
public MockApp(String user, long clusterTimeStamp, int uniqId) {
super();
this.user = user;
    // Create the application id for this mock app
RecordFactory recordFactory = RecordFactoryProvider
.getRecordFactory(new Configuration());
this.appId = BuilderUtils.newApplicationId(recordFactory, clusterTimeStamp,
uniqId);
appState = ApplicationState.NEW;
}
public void setState(ApplicationState state) {
this.appState = state;
}
public String getUser() {
return user;
}
public Map<ContainerId, Container> getContainers() {
return containers;
}
public ApplicationId getAppId() {
return appId;
}
public ApplicationState getApplicationState() {
return appState;
}
public void handle(ApplicationEvent event) {}
}
| 2,731 | 32.728395 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/MockContainer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager.webapp;
import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEvent;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
public class MockContainer implements Container {
private ContainerId id;
private ContainerState state;
private String user;
private ContainerLaunchContext launchContext;
private final Map<Path, List<String>> resource =
new HashMap<Path, List<String>>();
private RecordFactory recordFactory;
private final ContainerTokenIdentifier containerTokenIdentifier;
public MockContainer(ApplicationAttemptId appAttemptId,
Dispatcher dispatcher, Configuration conf, String user,
ApplicationId appId, int uniqId) throws IOException{
this.user = user;
this.recordFactory = RecordFactoryProvider.getRecordFactory(conf);
this.id = BuilderUtils.newContainerId(recordFactory, appId, appAttemptId,
uniqId);
this.launchContext = recordFactory
.newRecordInstance(ContainerLaunchContext.class);
long currentTime = System.currentTimeMillis();
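    // build a container token identifier with a dummy password and a 10s expiry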
this.containerTokenIdentifier =
BuilderUtils.newContainerTokenIdentifier(BuilderUtils
.newContainerToken(id, "127.0.0.1", 1234, user,
BuilderUtils.newResource(1024, 1), currentTime + 10000, 123,
"password".getBytes(), currentTime));
this.state = ContainerState.NEW;
}
public void setState(ContainerState state) {
this.state = state;
}
@Override
public String getUser() {
return user;
}
@Override
public ContainerState getContainerState() {
return state;
}
@Override
public ContainerLaunchContext getLaunchContext() {
return launchContext;
}
@Override
public Credentials getCredentials() {
return null;
}
@Override
public Map<Path, List<String>> getLocalizedResources() {
return resource;
}
@Override
public ContainerStatus cloneAndGetContainerStatus() {
ContainerStatus containerStatus = recordFactory
.newRecordInstance(ContainerStatus.class);
containerStatus
.setState(org.apache.hadoop.yarn.api.records.ContainerState.RUNNING);
containerStatus.setDiagnostics("testing");
containerStatus.setExitStatus(0);
return containerStatus;
}
@Override
public String toString() {
return "";
}
@Override
public void handle(ContainerEvent event) {
}
@Override
public ContainerId getContainerId() {
return this.id;
}
@Override
public Resource getResource() {
return this.containerTokenIdentifier.getResource();
}
@Override
public ContainerTokenIdentifier getContainerTokenIdentifier() {
return this.containerTokenIdentifier;
}
@Override
public NMContainerStatus getNMContainerStatus() {
return null;
}
}
| 4,698 | 31.631944 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServices.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager.webapp;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.File;
import java.io.IOException;
import java.io.PrintWriter;
import java.io.StringReader;
import javax.ws.rs.core.MediaType;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import org.junit.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.NodeHealthScriptRunner;
import org.apache.hadoop.util.VersionInfo;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.server.nodemanager.Context;
import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
import org.apache.hadoop.yarn.server.nodemanager.NodeHealthCheckerService;
import org.apache.hadoop.yarn.server.nodemanager.NodeManager;
import org.apache.hadoop.yarn.server.nodemanager.ResourceView;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationImpl;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch;
import org.apache.hadoop.yarn.server.nodemanager.webapp.WebServer.NMWebApp;
import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.apache.hadoop.yarn.util.YarnVersionInfo;
import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
import org.apache.hadoop.yarn.webapp.JerseyTestBase;
import org.apache.hadoop.yarn.webapp.WebApp;
import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
import org.codehaus.jettison.json.JSONException;
import org.codehaus.jettison.json.JSONObject;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.Test;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.NodeList;
import org.xml.sax.InputSource;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.servlet.GuiceServletContextListener;
import com.google.inject.servlet.ServletModule;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.ClientResponse.Status;
import com.sun.jersey.api.client.UniformInterfaceException;
import com.sun.jersey.api.client.WebResource;
import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
import com.sun.jersey.test.framework.WebAppDescriptor;
/**
 * Tests the NodeManager node info web services APIs.
*/
public class TestNMWebServices extends JerseyTestBase {
private static Context nmContext;
private static ResourceView resourceView;
private static ApplicationACLsManager aclsManager;
private static LocalDirsHandlerService dirsHandler;
private static WebApp nmWebApp;
private static final File testRootDir = new File("target",
TestNMWebServices.class.getSimpleName());
private static File testLogDir = new File("target",
TestNMWebServices.class.getSimpleName() + "LogDir");
private Injector injector = Guice.createInjector(new ServletModule() {
@Override
protected void configureServlets() {
Configuration conf = new Configuration();
conf.set(YarnConfiguration.NM_LOCAL_DIRS, testRootDir.getAbsolutePath());
conf.set(YarnConfiguration.NM_LOG_DIRS, testLogDir.getAbsolutePath());
dirsHandler = new LocalDirsHandlerService();
NodeHealthCheckerService healthChecker = new NodeHealthCheckerService(
NodeManager.getNodeHealthScriptRunner(conf), dirsHandler);
healthChecker.init(conf);
aclsManager = new ApplicationACLsManager(conf);
nmContext = new NodeManager.NMContext(null, null, dirsHandler,
aclsManager, null);
NodeId nodeId = NodeId.newInstance("testhost.foo.com", 8042);
((NodeManager.NMContext)nmContext).setNodeId(nodeId);
resourceView = new ResourceView() {
@Override
public long getVmemAllocatedForContainers() {
// 15.5G in bytes
return new Long("16642998272");
}
@Override
public long getPmemAllocatedForContainers() {
// 16G in bytes
return new Long("17179869184");
}
@Override
public long getVCoresAllocatedForContainers() {
return new Long("4000");
}
@Override
public boolean isVmemCheckEnabled() {
return true;
}
@Override
public boolean isPmemCheckEnabled() {
return true;
}
};
nmWebApp = new NMWebApp(resourceView, aclsManager, dirsHandler);
bind(JAXBContextResolver.class);
bind(NMWebServices.class);
bind(GenericExceptionHandler.class);
bind(Context.class).toInstance(nmContext);
bind(WebApp.class).toInstance(nmWebApp);
bind(ResourceView.class).toInstance(resourceView);
bind(ApplicationACLsManager.class).toInstance(aclsManager);
bind(LocalDirsHandlerService.class).toInstance(dirsHandler);
serve("/*").with(GuiceContainer.class);
}
});
public class GuiceServletConfig extends GuiceServletContextListener {
@Override
protected Injector getInjector() {
return injector;
}
}
@Before
@Override
public void setUp() throws Exception {
super.setUp();
testRootDir.mkdirs();
testLogDir.mkdir();
}
@AfterClass
  public static void stop() {
FileUtil.fullyDelete(testRootDir);
FileUtil.fullyDelete(testLogDir);
}
public TestNMWebServices() {
super(new WebAppDescriptor.Builder(
"org.apache.hadoop.yarn.server.nodemanager.webapp")
.contextListenerClass(GuiceServletConfig.class)
.filterClass(com.google.inject.servlet.GuiceFilter.class)
.contextPath("jersey-guice-filter").servletPath("/").build());
}
@Test
public void testInvalidUri() throws JSONException, Exception {
WebResource r = resource();
String responseStr = "";
try {
responseStr = r.path("ws").path("v1").path("node").path("bogus")
.accept(MediaType.APPLICATION_JSON).get(String.class);
fail("should have thrown exception on invalid uri");
} catch (UniformInterfaceException ue) {
ClientResponse response = ue.getResponse();
assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
WebServicesTestUtils.checkStringMatch(
"error string exists and shouldn't", "", responseStr);
}
}
@Test
public void testInvalidAccept() throws JSONException, Exception {
WebResource r = resource();
String responseStr = "";
try {
responseStr = r.path("ws").path("v1").path("node")
.accept(MediaType.TEXT_PLAIN).get(String.class);
fail("should have thrown exception on invalid uri");
} catch (UniformInterfaceException ue) {
ClientResponse response = ue.getResponse();
assertEquals(Status.INTERNAL_SERVER_ERROR,
response.getClientResponseStatus());
WebServicesTestUtils.checkStringMatch(
"error string exists and shouldn't", "", responseStr);
}
}
@Test
public void testInvalidUri2() throws JSONException, Exception {
WebResource r = resource();
String responseStr = "";
try {
responseStr = r.accept(MediaType.APPLICATION_JSON).get(String.class);
fail("should have thrown exception on invalid uri");
} catch (UniformInterfaceException ue) {
ClientResponse response = ue.getResponse();
assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
WebServicesTestUtils.checkStringMatch(
"error string exists and shouldn't", "", responseStr);
}
}
@Test
public void testNode() throws JSONException, Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("node")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
verifyNodeInfo(json);
}
@Test
public void testNodeSlash() throws JSONException, Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("node/")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
verifyNodeInfo(json);
}
// make sure default is json output
@Test
public void testNodeDefault() throws JSONException, Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("node")
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
verifyNodeInfo(json);
}
@Test
public void testNodeInfo() throws JSONException, Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("node").path("info")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
verifyNodeInfo(json);
}
@Test
public void testNodeInfoSlash() throws JSONException, Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("node")
.path("info/").accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
verifyNodeInfo(json);
}
// make sure default is json output
@Test
public void testNodeInfoDefault() throws JSONException, Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("node").path("info")
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
verifyNodeInfo(json);
}
@Test
public void testSingleNodesXML() throws JSONException, Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("node")
.path("info/").accept(MediaType.APPLICATION_XML)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
String xml = response.getEntity(String.class);
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
DocumentBuilder db = dbf.newDocumentBuilder();
InputSource is = new InputSource();
is.setCharacterStream(new StringReader(xml));
Document dom = db.parse(is);
NodeList nodes = dom.getElementsByTagName("nodeInfo");
assertEquals("incorrect number of elements", 1, nodes.getLength());
verifyNodesXML(nodes);
}
@Test
public void testContainerLogs() throws IOException {
WebResource r = resource();
    final ContainerId containerId = BuilderUtils.newContainerId(0, 0, 0, 0);
    final String containerIdStr = containerId.toString();
final ApplicationAttemptId appAttemptId = containerId.getApplicationAttemptId();
final ApplicationId appId = appAttemptId.getApplicationId();
final String appIdStr = appId.toString();
final String filename = "logfile1";
final String logMessage = "log message\n";
nmContext.getApplications().put(appId, new ApplicationImpl(null, "user",
appId, null, nmContext));
MockContainer container = new MockContainer(appAttemptId,
new AsyncDispatcher(), new Configuration(), "user", appId, 1);
container.setState(ContainerState.RUNNING);
nmContext.getContainers().put(containerId, container);
// write out log file
Path path = dirsHandler.getLogPathForWrite(
ContainerLaunch.getRelativeContainerLogDir(
appIdStr, containerIdStr) + "/" + filename, false);
File logFile = new File(path.toUri().getPath());
logFile.deleteOnExit();
assertTrue("Failed to create log dir", logFile.getParentFile().mkdirs());
PrintWriter pw = new PrintWriter(logFile);
pw.print(logMessage);
pw.close();
// ask for it
ClientResponse response = r.path("ws").path("v1").path("node")
.path("containerlogs").path(containerIdStr).path(filename)
.accept(MediaType.TEXT_PLAIN).get(ClientResponse.class);
String responseText = response.getEntity(String.class);
assertEquals(logMessage, responseText);
// ask for file that doesn't exist
response = r.path("ws").path("v1").path("node")
.path("containerlogs").path(containerIdStr).path("uhhh")
.accept(MediaType.TEXT_PLAIN).get(ClientResponse.class);
Assert.assertEquals(Status.NOT_FOUND.getStatusCode(), response.getStatus());
responseText = response.getEntity(String.class);
assertTrue(responseText.contains("Cannot find this log on the local disk."));
// After container is completed, it is removed from nmContext
nmContext.getContainers().remove(containerId);
Assert.assertNull(nmContext.getContainers().get(containerId));
response =
r.path("ws").path("v1").path("node").path("containerlogs")
.path(containerIdStr).path(filename).accept(MediaType.TEXT_PLAIN)
.get(ClientResponse.class);
responseText = response.getEntity(String.class);
assertEquals(logMessage, responseText);
}
public void verifyNodesXML(NodeList nodes) throws JSONException, Exception {
for (int i = 0; i < nodes.getLength(); i++) {
Element element = (Element) nodes.item(i);
verifyNodeInfoGeneric(WebServicesTestUtils.getXmlString(element, "id"),
WebServicesTestUtils.getXmlString(element, "healthReport"),
WebServicesTestUtils.getXmlLong(element,
"totalVmemAllocatedContainersMB"),
WebServicesTestUtils.getXmlLong(element,
"totalPmemAllocatedContainersMB"),
WebServicesTestUtils.getXmlLong(element,
"totalVCoresAllocatedContainers"),
WebServicesTestUtils.getXmlBoolean(element, "vmemCheckEnabled"),
WebServicesTestUtils.getXmlBoolean(element, "pmemCheckEnabled"),
WebServicesTestUtils.getXmlLong(element, "lastNodeUpdateTime"),
WebServicesTestUtils.getXmlBoolean(element, "nodeHealthy"),
WebServicesTestUtils.getXmlString(element, "nodeHostName"),
WebServicesTestUtils.getXmlString(element, "hadoopVersionBuiltOn"),
WebServicesTestUtils.getXmlString(element, "hadoopBuildVersion"),
WebServicesTestUtils.getXmlString(element, "hadoopVersion"),
WebServicesTestUtils.getXmlString(element,
"nodeManagerVersionBuiltOn"), WebServicesTestUtils.getXmlString(
element, "nodeManagerBuildVersion"),
WebServicesTestUtils.getXmlString(element, "nodeManagerVersion"));
}
}
public void verifyNodeInfo(JSONObject json) throws JSONException, Exception {
assertEquals("incorrect number of elements", 1, json.length());
JSONObject info = json.getJSONObject("nodeInfo");
assertEquals("incorrect number of elements", 17, info.length());
verifyNodeInfoGeneric(info.getString("id"), info.getString("healthReport"),
info.getLong("totalVmemAllocatedContainersMB"),
info.getLong("totalPmemAllocatedContainersMB"),
info.getLong("totalVCoresAllocatedContainers"),
info.getBoolean("vmemCheckEnabled"),
info.getBoolean("pmemCheckEnabled"),
info.getLong("lastNodeUpdateTime"), info.getBoolean("nodeHealthy"),
info.getString("nodeHostName"), info.getString("hadoopVersionBuiltOn"),
info.getString("hadoopBuildVersion"), info.getString("hadoopVersion"),
info.getString("nodeManagerVersionBuiltOn"),
info.getString("nodeManagerBuildVersion"),
info.getString("nodeManagerVersion"));
}
public void verifyNodeInfoGeneric(String id, String healthReport,
long totalVmemAllocatedContainersMB, long totalPmemAllocatedContainersMB,
long totalVCoresAllocatedContainers,
boolean vmemCheckEnabled, boolean pmemCheckEnabled,
long lastNodeUpdateTime, Boolean nodeHealthy, String nodeHostName,
String hadoopVersionBuiltOn, String hadoopBuildVersion,
      String hadoopVersion, String nodeManagerVersionBuiltOn,
      String nodeManagerBuildVersion, String nodeManagerVersion) {
WebServicesTestUtils.checkStringMatch("id", "testhost.foo.com:8042", id);
WebServicesTestUtils.checkStringMatch("healthReport", "Healthy",
healthReport);
assertEquals("totalVmemAllocatedContainersMB incorrect", 15872,
totalVmemAllocatedContainersMB);
assertEquals("totalPmemAllocatedContainersMB incorrect", 16384,
totalPmemAllocatedContainersMB);
assertEquals("totalVCoresAllocatedContainers incorrect", 4000,
totalVCoresAllocatedContainers);
assertEquals("vmemCheckEnabled incorrect", true, vmemCheckEnabled);
assertEquals("pmemCheckEnabled incorrect", true, pmemCheckEnabled);
assertTrue("lastNodeUpdateTime incorrect", lastNodeUpdateTime == nmContext
.getNodeHealthStatus().getLastHealthReportTime());
assertTrue("nodeHealthy isn't true", nodeHealthy);
WebServicesTestUtils.checkStringMatch("nodeHostName", "testhost.foo.com",
nodeHostName);
WebServicesTestUtils.checkStringMatch("hadoopVersionBuiltOn",
VersionInfo.getDate(), hadoopVersionBuiltOn);
WebServicesTestUtils.checkStringEqual("hadoopBuildVersion",
VersionInfo.getBuildVersion(), hadoopBuildVersion);
WebServicesTestUtils.checkStringMatch("hadoopVersion",
VersionInfo.getVersion(), hadoopVersion);
WebServicesTestUtils.checkStringMatch("resourceManagerVersionBuiltOn",
YarnVersionInfo.getDate(), resourceManagerVersionBuiltOn);
WebServicesTestUtils.checkStringEqual("resourceManagerBuildVersion",
YarnVersionInfo.getBuildVersion(), resourceManagerBuildVersion);
WebServicesTestUtils.checkStringMatch("resourceManagerVersion",
YarnVersionInfo.getVersion(), resourceManagerVersion);
}
}
| 19,593 | 41.319654 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager.webapp;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.Writer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.util.NodeHealthScriptRunner;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.server.nodemanager.Context;
import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
import org.apache.hadoop.yarn.server.nodemanager.NodeHealthCheckerService;
import org.apache.hadoop.yarn.server.nodemanager.NodeManager;
import org.apache.hadoop.yarn.server.nodemanager.ResourceView;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerImpl;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics;
import org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService;
import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService;
import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
public class TestNMWebServer {
private static final File testRootDir = new File("target",
TestNMWebServer.class.getSimpleName());
private static File testLogDir = new File("target",
TestNMWebServer.class.getSimpleName() + "LogDir");
@Before
public void setup() {
testRootDir.mkdirs();
testLogDir.mkdir();
}
@After
public void tearDown() {
FileUtil.fullyDelete(testRootDir);
FileUtil.fullyDelete(testLogDir);
}
private NodeHealthCheckerService createNodeHealthCheckerService(Configuration conf) {
NodeHealthScriptRunner scriptRunner = NodeManager.getNodeHealthScriptRunner(conf);
LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService();
return new NodeHealthCheckerService(scriptRunner, dirsHandler);
}
private int startNMWebAppServer(String webAddr) {
Context nmContext = new NodeManager.NMContext(null, null, null, null,
null);
ResourceView resourceView = new ResourceView() {
@Override
public long getVmemAllocatedForContainers() {
return 0;
}
@Override
public long getPmemAllocatedForContainers() {
return 0;
}
@Override
public long getVCoresAllocatedForContainers() {
return 0;
}
@Override
public boolean isVmemCheckEnabled() {
return true;
}
@Override
public boolean isPmemCheckEnabled() {
return true;
}
};
Configuration conf = new Configuration();
conf.set(YarnConfiguration.NM_LOCAL_DIRS, testRootDir.getAbsolutePath());
conf.set(YarnConfiguration.NM_LOG_DIRS, testLogDir.getAbsolutePath());
NodeHealthCheckerService healthChecker = createNodeHealthCheckerService(conf);
healthChecker.init(conf);
LocalDirsHandlerService dirsHandler = healthChecker.getDiskHandler();
conf.set(YarnConfiguration.NM_WEBAPP_ADDRESS, webAddr);
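    // start the web server on the requested address and return the bound port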
WebServer server = new WebServer(nmContext, resourceView,
new ApplicationACLsManager(conf), dirsHandler);
try {
server.init(conf);
server.start();
return server.getPort();
} finally {
server.stop();
healthChecker.stop();
}
}
@Test
public void testNMWebAppWithOutPort() throws IOException {
int port = startNMWebAppServer("0.0.0.0");
validatePortVal(port);
}
private void validatePortVal(int portVal) {
Assert.assertTrue("Port is not updated", portVal > 0);
Assert.assertTrue("Port is default "+ YarnConfiguration.DEFAULT_NM_PORT,
portVal !=YarnConfiguration.DEFAULT_NM_PORT);
}
@Test
public void testNMWebAppWithEphemeralPort() throws IOException {
int port = startNMWebAppServer("0.0.0.0:0");
validatePortVal(port);
}
@Test
public void testNMWebApp() throws IOException, YarnException {
Context nmContext = new NodeManager.NMContext(null, null, null, null,
null);
ResourceView resourceView = new ResourceView() {
@Override
public long getVmemAllocatedForContainers() {
return 0;
}
@Override
public long getPmemAllocatedForContainers() {
return 0;
}
@Override
public long getVCoresAllocatedForContainers() {
return 0;
}
@Override
public boolean isVmemCheckEnabled() {
return true;
}
@Override
public boolean isPmemCheckEnabled() {
return true;
}
};
Configuration conf = new Configuration();
conf.set(YarnConfiguration.NM_LOCAL_DIRS, testRootDir.getAbsolutePath());
conf.set(YarnConfiguration.NM_LOG_DIRS, testLogDir.getAbsolutePath());
NodeHealthCheckerService healthChecker = createNodeHealthCheckerService(conf);
healthChecker.init(conf);
LocalDirsHandlerService dirsHandler = healthChecker.getDiskHandler();
WebServer server = new WebServer(nmContext, resourceView,
new ApplicationACLsManager(conf), dirsHandler);
server.init(conf);
server.start();
// Add an application and the corresponding containers
RecordFactory recordFactory =
RecordFactoryProvider.getRecordFactory(conf);
Dispatcher dispatcher = new AsyncDispatcher();
String user = "nobody";
long clusterTimeStamp = 1234;
ApplicationId appId =
BuilderUtils.newApplicationId(recordFactory, clusterTimeStamp, 1);
Application app = mock(Application.class);
when(app.getUser()).thenReturn(user);
when(app.getAppId()).thenReturn(appId);
nmContext.getApplications().put(appId, app);
ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(
appId, 1);
ContainerId container1 =
BuilderUtils.newContainerId(recordFactory, appId, appAttemptId, 0);
ContainerId container2 =
BuilderUtils.newContainerId(recordFactory, appId, appAttemptId, 1);
NodeManagerMetrics metrics = mock(NodeManagerMetrics.class);
NMStateStoreService stateStore = new NMNullStateStoreService();
for (ContainerId containerId : new ContainerId[] { container1,
container2}) {
// TODO: Use builder utils
ContainerLaunchContext launchContext =
recordFactory.newRecordInstance(ContainerLaunchContext.class);
long currentTime = System.currentTimeMillis();
Token containerToken =
BuilderUtils.newContainerToken(containerId, "127.0.0.1", 1234, user,
BuilderUtils.newResource(1024, 1), currentTime + 10000L, 123,
"password".getBytes(), currentTime);
Container container =
new ContainerImpl(conf, dispatcher, stateStore, launchContext,
null, metrics,
BuilderUtils.newContainerTokenIdentifier(containerToken)) {
@Override
public ContainerState getContainerState() {
return ContainerState.RUNNING;
            }
};
nmContext.getContainers().put(containerId, container);
//TODO: Gross hack. Fix in code.
ApplicationId applicationId =
containerId.getApplicationAttemptId().getApplicationId();
nmContext.getApplications().get(applicationId).getContainers()
.put(containerId, container);
writeContainerLogs(nmContext, containerId, dirsHandler);
}
// TODO: Pull logs and test contents.
// Thread.sleep(1000000);
}
private void writeContainerLogs(Context nmContext,
ContainerId containerId, LocalDirsHandlerService dirsHandler)
throws IOException, YarnException {
// ContainerLogDir should be created
File containerLogDir =
ContainerLogsUtils.getContainerLogDirs(containerId,
dirsHandler).get(0);
containerLogDir.mkdirs();
for (String fileType : new String[] { "stdout", "stderr", "syslog" }) {
Writer writer = new FileWriter(new File(containerLogDir, fileType));
writer.write(ConverterUtils.toString(containerId) + "\n Hello "
+ fileType + "!");
writer.close();
}
}
}
| 9,931 | 37.645914 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestContainerLogsPage.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager.webapp;
import static org.junit.Assume.assumeTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.when;
import static org.mockito.Mockito.verify;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.NodeHealthScriptRunner;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.impl.pb.ContainerIdPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationAttemptIdPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerImpl;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.server.nodemanager.Context;
import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
import org.apache.hadoop.yarn.server.nodemanager.NodeHealthCheckerService;
import org.apache.hadoop.yarn.server.nodemanager.NodeManager;
import org.apache.hadoop.yarn.server.nodemanager.NodeManager.NMContext;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch;
import org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService;
import org.apache.hadoop.yarn.server.nodemanager.webapp.ContainerLogsPage.ContainersLogsBlock;
import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.apache.hadoop.yarn.webapp.YarnWebParams;
import org.apache.hadoop.yarn.webapp.test.WebAppTests;
import org.junit.Assert;
import org.junit.Test;
import com.google.inject.Injector;
import com.google.inject.Module;
public class TestContainerLogsPage {
private NodeHealthCheckerService createNodeHealthCheckerService(Configuration conf) {
NodeHealthScriptRunner scriptRunner = NodeManager.getNodeHealthScriptRunner(conf);
LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService();
return new NodeHealthCheckerService(scriptRunner, dirsHandler);
}
@Test(timeout=30000)
public void testContainerLogDirs() throws IOException, YarnException {
File absLogDir = new File("target",
TestNMWebServer.class.getSimpleName() + "LogDir").getAbsoluteFile();
String logdirwithFile = absLogDir.toURI().toString();
Configuration conf = new Configuration();
conf.set(YarnConfiguration.NM_LOG_DIRS, logdirwithFile);
NodeHealthCheckerService healthChecker = createNodeHealthCheckerService(conf);
healthChecker.init(conf);
LocalDirsHandlerService dirsHandler = healthChecker.getDiskHandler();
NMContext nmContext = new NodeManager.NMContext(null, null, dirsHandler,
new ApplicationACLsManager(conf), new NMNullStateStoreService());
// Add an application and the corresponding containers
RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(conf);
String user = "nobody";
long clusterTimeStamp = 1234;
ApplicationId appId = BuilderUtils.newApplicationId(recordFactory,
clusterTimeStamp, 1);
Application app = mock(Application.class);
when(app.getUser()).thenReturn(user);
when(app.getAppId()).thenReturn(appId);
ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(
appId, 1);
ContainerId container1 = BuilderUtils.newContainerId(recordFactory, appId,
appAttemptId, 0);
nmContext.getApplications().put(appId, app);
MockContainer container =
new MockContainer(appAttemptId, new AsyncDispatcher(), conf, user,
appId, 1);
container.setState(ContainerState.RUNNING);
nmContext.getContainers().put(container1, container);
    List<File> files =
        ContainerLogsUtils.getContainerLogDirs(container1, user, nmContext);
    Assert.assertFalse(files.get(0).toString().contains("file:"));
// After container is completed, it is removed from nmContext
nmContext.getContainers().remove(container1);
Assert.assertNull(nmContext.getContainers().get(container1));
    files = ContainerLogsUtils.getContainerLogDirs(container1, user, nmContext);
    Assert.assertFalse(files.get(0).toString().contains("file:"));
// Create a new context to check if correct container log dirs are fetched
// on full disk.
LocalDirsHandlerService dirsHandlerForFullDisk = spy(dirsHandler);
// good log dirs are empty and nm log dir is in the full log dir list.
when(dirsHandlerForFullDisk.getLogDirs()).
thenReturn(new ArrayList<String>());
    when(dirsHandlerForFullDisk.getLogDirsForRead())
        .thenReturn(Arrays.asList(absLogDir.getAbsolutePath()));
nmContext = new NodeManager.NMContext(null, null, dirsHandlerForFullDisk,
new ApplicationACLsManager(conf), new NMNullStateStoreService());
nmContext.getApplications().put(appId, app);
container.setState(ContainerState.RUNNING);
nmContext.getContainers().put(container1, container);
List<File> dirs =
ContainerLogsUtils.getContainerLogDirs(container1, user, nmContext);
File containerLogDir = new File(absLogDir, appId + "/" + container1);
Assert.assertTrue(dirs.contains(containerLogDir));
}
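  // Illustrative sketch, not part of the original test: asserting that every
  // log dir resolved for a container stays under one expected root. It reuses
  // the same ContainerLogsUtils.getContainerLogDirs(...) call as the test
  // above; the helper itself is our own convenience, e.g.
  // assertLogDirsUnderRoot(container1, user, nmContext, absLogDir);
  private void assertLogDirsUnderRoot(ContainerId containerId, String user,
      Context context, File root) throws IOException, YarnException {
    for (File dir : ContainerLogsUtils.getContainerLogDirs(containerId, user,
        context)) {
      Assert.assertTrue("log dir " + dir + " is not under " + root,
          dir.getAbsolutePath().startsWith(root.getAbsolutePath()));
    }
  }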
@Test(timeout=30000)
public void testContainerLogFile() throws IOException, YarnException {
File absLogDir = new File("target",
TestNMWebServer.class.getSimpleName() + "LogDir").getAbsoluteFile();
String logdirwithFile = absLogDir.toURI().toString();
Configuration conf = new Configuration();
conf.set(YarnConfiguration.NM_LOG_DIRS, logdirwithFile);
conf.setFloat(YarnConfiguration.NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE,
0.0f);
LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService();
dirsHandler.init(conf);
NMContext nmContext = new NodeManager.NMContext(null, null, dirsHandler,
new ApplicationACLsManager(conf), new NMNullStateStoreService());
// Add an application and the corresponding containers
String user = "nobody";
long clusterTimeStamp = 1234;
ApplicationId appId = BuilderUtils.newApplicationId(
clusterTimeStamp, 1);
Application app = mock(Application.class);
when(app.getUser()).thenReturn(user);
when(app.getAppId()).thenReturn(appId);
ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(
appId, 1);
ContainerId containerId = BuilderUtils.newContainerId(
appAttemptId, 1);
nmContext.getApplications().put(appId, app);
MockContainer container =
new MockContainer(appAttemptId, new AsyncDispatcher(), conf, user,
appId, 1);
container.setState(ContainerState.RUNNING);
nmContext.getContainers().put(containerId, container);
File containerLogDir = new File(absLogDir,
ContainerLaunch.getRelativeContainerLogDir(appId.toString(),
containerId.toString()));
containerLogDir.mkdirs();
String fileName = "fileName";
File containerLogFile = new File(containerLogDir, fileName);
containerLogFile.createNewFile();
File file = ContainerLogsUtils.getContainerLogFile(containerId,
fileName, user, nmContext);
Assert.assertEquals(containerLogFile.toURI().toString(),
file.toURI().toString());
FileUtil.fullyDelete(absLogDir);
}
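  // Minimal sketch (our addition): recomputing the expected log file location
  // by hand from the <log-root>/<app-id>/<container-id> layout exercised
  // above. Only ContainerLaunch.getRelativeContainerLogDir(...) comes from
  // the NM code; the helper name and parameters are ours.
  private File resolveExpectedLogFile(File logRoot, ContainerId cid,
      String fileName) {
    ApplicationId appId = cid.getApplicationAttemptId().getApplicationId();
    String relative = ContainerLaunch.getRelativeContainerLogDir(
        appId.toString(), cid.toString());
    return new File(new File(logRoot, relative), fileName);
  }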
@Test(timeout = 10000)
public void testContainerLogPageAccess() throws IOException {
    // SecureIOUtils requires Native IO to be enabled. This test will run
    // only if it is enabled.
assumeTrue(NativeIO.isAvailable());
String user = "randomUser" + System.currentTimeMillis();
File absLogDir = null, appDir = null, containerDir = null, syslog = null;
try {
// target log directory
absLogDir =
new File("target", TestContainerLogsPage.class.getSimpleName()
+ "LogDir").getAbsoluteFile();
absLogDir.mkdir();
Configuration conf = new Configuration();
conf.set(YarnConfiguration.NM_LOG_DIRS, absLogDir.toURI().toString());
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
"kerberos");
UserGroupInformation.setConfiguration(conf);
NodeHealthCheckerService healthChecker = createNodeHealthCheckerService(conf);
healthChecker.init(conf);
LocalDirsHandlerService dirsHandler = healthChecker.getDiskHandler();
// Add an application and the corresponding containers
RecordFactory recordFactory =
RecordFactoryProvider.getRecordFactory(conf);
long clusterTimeStamp = 1234;
ApplicationId appId =
BuilderUtils.newApplicationId(recordFactory, clusterTimeStamp, 1);
Application app = mock(Application.class);
when(app.getAppId()).thenReturn(appId);
// Making sure that application returns a random user. This is required
// for SecureIOUtils' file owner check.
when(app.getUser()).thenReturn(user);
ApplicationAttemptId appAttemptId =
BuilderUtils.newApplicationAttemptId(appId, 1);
ContainerId container1 =
BuilderUtils.newContainerId(recordFactory, appId, appAttemptId, 0);
// Testing secure read access for log files
// Creating application and container directory and syslog file.
appDir = new File(absLogDir, appId.toString());
appDir.mkdir();
containerDir = new File(appDir, container1.toString());
containerDir.mkdir();
syslog = new File(containerDir, "syslog");
syslog.createNewFile();
BufferedOutputStream out =
new BufferedOutputStream(new FileOutputStream(syslog));
out.write("Log file Content".getBytes());
out.close();
Context context = mock(Context.class);
ConcurrentMap<ApplicationId, Application> appMap =
new ConcurrentHashMap<ApplicationId, Application>();
appMap.put(appId, app);
when(context.getApplications()).thenReturn(appMap);
ConcurrentHashMap<ContainerId, Container> containers =
new ConcurrentHashMap<ContainerId, Container>();
when(context.getContainers()).thenReturn(containers);
when(context.getLocalDirsHandler()).thenReturn(dirsHandler);
MockContainer container = new MockContainer(appAttemptId,
new AsyncDispatcher(), conf, user, appId, 1);
container.setState(ContainerState.RUNNING);
context.getContainers().put(container1, container);
ContainersLogsBlock cLogsBlock =
new ContainersLogsBlock(context);
Map<String, String> params = new HashMap<String, String>();
params.put(YarnWebParams.CONTAINER_ID, container1.toString());
params.put(YarnWebParams.CONTAINER_LOG_TYPE, "syslog");
Injector injector =
WebAppTests.testPage(ContainerLogsPage.class,
ContainersLogsBlock.class, cLogsBlock, params, (Module[])null);
PrintWriter spyPw = WebAppTests.getPrintWriter(injector);
verify(spyPw).write(
"Exception reading log file. Application submitted by '" + user
+ "' doesn't own requested log file : syslog");
} finally {
if (syslog != null) {
syslog.delete();
}
if (containerDir != null) {
containerDir.delete();
}
if (appDir != null) {
appDir.delete();
}
if (absLogDir != null) {
absLogDir.delete();
}
}
}
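  // Sketch only: the stream handling above leaves the file open if write()
  // throws; a try/finally variant is shown here as a hypothetical helper,
  // kept to the java.io types this class already imports.
  private File writeLogFile(File dir, String name, String content)
      throws IOException {
    File f = new File(dir, name);
    BufferedOutputStream out =
        new BufferedOutputStream(new FileOutputStream(f));
    try {
      out.write(content.getBytes());
    } finally {
      out.close();
    }
    return f;
  }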
@Test
public void testLogDirWithDriveLetter() throws Exception {
//To verify that logs paths which include drive letters (Windows)
//do not lose their drive letter specification
LocalDirsHandlerService localDirs = mock(LocalDirsHandlerService.class);
List<String> logDirs = new ArrayList<String>();
logDirs.add("F:/nmlogs");
when(localDirs.getLogDirsForRead()).thenReturn(logDirs);
ApplicationIdPBImpl appId = mock(ApplicationIdPBImpl.class);
when(appId.toString()).thenReturn("app_id_1");
ApplicationAttemptIdPBImpl appAttemptId =
mock(ApplicationAttemptIdPBImpl.class);
when(appAttemptId.getApplicationId()).thenReturn(appId);
ContainerId containerId = mock(ContainerIdPBImpl.class);
when(containerId.getApplicationAttemptId()).thenReturn(appAttemptId);
List<File> logDirFiles = ContainerLogsUtils.getContainerLogDirs(
containerId, localDirs);
Assert.assertTrue("logDir lost drive letter " +
logDirFiles.get(0),
logDirFiles.get(0).toString().indexOf("F:" + File.separator +
"nmlogs") > -1);
}
@Test
public void testLogFileWithDriveLetter() throws Exception {
ContainerImpl container = mock(ContainerImpl.class);
ApplicationIdPBImpl appId = mock(ApplicationIdPBImpl.class);
when(appId.toString()).thenReturn("appId");
Application app = mock(Application.class);
when(app.getAppId()).thenReturn(appId);
ApplicationAttemptIdPBImpl appAttemptId =
mock(ApplicationAttemptIdPBImpl.class);
when(appAttemptId.getApplicationId()).thenReturn(appId);
ConcurrentMap<ApplicationId, Application> applications =
new ConcurrentHashMap<ApplicationId, Application>();
applications.put(appId, app);
ContainerId containerId = mock(ContainerIdPBImpl.class);
when(containerId.toString()).thenReturn("containerId");
when(containerId.getApplicationAttemptId()).thenReturn(appAttemptId);
ConcurrentMap<ContainerId, Container> containers =
new ConcurrentHashMap<ContainerId, Container>();
containers.put(containerId, container);
LocalDirsHandlerService localDirs = mock(LocalDirsHandlerService.class);
when(localDirs.getLogPathToRead("appId" + Path.SEPARATOR + "containerId" +
Path.SEPARATOR + "fileName"))
.thenReturn(new Path("F:/nmlogs/appId/containerId/fileName"));
NMContext context = mock(NMContext.class);
when(context.getLocalDirsHandler()).thenReturn(localDirs);
when(context.getApplications()).thenReturn(applications);
when(context.getContainers()).thenReturn(containers);
File logFile = ContainerLogsUtils.getContainerLogFile(containerId,
"fileName", null, context);
Assert.assertTrue("logFile lost drive letter " +
logFile,
logFile.toString().indexOf("F:" + File.separator + "nmlogs") > -1);
}
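  // Sketch of the drive-letter assertion shared by the two Windows-path tests
  // above, factored into one place. "F:" matches the mocked dirs in these
  // tests; it is an example drive, not anything the NM requires.
  private static void assertKeepsDriveLetter(File path) {
    Assert.assertTrue("path lost drive letter: " + path,
        path.toString().indexOf("F:" + File.separator) > -1);
  }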
}
| 16355 | 43.086253 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesContainers.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager.webapp;
import static org.apache.hadoop.yarn.util.StringHelper.ujoin;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.File;
import java.io.IOException;
import java.io.StringReader;
import java.util.HashMap;
import javax.ws.rs.core.MediaType;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.util.NodeHealthScriptRunner;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.server.nodemanager.Context;
import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
import org.apache.hadoop.yarn.server.nodemanager.NodeHealthCheckerService;
import org.apache.hadoop.yarn.server.nodemanager.NodeManager;
import org.apache.hadoop.yarn.server.nodemanager.ResourceView;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.apache.hadoop.yarn.server.nodemanager.webapp.WebServer.NMWebApp;
import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
import org.apache.hadoop.yarn.webapp.JerseyTestBase;
import org.apache.hadoop.yarn.webapp.WebApp;
import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
import org.codehaus.jettison.json.JSONArray;
import org.codehaus.jettison.json.JSONException;
import org.codehaus.jettison.json.JSONObject;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.Test;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.NodeList;
import org.xml.sax.InputSource;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.servlet.GuiceServletContextListener;
import com.google.inject.servlet.ServletModule;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.ClientResponse.Status;
import com.sun.jersey.api.client.UniformInterfaceException;
import com.sun.jersey.api.client.WebResource;
import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
import com.sun.jersey.test.framework.WebAppDescriptor;
public class TestNMWebServicesContainers extends JerseyTestBase {
private static Context nmContext;
private static ResourceView resourceView;
private static ApplicationACLsManager aclsManager;
private static LocalDirsHandlerService dirsHandler;
private static WebApp nmWebApp;
private static Configuration conf = new Configuration();
private static final File testRootDir = new File("target",
TestNMWebServicesContainers.class.getSimpleName());
private static File testLogDir = new File("target",
TestNMWebServicesContainers.class.getSimpleName() + "LogDir");
private Injector injector = Guice.createInjector(new ServletModule() {
@Override
protected void configureServlets() {
resourceView = new ResourceView() {
@Override
public long getVmemAllocatedForContainers() {
// 15.5G in bytes
return new Long("16642998272");
}
@Override
public long getPmemAllocatedForContainers() {
// 16G in bytes
return new Long("17179869184");
}
@Override
public long getVCoresAllocatedForContainers() {
return new Long("4000");
}
@Override
public boolean isVmemCheckEnabled() {
return true;
}
@Override
public boolean isPmemCheckEnabled() {
return true;
}
};
conf.set(YarnConfiguration.NM_LOCAL_DIRS, testRootDir.getAbsolutePath());
conf.set(YarnConfiguration.NM_LOG_DIRS, testLogDir.getAbsolutePath());
      // Assign the class-level dirsHandler directly; a local variable here
      // would shadow the field that is bound into the injector below.
      dirsHandler = new LocalDirsHandlerService();
      NodeHealthCheckerService healthChecker = new NodeHealthCheckerService(
          NodeManager.getNodeHealthScriptRunner(conf), dirsHandler);
      healthChecker.init(conf);
      dirsHandler = healthChecker.getDiskHandler();
aclsManager = new ApplicationACLsManager(conf);
      nmContext = new NodeManager.NMContext(null, null, dirsHandler,
          aclsManager, null) {
        @Override
        public NodeId getNodeId() {
          return NodeId.newInstance("testhost.foo.com", 8042);
        }
        @Override
        public int getHttpPort() {
          return 1234;
        }
      };
nmWebApp = new NMWebApp(resourceView, aclsManager, dirsHandler);
bind(JAXBContextResolver.class);
bind(NMWebServices.class);
bind(GenericExceptionHandler.class);
bind(Context.class).toInstance(nmContext);
bind(WebApp.class).toInstance(nmWebApp);
bind(ResourceView.class).toInstance(resourceView);
bind(ApplicationACLsManager.class).toInstance(aclsManager);
bind(LocalDirsHandlerService.class).toInstance(dirsHandler);
serve("/*").with(GuiceContainer.class);
}
});
public class GuiceServletConfig extends GuiceServletContextListener {
@Override
protected Injector getInjector() {
return injector;
}
}
@Before
@Override
public void setUp() throws Exception {
super.setUp();
testRootDir.mkdirs();
testLogDir.mkdir();
}
@AfterClass
  public static void cleanup() {
FileUtil.fullyDelete(testRootDir);
FileUtil.fullyDelete(testLogDir);
}
public TestNMWebServicesContainers() {
super(new WebAppDescriptor.Builder(
"org.apache.hadoop.yarn.server.nodemanager.webapp")
.contextListenerClass(GuiceServletConfig.class)
.filterClass(com.google.inject.servlet.GuiceFilter.class)
.contextPath("jersey-guice-filter").servletPath("/").build());
}
@Test
public void testNodeContainersNone() throws JSONException, Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("node")
.path("containers").accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("apps isn't NULL", JSONObject.NULL, json.get("containers"));
}
private HashMap<String, String> addAppContainers(Application app)
throws IOException {
Dispatcher dispatcher = new AsyncDispatcher();
ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(
app.getAppId(), 1);
Container container1 = new MockContainer(appAttemptId, dispatcher, conf,
app.getUser(), app.getAppId(), 1);
Container container2 = new MockContainer(appAttemptId, dispatcher, conf,
app.getUser(), app.getAppId(), 2);
nmContext.getContainers()
.put(container1.getContainerId(), container1);
nmContext.getContainers()
.put(container2.getContainerId(), container2);
app.getContainers().put(container1.getContainerId(), container1);
app.getContainers().put(container2.getContainerId(), container2);
HashMap<String, String> hash = new HashMap<String, String>();
hash.put(container1.getContainerId().toString(), container1
.getContainerId().toString());
hash.put(container2.getContainerId().toString(), container2
.getContainerId().toString());
return hash;
}
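  // Minimal sketch of the request pattern repeated below: GET
  // ws/v1/node/containers/{id}. Path segments and media types mirror the
  // calls already present in this class; the helper itself is our shorthand,
  // e.g. getContainerResponse(id, MediaType.APPLICATION_JSON).
  private ClientResponse getContainerResponse(String id, String media) {
    return resource().path("ws").path("v1").path("node").path("containers")
        .path(id).accept(media).get(ClientResponse.class);
  }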
@Test
public void testNodeContainers() throws JSONException, Exception {
testNodeHelper("containers", MediaType.APPLICATION_JSON);
}
@Test
public void testNodeContainersSlash() throws JSONException, Exception {
testNodeHelper("containers/", MediaType.APPLICATION_JSON);
}
// make sure default is json output
@Test
public void testNodeContainersDefault() throws JSONException, Exception {
testNodeHelper("containers/", "");
}
public void testNodeHelper(String path, String media) throws JSONException,
Exception {
WebResource r = resource();
Application app = new MockApp(1);
nmContext.getApplications().put(app.getAppId(), app);
addAppContainers(app);
Application app2 = new MockApp(2);
nmContext.getApplications().put(app2.getAppId(), app2);
addAppContainers(app2);
ClientResponse response = r.path("ws").path("v1").path("node").path(path)
.accept(media).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
JSONObject info = json.getJSONObject("containers");
assertEquals("incorrect number of elements", 1, info.length());
JSONArray conInfo = info.getJSONArray("container");
assertEquals("incorrect number of elements", 4, conInfo.length());
for (int i = 0; i < conInfo.length(); i++) {
verifyNodeContainerInfo(
conInfo.getJSONObject(i),
nmContext.getContainers().get(
ConverterUtils.toContainerId(conInfo.getJSONObject(i).getString(
"id"))));
}
}
@Test
public void testNodeSingleContainers() throws JSONException, Exception {
testNodeSingleContainersHelper(MediaType.APPLICATION_JSON);
}
@Test
public void testNodeSingleContainersSlash() throws JSONException, Exception {
testNodeSingleContainersHelper(MediaType.APPLICATION_JSON);
}
@Test
public void testNodeSingleContainersDefault() throws JSONException, Exception {
testNodeSingleContainersHelper("");
}
public void testNodeSingleContainersHelper(String media)
throws JSONException, Exception {
WebResource r = resource();
Application app = new MockApp(1);
nmContext.getApplications().put(app.getAppId(), app);
HashMap<String, String> hash = addAppContainers(app);
Application app2 = new MockApp(2);
nmContext.getApplications().put(app2.getAppId(), app2);
addAppContainers(app2);
for (String id : hash.keySet()) {
ClientResponse response = r.path("ws").path("v1").path("node")
.path("containers").path(id).accept(media).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
verifyNodeContainerInfo(json.getJSONObject("container"), nmContext
.getContainers().get(ConverterUtils.toContainerId(id)));
}
}
@Test
public void testSingleContainerInvalid() throws JSONException, Exception {
WebResource r = resource();
Application app = new MockApp(1);
nmContext.getApplications().put(app.getAppId(), app);
addAppContainers(app);
Application app2 = new MockApp(2);
nmContext.getApplications().put(app2.getAppId(), app2);
addAppContainers(app2);
try {
r.path("ws").path("v1").path("node").path("containers")
.path("container_foo_1234").accept(MediaType.APPLICATION_JSON)
.get(JSONObject.class);
fail("should have thrown exception on invalid user query");
} catch (UniformInterfaceException ue) {
ClientResponse response = ue.getResponse();
assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject msg = response.getEntity(JSONObject.class);
JSONObject exception = msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements", 3, exception.length());
String message = exception.getString("message");
String type = exception.getString("exception");
String classname = exception.getString("javaClassName");
WebServicesTestUtils.checkStringMatch("exception message",
"java.lang.Exception: invalid container id, container_foo_1234",
message);
WebServicesTestUtils.checkStringMatch("exception type",
"BadRequestException", type);
WebServicesTestUtils.checkStringMatch("exception classname",
"org.apache.hadoop.yarn.webapp.BadRequestException", classname);
}
}
@Test
public void testSingleContainerInvalid2() throws JSONException, Exception {
WebResource r = resource();
Application app = new MockApp(1);
nmContext.getApplications().put(app.getAppId(), app);
addAppContainers(app);
Application app2 = new MockApp(2);
nmContext.getApplications().put(app2.getAppId(), app2);
addAppContainers(app2);
try {
r.path("ws").path("v1").path("node").path("containers")
.path("container_1234_0001").accept(MediaType.APPLICATION_JSON)
.get(JSONObject.class);
fail("should have thrown exception on invalid user query");
} catch (UniformInterfaceException ue) {
ClientResponse response = ue.getResponse();
assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject msg = response.getEntity(JSONObject.class);
JSONObject exception = msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements", 3, exception.length());
String message = exception.getString("message");
String type = exception.getString("exception");
String classname = exception.getString("javaClassName");
WebServicesTestUtils.checkStringMatch("exception message",
"java.lang.Exception: invalid container id, container_1234_0001",
message);
WebServicesTestUtils.checkStringMatch("exception type",
"BadRequestException", type);
WebServicesTestUtils.checkStringMatch("exception classname",
"org.apache.hadoop.yarn.webapp.BadRequestException", classname);
}
}
@Test
public void testSingleContainerWrong() throws JSONException, Exception {
WebResource r = resource();
Application app = new MockApp(1);
nmContext.getApplications().put(app.getAppId(), app);
addAppContainers(app);
Application app2 = new MockApp(2);
nmContext.getApplications().put(app2.getAppId(), app2);
addAppContainers(app2);
try {
r.path("ws").path("v1").path("node").path("containers")
.path("container_1234_0001_01_000005")
.accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
fail("should have thrown exception on invalid user query");
} catch (UniformInterfaceException ue) {
ClientResponse response = ue.getResponse();
assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject msg = response.getEntity(JSONObject.class);
JSONObject exception = msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements", 3, exception.length());
String message = exception.getString("message");
String type = exception.getString("exception");
String classname = exception.getString("javaClassName");
WebServicesTestUtils
.checkStringMatch(
"exception message",
"java.lang.Exception: container with id, container_1234_0001_01_000005, not found",
message);
WebServicesTestUtils.checkStringMatch("exception type",
"NotFoundException", type);
WebServicesTestUtils.checkStringMatch("exception classname",
"org.apache.hadoop.yarn.webapp.NotFoundException", classname);
}
}
@Test
public void testNodeSingleContainerXML() throws JSONException, Exception {
WebResource r = resource();
Application app = new MockApp(1);
nmContext.getApplications().put(app.getAppId(), app);
HashMap<String, String> hash = addAppContainers(app);
Application app2 = new MockApp(2);
nmContext.getApplications().put(app2.getAppId(), app2);
addAppContainers(app2);
for (String id : hash.keySet()) {
ClientResponse response = r.path("ws").path("v1").path("node")
.path("containers").path(id).accept(MediaType.APPLICATION_XML)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
String xml = response.getEntity(String.class);
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
DocumentBuilder db = dbf.newDocumentBuilder();
InputSource is = new InputSource();
is.setCharacterStream(new StringReader(xml));
Document dom = db.parse(is);
NodeList nodes = dom.getElementsByTagName("container");
assertEquals("incorrect number of elements", 1, nodes.getLength());
verifyContainersInfoXML(nodes,
nmContext.getContainers().get(ConverterUtils.toContainerId(id)));
}
}
@Test
public void testNodeContainerXML() throws JSONException, Exception {
WebResource r = resource();
Application app = new MockApp(1);
nmContext.getApplications().put(app.getAppId(), app);
addAppContainers(app);
Application app2 = new MockApp(2);
nmContext.getApplications().put(app2.getAppId(), app2);
addAppContainers(app2);
ClientResponse response = r.path("ws").path("v1").path("node")
.path("containers").accept(MediaType.APPLICATION_XML)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
String xml = response.getEntity(String.class);
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
DocumentBuilder db = dbf.newDocumentBuilder();
InputSource is = new InputSource();
is.setCharacterStream(new StringReader(xml));
Document dom = db.parse(is);
NodeList nodes = dom.getElementsByTagName("container");
assertEquals("incorrect number of elements", 4, nodes.getLength());
}
public void verifyContainersInfoXML(NodeList nodes, Container cont)
throws JSONException, Exception {
for (int i = 0; i < nodes.getLength(); i++) {
Element element = (Element) nodes.item(i);
verifyNodeContainerInfoGeneric(cont,
WebServicesTestUtils.getXmlString(element, "id"),
WebServicesTestUtils.getXmlString(element, "state"),
WebServicesTestUtils.getXmlString(element, "user"),
WebServicesTestUtils.getXmlInt(element, "exitCode"),
WebServicesTestUtils.getXmlString(element, "diagnostics"),
WebServicesTestUtils.getXmlString(element, "nodeId"),
WebServicesTestUtils.getXmlInt(element, "totalMemoryNeededMB"),
WebServicesTestUtils.getXmlInt(element, "totalVCoresNeeded"),
WebServicesTestUtils.getXmlString(element, "containerLogsLink"));
}
}
public void verifyNodeContainerInfo(JSONObject info, Container cont)
throws JSONException, Exception {
assertEquals("incorrect number of elements", 9, info.length());
verifyNodeContainerInfoGeneric(cont, info.getString("id"),
info.getString("state"), info.getString("user"),
info.getInt("exitCode"), info.getString("diagnostics"),
info.getString("nodeId"), info.getInt("totalMemoryNeededMB"),
info.getInt("totalVCoresNeeded"),
info.getString("containerLogsLink"));
}
public void verifyNodeContainerInfoGeneric(Container cont, String id,
String state, String user, int exitCode, String diagnostics,
String nodeId, int totalMemoryNeededMB, int totalVCoresNeeded,
String logsLink)
throws JSONException, Exception {
WebServicesTestUtils.checkStringMatch("id", cont.getContainerId()
.toString(), id);
WebServicesTestUtils.checkStringMatch("state", cont.getContainerState()
.toString(), state);
WebServicesTestUtils.checkStringMatch("user", cont.getUser().toString(),
user);
assertEquals("exitCode wrong", 0, exitCode);
WebServicesTestUtils
.checkStringMatch("diagnostics", "testing", diagnostics);
WebServicesTestUtils.checkStringMatch("nodeId", nmContext.getNodeId()
.toString(), nodeId);
assertEquals("totalMemoryNeededMB wrong",
YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
totalMemoryNeededMB);
assertEquals("totalVCoresNeeded wrong",
YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES,
totalVCoresNeeded);
String shortLink =
ujoin("containerlogs", cont.getContainerId().toString(),
cont.getUser());
assertTrue("containerLogsLink wrong", logsLink.contains(shortLink));
}
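  // Sketch of the XML parsing boilerplate used above, factored into one call.
  // It relies only on the javax.xml and org.xml.sax types this class already
  // imports; usage would be: Document dom = parseXml(xml);
  private static Document parseXml(String xml) throws Exception {
    DocumentBuilder db =
        DocumentBuilderFactory.newInstance().newDocumentBuilder();
    InputSource is = new InputSource();
    is.setCharacterStream(new StringReader(xml));
    return db.parse(is);
  }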
}
| 21529 | 40.483622 | 97 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesApps.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager.webapp;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.File;
import java.io.IOException;
import java.io.StringReader;
import java.util.HashMap;
import javax.ws.rs.core.MediaType;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.util.NodeHealthScriptRunner;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.server.nodemanager.Context;
import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
import org.apache.hadoop.yarn.server.nodemanager.NodeHealthCheckerService;
import org.apache.hadoop.yarn.server.nodemanager.NodeManager;
import org.apache.hadoop.yarn.server.nodemanager.ResourceView;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationState;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.apache.hadoop.yarn.server.nodemanager.webapp.WebServer.NMWebApp;
import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
import org.apache.hadoop.yarn.webapp.JerseyTestBase;
import org.apache.hadoop.yarn.webapp.WebApp;
import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
import org.codehaus.jettison.json.JSONArray;
import org.codehaus.jettison.json.JSONException;
import org.codehaus.jettison.json.JSONObject;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.Test;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import org.xml.sax.InputSource;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.servlet.GuiceServletContextListener;
import com.google.inject.servlet.ServletModule;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.ClientResponse.Status;
import com.sun.jersey.api.client.UniformInterfaceException;
import com.sun.jersey.api.client.WebResource;
import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
import com.sun.jersey.test.framework.WebAppDescriptor;
public class TestNMWebServicesApps extends JerseyTestBase {
private static Context nmContext;
private static ResourceView resourceView;
private static ApplicationACLsManager aclsManager;
private static LocalDirsHandlerService dirsHandler;
private static WebApp nmWebApp;
private static Configuration conf = new Configuration();
private static final File testRootDir = new File("target",
TestNMWebServicesApps.class.getSimpleName());
private static File testLogDir = new File("target",
TestNMWebServicesApps.class.getSimpleName() + "LogDir");
private Injector injector = Guice.createInjector(new ServletModule() {
@Override
protected void configureServlets() {
conf.set(YarnConfiguration.NM_LOCAL_DIRS, testRootDir.getAbsolutePath());
conf.set(YarnConfiguration.NM_LOG_DIRS, testLogDir.getAbsolutePath());
      // Assign the class-level dirsHandler directly; a local variable here
      // would shadow the field that is bound into the injector below.
      dirsHandler = new LocalDirsHandlerService();
      NodeHealthCheckerService healthChecker = new NodeHealthCheckerService(
          NodeManager.getNodeHealthScriptRunner(conf), dirsHandler);
      healthChecker.init(conf);
      dirsHandler = healthChecker.getDiskHandler();
aclsManager = new ApplicationACLsManager(conf);
nmContext = new NodeManager.NMContext(null, null, dirsHandler,
aclsManager, null);
NodeId nodeId = NodeId.newInstance("testhost.foo.com", 9999);
((NodeManager.NMContext)nmContext).setNodeId(nodeId);
resourceView = new ResourceView() {
@Override
public long getVmemAllocatedForContainers() {
// 15.5G in bytes
return new Long("16642998272");
}
@Override
public long getPmemAllocatedForContainers() {
// 16G in bytes
return new Long("17179869184");
}
@Override
public long getVCoresAllocatedForContainers() {
return new Long("4000");
}
@Override
public boolean isVmemCheckEnabled() {
return true;
}
@Override
public boolean isPmemCheckEnabled() {
return true;
}
};
nmWebApp = new NMWebApp(resourceView, aclsManager, dirsHandler);
bind(JAXBContextResolver.class);
bind(NMWebServices.class);
bind(GenericExceptionHandler.class);
bind(Context.class).toInstance(nmContext);
bind(WebApp.class).toInstance(nmWebApp);
bind(ResourceView.class).toInstance(resourceView);
bind(ApplicationACLsManager.class).toInstance(aclsManager);
bind(LocalDirsHandlerService.class).toInstance(dirsHandler);
serve("/*").with(GuiceContainer.class);
}
});
public class GuiceServletConfig extends GuiceServletContextListener {
@Override
protected Injector getInjector() {
return injector;
}
}
@Before
@Override
public void setUp() throws Exception {
super.setUp();
testRootDir.mkdirs();
testLogDir.mkdir();
}
@AfterClass
  public static void cleanup() {
FileUtil.fullyDelete(testRootDir);
FileUtil.fullyDelete(testLogDir);
}
public TestNMWebServicesApps() {
super(new WebAppDescriptor.Builder(
"org.apache.hadoop.yarn.server.nodemanager.webapp")
.contextListenerClass(GuiceServletConfig.class)
.filterClass(com.google.inject.servlet.GuiceFilter.class)
.contextPath("jersey-guice-filter").servletPath("/").build());
}
@Test
public void testNodeAppsNone() throws JSONException, Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("node").path("apps")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("apps isn't NULL", JSONObject.NULL, json.get("apps"));
}
private HashMap<String, String> addAppContainers(Application app)
throws IOException {
Dispatcher dispatcher = new AsyncDispatcher();
ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(
app.getAppId(), 1);
Container container1 = new MockContainer(appAttemptId, dispatcher, conf,
app.getUser(), app.getAppId(), 1);
Container container2 = new MockContainer(appAttemptId, dispatcher, conf,
app.getUser(), app.getAppId(), 2);
nmContext.getContainers()
.put(container1.getContainerId(), container1);
nmContext.getContainers()
.put(container2.getContainerId(), container2);
app.getContainers().put(container1.getContainerId(), container1);
app.getContainers().put(container2.getContainerId(), container2);
HashMap<String, String> hash = new HashMap<String, String>();
hash.put(container1.getContainerId().toString(), container1
.getContainerId().toString());
hash.put(container2.getContainerId().toString(), container2
.getContainerId().toString());
return hash;
}
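  // Minimal sketch of the GET pattern exercised by these tests:
  // ws/v1/node/apps with an optional query parameter. The parameter names
  // used here are "user" and "state"; the helper itself is our shorthand.
  private ClientResponse getAppsResponse(String queryName, String queryValue) {
    WebResource r = resource().path("ws").path("v1").path("node").path("apps");
    if (queryName != null) {
      r = r.queryParam(queryName, queryValue);
    }
    return r.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  }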
@Test
public void testNodeApps() throws JSONException, Exception {
testNodeHelper("apps", MediaType.APPLICATION_JSON);
}
@Test
public void testNodeAppsSlash() throws JSONException, Exception {
testNodeHelper("apps/", MediaType.APPLICATION_JSON);
}
// make sure default is json output
@Test
public void testNodeAppsDefault() throws JSONException, Exception {
testNodeHelper("apps/", "");
}
public void testNodeHelper(String path, String media) throws JSONException,
Exception {
WebResource r = resource();
Application app = new MockApp(1);
nmContext.getApplications().put(app.getAppId(), app);
HashMap<String, String> hash = addAppContainers(app);
Application app2 = new MockApp(2);
nmContext.getApplications().put(app2.getAppId(), app2);
HashMap<String, String> hash2 = addAppContainers(app2);
ClientResponse response = r.path("ws").path("v1").path("node").path(path)
.accept(media).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
JSONObject info = json.getJSONObject("apps");
assertEquals("incorrect number of elements", 1, info.length());
JSONArray appInfo = info.getJSONArray("app");
assertEquals("incorrect number of elements", 2, appInfo.length());
String id = appInfo.getJSONObject(0).getString("id");
    if (id.equals(app.getAppId().toString())) {
verifyNodeAppInfo(appInfo.getJSONObject(0), app, hash);
verifyNodeAppInfo(appInfo.getJSONObject(1), app2, hash2);
} else {
verifyNodeAppInfo(appInfo.getJSONObject(0), app2, hash2);
verifyNodeAppInfo(appInfo.getJSONObject(1), app, hash);
}
}
@Test
public void testNodeAppsUser() throws JSONException, Exception {
WebResource r = resource();
Application app = new MockApp(1);
nmContext.getApplications().put(app.getAppId(), app);
HashMap<String, String> hash = addAppContainers(app);
Application app2 = new MockApp("foo", 1234, 2);
nmContext.getApplications().put(app2.getAppId(), app2);
addAppContainers(app2);
ClientResponse response = r.path("ws").path("v1").path("node").path("apps")
.queryParam("user", "mockUser").accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
JSONObject info = json.getJSONObject("apps");
assertEquals("incorrect number of elements", 1, info.length());
JSONArray appInfo = info.getJSONArray("app");
assertEquals("incorrect number of elements", 1, appInfo.length());
verifyNodeAppInfo(appInfo.getJSONObject(0), app, hash);
}
@Test
public void testNodeAppsUserNone() throws JSONException, Exception {
WebResource r = resource();
Application app = new MockApp(1);
nmContext.getApplications().put(app.getAppId(), app);
addAppContainers(app);
Application app2 = new MockApp("foo", 1234, 2);
nmContext.getApplications().put(app2.getAppId(), app2);
addAppContainers(app2);
ClientResponse response = r.path("ws").path("v1").path("node").path("apps")
.queryParam("user", "george").accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("apps is not null", JSONObject.NULL, json.get("apps"));
}
@Test
public void testNodeAppsUserEmpty() throws JSONException, Exception {
WebResource r = resource();
Application app = new MockApp(1);
nmContext.getApplications().put(app.getAppId(), app);
addAppContainers(app);
Application app2 = new MockApp("foo", 1234, 2);
nmContext.getApplications().put(app2.getAppId(), app2);
addAppContainers(app2);
try {
r.path("ws").path("v1").path("node").path("apps").queryParam("user", "")
.accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
fail("should have thrown exception on invalid user query");
} catch (UniformInterfaceException ue) {
ClientResponse response = ue.getResponse();
assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject msg = response.getEntity(JSONObject.class);
JSONObject exception = msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements", 3, exception.length());
String message = exception.getString("message");
String type = exception.getString("exception");
String classname = exception.getString("javaClassName");
WebServicesTestUtils
.checkStringMatch(
"exception message",
"java.lang.Exception: Error: You must specify a non-empty string for the user",
message);
WebServicesTestUtils.checkStringMatch("exception type",
"BadRequestException", type);
WebServicesTestUtils.checkStringMatch("exception classname",
"org.apache.hadoop.yarn.webapp.BadRequestException", classname);
}
}
@Test
public void testNodeAppsState() throws JSONException, Exception {
WebResource r = resource();
Application app = new MockApp(1);
nmContext.getApplications().put(app.getAppId(), app);
addAppContainers(app);
MockApp app2 = new MockApp("foo", 1234, 2);
nmContext.getApplications().put(app2.getAppId(), app2);
HashMap<String, String> hash2 = addAppContainers(app2);
app2.setState(ApplicationState.RUNNING);
ClientResponse response = r.path("ws").path("v1").path("node").path("apps")
.queryParam("state", ApplicationState.RUNNING.toString())
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
JSONObject info = json.getJSONObject("apps");
assertEquals("incorrect number of elements", 1, info.length());
JSONArray appInfo = info.getJSONArray("app");
assertEquals("incorrect number of elements", 1, appInfo.length());
verifyNodeAppInfo(appInfo.getJSONObject(0), app2, hash2);
}
@Test
public void testNodeAppsStateNone() throws JSONException, Exception {
WebResource r = resource();
Application app = new MockApp(1);
nmContext.getApplications().put(app.getAppId(), app);
addAppContainers(app);
Application app2 = new MockApp("foo", 1234, 2);
nmContext.getApplications().put(app2.getAppId(), app2);
addAppContainers(app2);
ClientResponse response = r.path("ws").path("v1").path("node").path("apps")
.queryParam("state", ApplicationState.INITING.toString())
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("apps is not null", JSONObject.NULL, json.get("apps"));
}
@Test
public void testNodeAppsStateInvalid() throws JSONException, Exception {
WebResource r = resource();
Application app = new MockApp(1);
nmContext.getApplications().put(app.getAppId(), app);
addAppContainers(app);
Application app2 = new MockApp("foo", 1234, 2);
nmContext.getApplications().put(app2.getAppId(), app2);
addAppContainers(app2);
try {
r.path("ws").path("v1").path("node").path("apps")
.queryParam("state", "FOO_STATE").accept(MediaType.APPLICATION_JSON)
.get(JSONObject.class);
fail("should have thrown exception on invalid user query");
} catch (UniformInterfaceException ue) {
ClientResponse response = ue.getResponse();
assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject msg = response.getEntity(JSONObject.class);
JSONObject exception = msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements", 3, exception.length());
String message = exception.getString("message");
String type = exception.getString("exception");
String classname = exception.getString("javaClassName");
verifyStateInvalidException(message, type, classname);
}
}
// verify the exception object default format is JSON
@Test
public void testNodeAppsStateInvalidDefault() throws JSONException, Exception {
WebResource r = resource();
Application app = new MockApp(1);
nmContext.getApplications().put(app.getAppId(), app);
addAppContainers(app);
Application app2 = new MockApp("foo", 1234, 2);
nmContext.getApplications().put(app2.getAppId(), app2);
addAppContainers(app2);
try {
r.path("ws").path("v1").path("node").path("apps")
.queryParam("state", "FOO_STATE").get(JSONObject.class);
fail("should have thrown exception on invalid user query");
} catch (UniformInterfaceException ue) {
ClientResponse response = ue.getResponse();
assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject msg = response.getEntity(JSONObject.class);
JSONObject exception = msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements", 3, exception.length());
String message = exception.getString("message");
String type = exception.getString("exception");
String classname = exception.getString("javaClassName");
verifyStateInvalidException(message, type, classname);
}
}
// test that the exception output also returns XML
@Test
public void testNodeAppsStateInvalidXML() throws JSONException, Exception {
WebResource r = resource();
Application app = new MockApp(1);
nmContext.getApplications().put(app.getAppId(), app);
addAppContainers(app);
Application app2 = new MockApp("foo", 1234, 2);
nmContext.getApplications().put(app2.getAppId(), app2);
addAppContainers(app2);
try {
r.path("ws").path("v1").path("node").path("apps")
.queryParam("state", "FOO_STATE").accept(MediaType.APPLICATION_XML)
.get(JSONObject.class);
fail("should have thrown exception on invalid user query");
} catch (UniformInterfaceException ue) {
ClientResponse response = ue.getResponse();
assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
String msg = response.getEntity(String.class);
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
DocumentBuilder db = dbf.newDocumentBuilder();
InputSource is = new InputSource();
is.setCharacterStream(new StringReader(msg));
Document dom = db.parse(is);
NodeList nodes = dom.getElementsByTagName("RemoteException");
Element element = (Element) nodes.item(0);
String message = WebServicesTestUtils.getXmlString(element, "message");
String type = WebServicesTestUtils.getXmlString(element, "exception");
String classname = WebServicesTestUtils.getXmlString(element,
"javaClassName");
verifyStateInvalidException(message, type, classname);
}
}
private void verifyStateInvalidException(String message, String type,
String classname) {
WebServicesTestUtils
.checkStringContains(
"exception message",
"org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationState.FOO_STATE",
message);
WebServicesTestUtils.checkStringMatch("exception type",
"IllegalArgumentException", type);
WebServicesTestUtils.checkStringMatch("exception classname",
"java.lang.IllegalArgumentException", classname);
}
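  // Sketch only: the three-field RemoteException shape asserted throughout
  // these tests, unpacked in one place as {message, exception, javaClassName}.
  // Field names come from the JSON bodies checked above.
  private static String[] unpackRemoteException(JSONObject msg)
      throws JSONException {
    JSONObject exception = msg.getJSONObject("RemoteException");
    assertEquals("incorrect number of elements", 3, exception.length());
    return new String[] {exception.getString("message"),
        exception.getString("exception"),
        exception.getString("javaClassName")};
  }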
@Test
public void testNodeSingleApps() throws JSONException, Exception {
testNodeSingleAppHelper(MediaType.APPLICATION_JSON);
}
// make sure default is json output
@Test
public void testNodeSingleAppsDefault() throws JSONException, Exception {
testNodeSingleAppHelper("");
}
public void testNodeSingleAppHelper(String media) throws JSONException,
Exception {
WebResource r = resource();
Application app = new MockApp(1);
nmContext.getApplications().put(app.getAppId(), app);
HashMap<String, String> hash = addAppContainers(app);
Application app2 = new MockApp(2);
nmContext.getApplications().put(app2.getAppId(), app2);
addAppContainers(app2);
ClientResponse response = r.path("ws").path("v1").path("node").path("apps")
.path(app.getAppId().toString()).accept(media)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
verifyNodeAppInfo(json.getJSONObject("app"), app, hash);
}
@Test
public void testNodeSingleAppsSlash() throws JSONException, Exception {
WebResource r = resource();
Application app = new MockApp(1);
nmContext.getApplications().put(app.getAppId(), app);
HashMap<String, String> hash = addAppContainers(app);
Application app2 = new MockApp(2);
nmContext.getApplications().put(app2.getAppId(), app2);
addAppContainers(app2);
ClientResponse response = r.path("ws").path("v1").path("node").path("apps")
.path(app.getAppId().toString() + "/")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
verifyNodeAppInfo(json.getJSONObject("app"), app, hash);
}
@Test
public void testNodeSingleAppsInvalid() throws JSONException, Exception {
WebResource r = resource();
Application app = new MockApp(1);
nmContext.getApplications().put(app.getAppId(), app);
addAppContainers(app);
Application app2 = new MockApp(2);
nmContext.getApplications().put(app2.getAppId(), app2);
addAppContainers(app2);
try {
r.path("ws").path("v1").path("node").path("apps").path("app_foo_0000")
.accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
fail("should have thrown exception on invalid user query");
} catch (UniformInterfaceException ue) {
ClientResponse response = ue.getResponse();
assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject msg = response.getEntity(JSONObject.class);
JSONObject exception = msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements", 3, exception.length());
String message = exception.getString("message");
String type = exception.getString("exception");
String classname = exception.getString("javaClassName");
WebServicesTestUtils.checkStringMatch("exception message",
"For input string: \"foo\"", message);
WebServicesTestUtils.checkStringMatch("exception type",
"NumberFormatException", type);
WebServicesTestUtils.checkStringMatch("exception classname",
"java.lang.NumberFormatException", classname);
}
}
@Test
public void testNodeSingleAppsMissing() throws JSONException, Exception {
WebResource r = resource();
Application app = new MockApp(1);
nmContext.getApplications().put(app.getAppId(), app);
addAppContainers(app);
Application app2 = new MockApp(2);
nmContext.getApplications().put(app2.getAppId(), app2);
addAppContainers(app2);
try {
r.path("ws").path("v1").path("node").path("apps")
.path("application_1234_0009").accept(MediaType.APPLICATION_JSON)
.get(JSONObject.class);
fail("should have thrown exception on invalid user query");
} catch (UniformInterfaceException ue) {
ClientResponse response = ue.getResponse();
assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject msg = response.getEntity(JSONObject.class);
JSONObject exception = msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements", 3, exception.length());
String message = exception.getString("message");
String type = exception.getString("exception");
String classname = exception.getString("javaClassName");
WebServicesTestUtils.checkStringMatch("exception message",
"java.lang.Exception: app with id application_1234_0009 not found",
message);
WebServicesTestUtils.checkStringMatch("exception type",
"NotFoundException", type);
WebServicesTestUtils.checkStringMatch("exception classname",
"org.apache.hadoop.yarn.webapp.NotFoundException", classname);
}
}
@Test
public void testNodeAppsXML() throws JSONException, Exception {
WebResource r = resource();
Application app = new MockApp(1);
nmContext.getApplications().put(app.getAppId(), app);
addAppContainers(app);
Application app2 = new MockApp(2);
nmContext.getApplications().put(app2.getAppId(), app2);
addAppContainers(app2);
ClientResponse response = r.path("ws").path("v1").path("node").path("apps")
.accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
String xml = response.getEntity(String.class);
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
DocumentBuilder db = dbf.newDocumentBuilder();
InputSource is = new InputSource();
is.setCharacterStream(new StringReader(xml));
Document dom = db.parse(is);
NodeList nodes = dom.getElementsByTagName("app");
assertEquals("incorrect number of elements", 2, nodes.getLength());
}
@Test
public void testNodeSingleAppsXML() throws JSONException, Exception {
WebResource r = resource();
Application app = new MockApp(1);
nmContext.getApplications().put(app.getAppId(), app);
HashMap<String, String> hash = addAppContainers(app);
Application app2 = new MockApp(2);
nmContext.getApplications().put(app2.getAppId(), app2);
addAppContainers(app2);
ClientResponse response = r.path("ws").path("v1").path("node").path("apps")
.path(app.getAppId().toString() + "/")
.accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
String xml = response.getEntity(String.class);
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
DocumentBuilder db = dbf.newDocumentBuilder();
InputSource is = new InputSource();
is.setCharacterStream(new StringReader(xml));
Document dom = db.parse(is);
NodeList nodes = dom.getElementsByTagName("app");
assertEquals("incorrect number of elements", 1, nodes.getLength());
verifyNodeAppInfoXML(nodes, app, hash);
}
public void verifyNodeAppInfoXML(NodeList nodes, Application app,
HashMap<String, String> hash) throws JSONException, Exception {
for (int i = 0; i < nodes.getLength(); i++) {
Element element = (Element) nodes.item(i);
verifyNodeAppInfoGeneric(app,
WebServicesTestUtils.getXmlString(element, "id"),
WebServicesTestUtils.getXmlString(element, "state"),
WebServicesTestUtils.getXmlString(element, "user"));
NodeList ids = element.getElementsByTagName("containerids");
for (int j = 0; j < ids.getLength(); j++) {
Element line = (Element) ids.item(j);
Node first = line.getFirstChild();
String val = first.getNodeValue();
assertEquals("extra containerid: " + val, val, hash.remove(val));
}
assertTrue("missing containerids", hash.isEmpty());
}
}
public void verifyNodeAppInfo(JSONObject info, Application app,
HashMap<String, String> hash) throws JSONException, Exception {
assertEquals("incorrect number of elements", 4, info.length());
verifyNodeAppInfoGeneric(app, info.getString("id"),
info.getString("state"), info.getString("user"));
JSONArray containerids = info.getJSONArray("containerids");
for (int i = 0; i < containerids.length(); i++) {
String id = containerids.getString(i);
assertEquals("extra containerid: " + id, id, hash.remove(id));
}
assertTrue("missing containerids", hash.isEmpty());
}
public void verifyNodeAppInfoGeneric(Application app, String id,
String state, String user) throws JSONException, Exception {
WebServicesTestUtils.checkStringMatch("id", app.getAppId().toString(), id);
WebServicesTestUtils.checkStringMatch("state", app.getApplicationState()
.toString(), state);
WebServicesTestUtils.checkStringMatch("user", app.getUser().toString(),
user);
}
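  // Same XML-parsing sketch as in the containers test: one call wrapping the
  // DocumentBuilderFactory boilerplate repeated above, using only types this
  // class already imports.
  private static Document parseXml(String xml) throws Exception {
    DocumentBuilder db =
        DocumentBuilderFactory.newInstance().newDocumentBuilder();
    InputSource is = new InputSource();
    is.setCharacterStream(new StringReader(xml));
    return db.parse(is);
  }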
}
| 29612 | 41.004255 | 112 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/impl/pb/TestPBRecordImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.impl.pb;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.net.URISyntaxException;
import java.util.ArrayList;
import org.junit.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
import org.apache.hadoop.yarn.api.records.SerializedException;
import org.apache.hadoop.yarn.api.records.impl.pb.LocalResourcePBImpl;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.proto.YarnServerNodemanagerServiceProtos.LocalResourceStatusProto;
import org.apache.hadoop.yarn.proto.YarnServerNodemanagerServiceProtos.LocalizerHeartbeatResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerNodemanagerServiceProtos.LocalizerStatusProto;
import org.apache.hadoop.yarn.server.nodemanager.api.ResourceLocalizationSpec;
import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalResourceStatus;
import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerAction;
import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerHeartbeatResponse;
import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerStatus;
import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.ResourceStatusType;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.junit.Test;
public class TestPBRecordImpl {
static final RecordFactory recordFactory = createPBRecordFactory();
static RecordFactory createPBRecordFactory() {
Configuration conf = new Configuration();
return RecordFactoryProvider.getRecordFactory(conf);
}
static LocalResource createResource() {
LocalResource ret = recordFactory.newRecordInstance(LocalResource.class);
assertTrue(ret instanceof LocalResourcePBImpl);
ret.setResource(ConverterUtils.getYarnUrlFromPath(new Path(
"hdfs://y.ak:8020/foo/bar")));
ret.setSize(4344L);
ret.setTimestamp(3141592653589793L);
ret.setVisibility(LocalResourceVisibility.PUBLIC);
return ret;
}
static LocalResourceStatus createLocalResourceStatus() {
LocalResourceStatus ret =
recordFactory.newRecordInstance(LocalResourceStatus.class);
assertTrue(ret instanceof LocalResourceStatusPBImpl);
ret.setResource(createResource());
ret.setLocalPath(
ConverterUtils.getYarnUrlFromPath(
new Path("file:///local/foo/bar")));
ret.setStatus(ResourceStatusType.FETCH_SUCCESS);
ret.setLocalSize(4443L);
Exception e = new Exception("Dingos.");
e.setStackTrace(new StackTraceElement[] {
new StackTraceElement("foo", "bar", "baz", 10),
new StackTraceElement("sbb", "one", "onm", 10) });
ret.setException(SerializedException.newInstance(e));
return ret;
}
static LocalizerStatus createLocalizerStatus() {
LocalizerStatus ret =
recordFactory.newRecordInstance(LocalizerStatus.class);
assertTrue(ret instanceof LocalizerStatusPBImpl);
ret.setLocalizerId("localizer0");
ret.addResourceStatus(createLocalResourceStatus());
return ret;
}
static LocalizerHeartbeatResponse createLocalizerHeartbeatResponse()
throws URISyntaxException {
LocalizerHeartbeatResponse ret =
recordFactory.newRecordInstance(LocalizerHeartbeatResponse.class);
assertTrue(ret instanceof LocalizerHeartbeatResponsePBImpl);
ret.setLocalizerAction(LocalizerAction.LIVE);
LocalResource rsrc = createResource();
ArrayList<ResourceLocalizationSpec> rsrcs =
new ArrayList<ResourceLocalizationSpec>();
ResourceLocalizationSpec resource =
recordFactory.newRecordInstance(ResourceLocalizationSpec.class);
resource.setResource(rsrc);
resource.setDestinationDirectory(ConverterUtils
.getYarnUrlFromPath(new Path("/tmp" + System.currentTimeMillis())));
rsrcs.add(resource);
ret.setResourceSpecs(rsrcs);
System.out.println(resource);
return ret;
}
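// Each SerDe test below performs the same round trip: write the PB impl
// with writeDelimitedTo(), parse it back with parseDelimitedFrom(), wrap
// the parsed proto in a fresh PB impl, and assert equality with the
// original record.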
@Test(timeout=10000)
public void testLocalResourceStatusSerDe() throws Exception {
LocalResourceStatus rsrcS = createLocalResourceStatus();
assertTrue(rsrcS instanceof LocalResourceStatusPBImpl);
LocalResourceStatusPBImpl rsrcPb = (LocalResourceStatusPBImpl) rsrcS;
DataOutputBuffer out = new DataOutputBuffer();
rsrcPb.getProto().writeDelimitedTo(out);
DataInputBuffer in = new DataInputBuffer();
in.reset(out.getData(), 0, out.getLength());
LocalResourceStatusProto rsrcPbD =
LocalResourceStatusProto.parseDelimitedFrom(in);
assertNotNull(rsrcPbD);
LocalResourceStatus rsrcD =
new LocalResourceStatusPBImpl(rsrcPbD);
assertEquals(rsrcS, rsrcD);
assertEquals(createResource(), rsrcS.getResource());
assertEquals(createResource(), rsrcD.getResource());
}
@Test(timeout=10000)
public void testLocalizerStatusSerDe() throws Exception {
LocalizerStatus rsrcS = createLocalizerStatus();
assertTrue(rsrcS instanceof LocalizerStatusPBImpl);
LocalizerStatusPBImpl rsrcPb = (LocalizerStatusPBImpl) rsrcS;
DataOutputBuffer out = new DataOutputBuffer();
rsrcPb.getProto().writeDelimitedTo(out);
DataInputBuffer in = new DataInputBuffer();
in.reset(out.getData(), 0, out.getLength());
LocalizerStatusProto rsrcPbD =
LocalizerStatusProto.parseDelimitedFrom(in);
assertNotNull(rsrcPbD);
LocalizerStatus rsrcD =
new LocalizerStatusPBImpl(rsrcPbD);
assertEquals(rsrcS, rsrcD);
assertEquals("localizer0", rsrcS.getLocalizerId());
assertEquals("localizer0", rsrcD.getLocalizerId());
assertEquals(createLocalResourceStatus(), rsrcS.getResourceStatus(0));
assertEquals(createLocalResourceStatus(), rsrcD.getResourceStatus(0));
}
@Test(timeout=10000)
public void testLocalizerHeartbeatResponseSerDe() throws Exception {
LocalizerHeartbeatResponse rsrcS = createLocalizerHeartbeatResponse();
assertTrue(rsrcS instanceof LocalizerHeartbeatResponsePBImpl);
LocalizerHeartbeatResponsePBImpl rsrcPb =
(LocalizerHeartbeatResponsePBImpl) rsrcS;
DataOutputBuffer out = new DataOutputBuffer();
rsrcPb.getProto().writeDelimitedTo(out);
DataInputBuffer in = new DataInputBuffer();
in.reset(out.getData(), 0, out.getLength());
LocalizerHeartbeatResponseProto rsrcPbD =
LocalizerHeartbeatResponseProto.parseDelimitedFrom(in);
assertNotNull(rsrcPbD);
LocalizerHeartbeatResponse rsrcD =
new LocalizerHeartbeatResponsePBImpl(rsrcPbD);
assertEquals(rsrcS, rsrcD);
assertEquals(createResource(), rsrcS.getResourceSpecs().get(0).getResource());
assertEquals(createResource(), rsrcD.getResourceSpecs().get(0).getResource());
}
@Test(timeout=10000)
public void testSerializedExceptionDeSer() throws Exception {
// without cause
YarnException yarnEx = new YarnException("Yarn_Exception");
SerializedException serEx = SerializedException.newInstance(yarnEx);
Throwable throwable = serEx.deSerialize();
Assert.assertEquals(yarnEx.getClass(), throwable.getClass());
Assert.assertEquals(yarnEx.getMessage(), throwable.getMessage());
// with cause
IOException ioe = new IOException("Test_IOException");
RuntimeException runtimeException =
new RuntimeException("Test_RuntimeException", ioe);
YarnException yarnEx2 =
new YarnException("Test_YarnException", runtimeException);
SerializedException serEx2 = SerializedException.newInstance(yarnEx2);
Throwable throwable2 = serEx2.deSerialize();
throwable2.printStackTrace();
Assert.assertEquals(yarnEx2.getClass(), throwable2.getClass());
Assert.assertEquals(yarnEx2.getMessage(), throwable2.getMessage());
Assert.assertEquals(runtimeException.getClass(), throwable2.getCause().getClass());
Assert.assertEquals(runtimeException.getMessage(), throwable2.getCause().getMessage());
Assert.assertEquals(ioe.getClass(), throwable2.getCause().getCause().getClass());
Assert.assertEquals(ioe.getMessage(), throwable2.getCause().getCause().getMessage());
}
}
| 9,279 | 42.568075 | 103 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/api/protocolrecords/impl/pb/TestPBLocalizerRPC.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.impl.pb;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.apache.hadoop.yarn.server.nodemanager.api.LocalizationProtocol;
import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerAction;
import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerHeartbeatResponse;
import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerStatus;
import org.junit.Test;
public class TestPBLocalizerRPC {
static final RecordFactory recordFactory = createPBRecordFactory();
static RecordFactory createPBRecordFactory() {
Configuration conf = new Configuration();
return RecordFactoryProvider.getRecordFactory(conf);
}
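// Minimal in-process LocalizationProtocol server that answers every
// heartbeat with a DIE action.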
static class LocalizerService implements LocalizationProtocol {
private final InetSocketAddress locAddr;
private Server server;
LocalizerService(InetSocketAddress locAddr) {
this.locAddr = locAddr;
}
public void start() {
Configuration conf = new Configuration();
YarnRPC rpc = YarnRPC.create(conf);
server = rpc.getServer(
LocalizationProtocol.class, this, locAddr, conf, null, 1);
server.start();
}
public void stop() {
if (server != null) {
server.stop();
}
}
@Override
public LocalizerHeartbeatResponse heartbeat(LocalizerStatus status) {
return dieHBResponse();
}
}
static LocalizerHeartbeatResponse dieHBResponse() {
LocalizerHeartbeatResponse response =
recordFactory.newRecordInstance(LocalizerHeartbeatResponse.class);
response.setLocalizerAction(LocalizerAction.DIE);
return response;
}
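// End-to-end RPC round trip: start an in-process LocalizationProtocol
// server, heartbeat once through a client proxy, and expect the canned
// DIE response back.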
@Test
public void testLocalizerRPC() throws Exception {
InetSocketAddress locAddr = new InetSocketAddress("0.0.0.0", 8040);
LocalizerService server = new LocalizerService(locAddr);
try {
server.start();
Configuration conf = new Configuration();
YarnRPC rpc = YarnRPC.create(conf);
LocalizationProtocol client = (LocalizationProtocol)
rpc.getProxy(LocalizationProtocol.class, locAddr, conf);
LocalizerStatus status =
recordFactory.newRecordInstance(LocalizerStatus.class);
status.setLocalizerId("localizer0");
LocalizerHeartbeatResponse response = client.heartbeat(status);
assertEquals(dieHBResponse(), response);
} finally {
server.stop();
}
assertTrue(true);
}
}
| 3,599 | 34.294118 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestCgroupsLCEResourcesHandler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager.util;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.TestCGroupsHandlerImpl;
import org.apache.hadoop.yarn.util.ControlledClock;
import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
import org.junit.Assert;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.junit.Test;
import org.junit.After;
import org.junit.Before;
import org.mockito.Mockito;
import java.io.*;
import java.util.List;
import java.util.Scanner;
import java.util.concurrent.CountDownLatch;
public class TestCgroupsLCEResourcesHandler {
static File cgroupDir = null;
@Before
public void setUp() throws Exception {
cgroupDir =
new File(System.getProperty("test.build.data",
System.getProperty("java.io.tmpdir", "target")), this.getClass()
.getName());
FileUtils.deleteQuietly(cgroupDir);
}
@After
public void tearDown() throws Exception {
FileUtils.deleteQuietly(cgroupDir);
}
@Test
public void testcheckAndDeleteCgroup() throws Exception {
CgroupsLCEResourcesHandler handler = new CgroupsLCEResourcesHandler();
handler.setConf(new YarnConfiguration());
handler.initConfig();
FileUtils.deleteQuietly(cgroupDir);
// Test 0
// tasks file not present, should return false
Assert.assertFalse(handler.checkAndDeleteCgroup(cgroupDir));
File tfile = new File(cgroupDir.getAbsolutePath(), "tasks");
FileOutputStream fos = FileUtils.openOutputStream(tfile);
File fspy = Mockito.spy(cgroupDir);
// Test 1, tasks file is empty
// tasks file has no data, should return true
Mockito.stub(fspy.delete()).toReturn(true);
Assert.assertTrue(handler.checkAndDeleteCgroup(fspy));
// Test 2, tasks file has data
fos.write("1234".getBytes());
fos.close();
// tasks has data, would not be able to delete, should return false
Assert.assertFalse(handler.checkAndDeleteCgroup(fspy));
FileUtils.deleteQuietly(cgroupDir);
}
// Verify DeleteCgroup times out if "tasks" file contains data
@Test
public void testDeleteCgroup() throws Exception {
final ControlledClock clock = new ControlledClock();
CgroupsLCEResourcesHandler handler = new CgroupsLCEResourcesHandler();
handler.setConf(new YarnConfiguration());
handler.initConfig();
handler.clock = clock;
FileUtils.deleteQuietly(cgroupDir);
// Create a non-empty tasks file
File tfile = new File(cgroupDir.getAbsolutePath(), "tasks");
FileOutputStream fos = FileUtils.openOutputStream(tfile);
fos.write("1234".getBytes());
fos.close();
final CountDownLatch latch = new CountDownLatch(1);
new Thread() {
@Override
public void run() {
latch.countDown();
try {
Thread.sleep(200);
} catch (InterruptedException ex) {
//NOP
}
clock.tickMsec(YarnConfiguration.
DEFAULT_NM_LINUX_CONTAINER_CGROUPS_DELETE_TIMEOUT);
}
}.start();
latch.await();
Assert.assertFalse(handler.deleteCgroup(cgroupDir.getAbsolutePath()));
FileUtils.deleteQuietly(cgroupDir);
}
static class MockLinuxContainerExecutor extends LinuxContainerExecutor {
@Override
public void mountCgroups(List<String> x, String y) {
}
}
static class CustomCgroupsLCEResourceHandler extends
CgroupsLCEResourcesHandler {
String mtabFile;
int[] limits = new int[2];
boolean generateLimitsMode = false;
@Override
int[] getOverallLimits(float x) {
if (generateLimitsMode) {
return super.getOverallLimits(x);
}
return limits;
}
void setMtabFile(String file) {
mtabFile = file;
}
@Override
String getMtabFileName() {
return mtabFile;
}
}
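// cgroups CPU semantics used below: a group may consume cpu.cfs_quota_us
// microseconds of CPU per cpu.cfs_period_us window; a quota of -1 disables
// the cap entirely.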
@Test
public void testInit() throws IOException {
LinuxContainerExecutor mockLCE = new MockLinuxContainerExecutor();
CustomCgroupsLCEResourceHandler handler =
new CustomCgroupsLCEResourceHandler();
YarnConfiguration conf = new YarnConfiguration();
final int numProcessors = 4;
ResourceCalculatorPlugin plugin =
Mockito.mock(ResourceCalculatorPlugin.class);
Mockito.doReturn(numProcessors).when(plugin).getNumProcessors();
Mockito.doReturn(numProcessors).when(plugin).getNumCores();
handler.setConf(conf);
handler.initConfig();
// create mock cgroup
File cpuCgroupMountDir = TestCGroupsHandlerImpl.createMockCgroupMount(
cgroupDir, "cpu");
// create mock mtab
File mockMtab = TestCGroupsHandlerImpl.createMockMTab(cgroupDir);
// setup our handler and call init()
handler.setMtabFile(mockMtab.getAbsolutePath());
// check values
// in this case, we're using all of the cpu, so the period and quota
// files shouldn't exist (init won't create them)
handler.init(mockLCE, plugin);
File periodFile = new File(cpuCgroupMountDir, "cpu.cfs_period_us");
File quotaFile = new File(cpuCgroupMountDir, "cpu.cfs_quota_us");
Assert.assertFalse(periodFile.exists());
Assert.assertFalse(quotaFile.exists());
// subset of cpu being used, files should be created
conf
.setInt(YarnConfiguration.NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT, 75);
handler.limits[0] = 100 * 1000;
handler.limits[1] = 1000 * 1000;
handler.init(mockLCE, plugin);
int period = readIntFromFile(periodFile);
int quota = readIntFromFile(quotaFile);
Assert.assertEquals(100 * 1000, period);
Assert.assertEquals(1000 * 1000, quota);
// set cpu back to 100, quota should be -1
conf.setInt(YarnConfiguration.NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT,
100);
handler.limits[0] = 100 * 1000;
handler.limits[1] = 1000 * 1000;
handler.init(mockLCE, plugin);
quota = readIntFromFile(quotaFile);
Assert.assertEquals(-1, quota);
FileUtils.deleteQuietly(cgroupDir);
}
private int readIntFromFile(File targetFile) throws IOException {
Scanner scanner = new Scanner(targetFile);
try {
return scanner.hasNextInt() ? scanner.nextInt() : -1;
} finally {
scanner.close();
}
}
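// getOverallLimits() returns {cpu.cfs_period_us, cpu.cfs_quota_us}; a quota
// of -1 leaves the bandwidth cap disabled when the requested share can't be
// expressed within the kernel's period bounds.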
@Test
public void testGetOverallLimits() {
int expectedQuota = 1000 * 1000;
CgroupsLCEResourcesHandler handler = new CgroupsLCEResourcesHandler();
int[] ret = handler.getOverallLimits(2);
Assert.assertEquals(expectedQuota / 2, ret[0]);
Assert.assertEquals(expectedQuota, ret[1]);
ret = handler.getOverallLimits(2000);
Assert.assertEquals(expectedQuota, ret[0]);
Assert.assertEquals(-1, ret[1]);
int[] params = {0, -1};
for (int cores : params) {
try {
handler.getOverallLimits(cores);
Assert.fail("Function call should throw error.");
} catch (IllegalArgumentException ie) {
// expected
}
}
// test minimums
ret = handler.getOverallLimits(1000 * 1000);
Assert.assertEquals(1000 * 1000, ret[0]);
Assert.assertEquals(-1, ret[1]);
}
@Test
public void testContainerLimits() throws IOException {
LinuxContainerExecutor mockLCE = new MockLinuxContainerExecutor();
CustomCgroupsLCEResourceHandler handler =
new CustomCgroupsLCEResourceHandler();
handler.generateLimitsMode = true;
YarnConfiguration conf = new YarnConfiguration();
conf.setBoolean(YarnConfiguration.NM_DISK_RESOURCE_ENABLED, true);
final int numProcessors = 4;
ResourceCalculatorPlugin plugin =
Mockito.mock(ResourceCalculatorPlugin.class);
Mockito.doReturn(numProcessors).when(plugin).getNumProcessors();
Mockito.doReturn(numProcessors).when(plugin).getNumCores();
handler.setConf(conf);
handler.initConfig();
// create mock cgroup
File cpuCgroupMountDir = TestCGroupsHandlerImpl.createMockCgroupMount(
cgroupDir, "cpu");
// create mock mtab
File mockMtab = TestCGroupsHandlerImpl.createMockMTab(cgroupDir);
// setup our handler and call init()
handler.setMtabFile(mockMtab.getAbsolutePath());
handler.init(mockLCE, plugin);
// check the controller paths map isn't empty
ContainerId id = ContainerId.fromString("container_1_1_1_1");
handler.preExecute(id, Resource.newInstance(1024, 1));
Assert.assertNotNull(handler.getControllerPaths());
// check values
// default case - files shouldn't exist, strict mode off by default
File containerCpuDir = new File(cpuCgroupMountDir, id.toString());
Assert.assertTrue(containerCpuDir.exists());
Assert.assertTrue(containerCpuDir.isDirectory());
File periodFile = new File(containerCpuDir, "cpu.cfs_period_us");
File quotaFile = new File(containerCpuDir, "cpu.cfs_quota_us");
Assert.assertFalse(periodFile.exists());
Assert.assertFalse(quotaFile.exists());
// no files created because we're using all cpu
FileUtils.deleteQuietly(containerCpuDir);
conf.setBoolean(
YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_STRICT_RESOURCE_USAGE,
true);
handler.initConfig();
handler.preExecute(id,
Resource.newInstance(1024, YarnConfiguration.DEFAULT_NM_VCORES));
Assert.assertTrue(containerCpuDir.exists());
Assert.assertTrue(containerCpuDir.isDirectory());
periodFile = new File(containerCpuDir, "cpu.cfs_period_us");
quotaFile = new File(containerCpuDir, "cpu.cfs_quota_us");
Assert.assertFalse(periodFile.exists());
Assert.assertFalse(quotaFile.exists());
// 50% of CPU
FileUtils.deleteQuietly(containerCpuDir);
conf.setBoolean(
YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_STRICT_RESOURCE_USAGE,
true);
handler.initConfig();
handler.preExecute(id,
Resource.newInstance(1024, YarnConfiguration.DEFAULT_NM_VCORES / 2));
Assert.assertTrue(containerCpuDir.exists());
Assert.assertTrue(containerCpuDir.isDirectory());
periodFile = new File(containerCpuDir, "cpu.cfs_period_us");
quotaFile = new File(containerCpuDir, "cpu.cfs_quota_us");
Assert.assertTrue(periodFile.exists());
Assert.assertTrue(quotaFile.exists());
Assert.assertEquals(500 * 1000, readIntFromFile(periodFile));
Assert.assertEquals(1000 * 1000, readIntFromFile(quotaFile));
// CGroups set to 50% of CPU, container set to 50% of YARN CPU
FileUtils.deleteQuietly(containerCpuDir);
conf.setBoolean(
YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_STRICT_RESOURCE_USAGE,
true);
conf
.setInt(YarnConfiguration.NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT, 50);
handler.initConfig();
handler.init(mockLCE, plugin);
handler.preExecute(id,
Resource.newInstance(1024, YarnConfiguration.DEFAULT_NM_VCORES / 2));
Assert.assertTrue(containerCpuDir.exists());
Assert.assertTrue(containerCpuDir.isDirectory());
periodFile = new File(containerCpuDir, "cpu.cfs_period_us");
quotaFile = new File(containerCpuDir, "cpu.cfs_quota_us");
Assert.assertTrue(periodFile.exists());
Assert.assertTrue(quotaFile.exists());
Assert.assertEquals(1000 * 1000, readIntFromFile(periodFile));
Assert.assertEquals(1000 * 1000, readIntFromFile(quotaFile));
FileUtils.deleteQuietly(cgroupDir);
}
}
| 12,200 | 34.365217 | 105 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestProcessIdFileReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager.util;
import static org.junit.Assert.*;
import java.io.File;
import java.io.IOException;
import java.io.PrintWriter;
import org.junit.Assert;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.yarn.server.nodemanager.util.ProcessIdFileReader;
import org.junit.Test;
public class TestProcessIdFileReader {
@Test (timeout = 30000)
public void testNullPath() {
String pid = null;
try {
pid = ProcessIdFileReader.getProcessId(null);
fail("Expected an error to be thrown for null path");
} catch (Exception e) {
// expected
}
assertNull("pid should be null when the path is null", pid);
}
@Test (timeout = 30000)
public void testSimpleGet() throws IOException {
String rootDir = new File(System.getProperty(
"test.build.data", "/tmp")).getAbsolutePath();
File testFile = null;
String expectedProcessId = Shell.WINDOWS ?
"container_1353742680940_0002_01_000001" :
"56789";
try {
testFile = new File(rootDir, "temp.txt");
PrintWriter fileWriter = new PrintWriter(testFile);
fileWriter.println(expectedProcessId);
fileWriter.close();
String processId = null;
processId = ProcessIdFileReader.getProcessId(
new Path(rootDir + Path.SEPARATOR + "temp.txt"));
Assert.assertEquals(expectedProcessId, processId);
} finally {
if (testFile != null
&& testFile.exists()) {
testFile.delete();
}
}
}
@Test (timeout = 30000)
public void testComplexGet() throws IOException {
String rootDir = new File(System.getProperty(
"test.build.data", "/tmp")).getAbsolutePath();
File testFile = null;
String processIdInFile = Shell.WINDOWS ?
" container_1353742680940_0002_01_000001 " :
" 23 ";
String expectedProcessId = processIdInFile.trim();
try {
testFile = new File(rootDir, "temp.txt");
PrintWriter fileWriter = new PrintWriter(testFile);
fileWriter.println(" ");
fileWriter.println("");
fileWriter.println("abc");
fileWriter.println("-123");
fileWriter.println("-123 ");
fileWriter.println(processIdInFile);
fileWriter.println("6236");
fileWriter.close();
String processId = null;
processId = ProcessIdFileReader.getProcessId(
new Path(rootDir + Path.SEPARATOR + "temp.txt"));
Assert.assertEquals(expectedProcessId, processId);
} finally {
if (testFile != null
&& testFile.exists()) {
testFile.delete();
}
}
}
}
| 3,495 | 30.495495 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestNodeManagerHardwareUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager.util;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.Mockito;
/**
* Test the various functions provided by the NodeManagerHardwareUtils class.
*/
public class TestNodeManagerHardwareUtils {
static class TestResourceCalculatorPlugin extends ResourceCalculatorPlugin {
TestResourceCalculatorPlugin() {
super(null);
}
@Override
public long getVirtualMemorySize() {
return 0;
}
@Override
public long getPhysicalMemorySize() {
long ret = Runtime.getRuntime().maxMemory() * 2;
ret = ret + (4L * 1024 * 1024 * 1024);
return ret;
}
@Override
public long getAvailableVirtualMemorySize() {
return 0;
}
@Override
public long getAvailablePhysicalMemorySize() {
return 0;
}
@Override
public int getNumProcessors() {
return 8;
}
@Override
public long getCpuFrequency() {
return 0;
}
@Override
public long getCumulativeCpuTime() {
return 0;
}
@Override
public float getCpuUsage() {
return 0;
}
@Override
public int getNumCores() {
return 4;
}
}
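// NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT scales how much of the node's
// CPU is offered to containers: 0 is rejected, 100 yields all cores, and
// values above 100 clamp to the full core count.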
@Test
public void testGetContainerCPU() {
YarnConfiguration conf = new YarnConfiguration();
float ret;
final int numProcessors = 8;
final int numCores = 4;
ResourceCalculatorPlugin plugin =
Mockito.mock(ResourceCalculatorPlugin.class);
Mockito.doReturn(numProcessors).when(plugin).getNumProcessors();
Mockito.doReturn(numCores).when(plugin).getNumCores();
conf.setInt(YarnConfiguration.NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT, 0);
boolean catchFlag = false;
try {
NodeManagerHardwareUtils.getContainersCPUs(plugin, conf);
Assert.fail("getContainerCores should have thrown exception");
} catch (IllegalArgumentException ie) {
catchFlag = true;
}
Assert.assertTrue(catchFlag);
conf.setInt(YarnConfiguration.NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT,
100);
ret = NodeManagerHardwareUtils.getContainersCPUs(plugin, conf);
Assert.assertEquals(4, (int) ret);
conf
.setInt(YarnConfiguration.NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT, 50);
ret = NodeManagerHardwareUtils.getContainersCPUs(plugin, conf);
Assert.assertEquals(2, (int) ret);
conf
.setInt(YarnConfiguration.NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT, 75);
ret = NodeManagerHardwareUtils.getContainersCPUs(plugin, conf);
Assert.assertEquals(3, (int) ret);
conf
.setInt(YarnConfiguration.NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT, 85);
ret = NodeManagerHardwareUtils.getContainersCPUs(plugin, conf);
Assert.assertEquals(3.4, ret, 0.1);
conf.setInt(YarnConfiguration.NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT,
110);
ret = NodeManagerHardwareUtils.getContainersCPUs(plugin, conf);
Assert.assertEquals(4, (int) ret);
}
@Test
public void testGetVCores() {
ResourceCalculatorPlugin plugin = new TestResourceCalculatorPlugin();
YarnConfiguration conf = new YarnConfiguration();
conf.setFloat(YarnConfiguration.NM_PCORES_VCORES_MULTIPLIER, 1.25f);
int ret = NodeManagerHardwareUtils.getVCores(plugin, conf);
Assert.assertEquals(YarnConfiguration.DEFAULT_NM_VCORES, ret);
conf.setBoolean(YarnConfiguration.NM_ENABLE_HARDWARE_CAPABILITY_DETECTION,
true);
ret = NodeManagerHardwareUtils.getVCores(plugin, conf);
Assert.assertEquals(5, ret);
conf.setBoolean(YarnConfiguration.NM_COUNT_LOGICAL_PROCESSORS_AS_CORES,
true);
ret = NodeManagerHardwareUtils.getVCores(plugin, conf);
Assert.assertEquals(10, ret);
conf.setInt(YarnConfiguration.NM_VCORES, 10);
ret = NodeManagerHardwareUtils.getVCores(plugin, conf);
Assert.assertEquals(10, ret);
YarnConfiguration conf1 = new YarnConfiguration();
conf1.setBoolean(YarnConfiguration.NM_ENABLE_HARDWARE_CAPABILITY_DETECTION,
false);
conf1.setInt(YarnConfiguration.NM_VCORES, 10);
// exercise the detection-disabled path via conf1
ret = NodeManagerHardwareUtils.getVCores(plugin, conf1);
Assert.assertEquals(10, ret);
}
@Test
public void testGetContainerMemoryMB() throws Exception {
ResourceCalculatorPlugin plugin = new TestResourceCalculatorPlugin();
long physicalMemMB = plugin.getPhysicalMemorySize() / (1024 * 1024);
YarnConfiguration conf = new YarnConfiguration();
conf.setBoolean(YarnConfiguration.NM_ENABLE_HARDWARE_CAPABILITY_DETECTION,
true);
int mem = NodeManagerHardwareUtils.getContainerMemoryMB(null, conf);
Assert.assertEquals(YarnConfiguration.DEFAULT_NM_PMEM_MB, mem);
mem = NodeManagerHardwareUtils.getContainerMemoryMB(plugin, conf);
int hadoopHeapSizeMB =
(int) (Runtime.getRuntime().maxMemory() / (1024 * 1024));
int calculatedMemMB =
(int) (0.8 * (physicalMemMB - (2 * hadoopHeapSizeMB)));
Assert.assertEquals(calculatedMemMB, mem);
conf.setInt(YarnConfiguration.NM_PMEM_MB, 1024);
mem = NodeManagerHardwareUtils.getContainerMemoryMB(conf);
Assert.assertEquals(1024, mem);
conf = new YarnConfiguration();
conf.setBoolean(YarnConfiguration.NM_ENABLE_HARDWARE_CAPABILITY_DETECTION,
false);
mem = NodeManagerHardwareUtils.getContainerMemoryMB(conf);
Assert.assertEquals(YarnConfiguration.DEFAULT_NM_PMEM_MB, mem);
conf.setInt(YarnConfiguration.NM_PMEM_MB, 10 * 1024);
mem = NodeManagerHardwareUtils.getContainerMemoryMB(conf);
Assert.assertEquals(10 * 1024, mem);
}
}
| 6,466 | 31.497487 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/metrics/TestNodeManagerMetrics.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager.metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import static org.apache.hadoop.test.MetricsAsserts.*;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.Records;
import org.junit.Assert;
import org.junit.Test;
public class TestNodeManagerMetrics {
static final int GiB = 1024; // 1 GiB expressed in MiB
@Test public void testNames() {
DefaultMetricsSystem.initialize("NodeManager");
NodeManagerMetrics metrics = NodeManagerMetrics.create();
Resource total = Records.newRecord(Resource.class);
total.setMemory(8*GiB);
total.setVirtualCores(16);
Resource resource = Records.newRecord(Resource.class);
resource.setMemory(512); //512MiB
resource.setVirtualCores(2);
metrics.addResource(total);
for (int i = 10; i-- > 0;) {
// allocate 10 containers (allocatedGB: 5GiB, availableGB: 3GiB)
metrics.launchedContainer();
metrics.allocateContainer(resource);
}
metrics.initingContainer();
metrics.endInitingContainer();
metrics.runningContainer();
metrics.endRunningContainer();
// Releasing 3 containers (allocatedGB: 3.5GiB, availableGB: 4.5GiB)
metrics.completedContainer();
metrics.releaseContainer(resource);
metrics.failedContainer();
metrics.releaseContainer(resource);
metrics.killedContainer();
metrics.releaseContainer(resource);
metrics.initingContainer();
metrics.runningContainer();
Assert.assertTrue(!metrics.containerLaunchDuration.changed());
metrics.addContainerLaunchDuration(1);
Assert.assertTrue(metrics.containerLaunchDuration.changed());
// availableGB is expected to be floored,
// while allocatedGB is expected to be ceiled.
// allocatedGB: 3.5GB allocated memory is shown as 4GB
// availableGB: 4.5GB available memory is shown as 4GB
checkMetrics(10, 1, 1, 1, 1, 1, 4, 7, 4, 14, 2);
}
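// Asserts each metric by its registered name; the expected values map
// positionally onto the parameter list below.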
private void checkMetrics(int launched, int completed, int failed, int killed,
int initing, int running, int allocatedGB,
int allocatedContainers, int availableGB, int allocatedVCores,
int availableVCores) {
MetricsRecordBuilder rb = getMetrics("NodeManagerMetrics");
assertCounter("ContainersLaunched", launched, rb);
assertCounter("ContainersCompleted", completed, rb);
assertCounter("ContainersFailed", failed, rb);
assertCounter("ContainersKilled", killed, rb);
assertGauge("ContainersIniting", initing, rb);
assertGauge("ContainersRunning", running, rb);
assertGauge("AllocatedGB", allocatedGB, rb);
assertGauge("AllocatedVCores", allocatedVCores, rb);
assertGauge("AllocatedContainers", allocatedContainers, rb);
assertGauge("AvailableGB", availableGB, rb);
assertGauge("AvailableVCores",availableVCores, rb);
}
}
| 3,707 | 36.836735 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMMemoryStateStoreService.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager.recovery;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto;
import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.ContainerManagerApplicationProto;
import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto;
import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LocalizedResourceProto;
import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LogDeleterProto;
import org.apache.hadoop.yarn.server.api.records.MasterKey;
import org.apache.hadoop.yarn.server.api.records.impl.pb.MasterKeyPBImpl;
public class NMMemoryStateStoreService extends NMStateStoreService {
private Map<ApplicationId, ContainerManagerApplicationProto> apps;
private Set<ApplicationId> finishedApps;
private Map<ContainerId, RecoveredContainerState> containerStates;
private Map<TrackerKey, TrackerState> trackerStates;
private Map<Integer, DeletionServiceDeleteTaskProto> deleteTasks;
private RecoveredNMTokensState nmTokenState;
private RecoveredContainerTokensState containerTokenState;
private Map<ApplicationId, LogDeleterProto> logDeleterState;
public NMMemoryStateStoreService() {
super(NMMemoryStateStoreService.class.getName());
}
@Override
protected void initStorage(Configuration conf) {
apps = new HashMap<ApplicationId, ContainerManagerApplicationProto>();
finishedApps = new HashSet<ApplicationId>();
containerStates = new HashMap<ContainerId, RecoveredContainerState>();
nmTokenState = new RecoveredNMTokensState();
nmTokenState.applicationMasterKeys =
new HashMap<ApplicationAttemptId, MasterKey>();
containerTokenState = new RecoveredContainerTokensState();
containerTokenState.activeTokens = new HashMap<ContainerId, Long>();
trackerStates = new HashMap<TrackerKey, TrackerState>();
deleteTasks = new HashMap<Integer, DeletionServiceDeleteTaskProto>();
logDeleterState = new HashMap<ApplicationId, LogDeleterProto>();
}
@Override
protected void startStorage() {
}
@Override
protected void closeStorage() {
}
@Override
public synchronized RecoveredApplicationsState loadApplicationsState()
throws IOException {
RecoveredApplicationsState state = new RecoveredApplicationsState();
state.applications = new ArrayList<ContainerManagerApplicationProto>(
apps.values());
state.finishedApplications = new ArrayList<ApplicationId>(finishedApps);
return state;
}
@Override
public synchronized void storeApplication(ApplicationId appId,
ContainerManagerApplicationProto proto) throws IOException {
ContainerManagerApplicationProto protoCopy =
ContainerManagerApplicationProto.parseFrom(proto.toByteString());
apps.put(appId, protoCopy);
}
@Override
public synchronized void storeFinishedApplication(ApplicationId appId) {
finishedApps.add(appId);
}
@Override
public synchronized void removeApplication(ApplicationId appId)
throws IOException {
apps.remove(appId);
finishedApps.remove(appId);
}
@Override
public synchronized List<RecoveredContainerState> loadContainersState()
throws IOException {
// return a copy so caller can't modify our state
List<RecoveredContainerState> result =
new ArrayList<RecoveredContainerState>(containerStates.size());
for (RecoveredContainerState rcs : containerStates.values()) {
RecoveredContainerState rcsCopy = new RecoveredContainerState();
rcsCopy.status = rcs.status;
rcsCopy.exitCode = rcs.exitCode;
rcsCopy.killed = rcs.killed;
rcsCopy.diagnostics = rcs.diagnostics;
rcsCopy.startRequest = rcs.startRequest;
result.add(rcsCopy);
}
return result;
}
@Override
public synchronized void storeContainer(ContainerId containerId,
StartContainerRequest startRequest) throws IOException {
RecoveredContainerState rcs = new RecoveredContainerState();
rcs.startRequest = startRequest;
containerStates.put(containerId, rcs);
}
@Override
public synchronized void storeContainerDiagnostics(ContainerId containerId,
StringBuilder diagnostics) throws IOException {
RecoveredContainerState rcs = getRecoveredContainerState(containerId);
rcs.diagnostics = diagnostics.toString();
}
@Override
public synchronized void storeContainerLaunched(ContainerId containerId)
throws IOException {
RecoveredContainerState rcs = getRecoveredContainerState(containerId);
if (rcs.exitCode != ContainerExitStatus.INVALID) {
throw new IOException("Container already completed");
}
rcs.status = RecoveredContainerStatus.LAUNCHED;
}
@Override
public synchronized void storeContainerKilled(ContainerId containerId)
throws IOException {
RecoveredContainerState rcs = getRecoveredContainerState(containerId);
rcs.killed = true;
}
@Override
public synchronized void storeContainerCompleted(ContainerId containerId,
int exitCode) throws IOException {
RecoveredContainerState rcs = getRecoveredContainerState(containerId);
rcs.status = RecoveredContainerStatus.COMPLETED;
rcs.exitCode = exitCode;
}
@Override
public synchronized void removeContainer(ContainerId containerId)
throws IOException {
containerStates.remove(containerId);
}
private RecoveredContainerState getRecoveredContainerState(
ContainerId containerId) throws IOException {
RecoveredContainerState rcs = containerStates.get(containerId);
if (rcs == null) {
throw new IOException("No start request for " + containerId);
}
return rcs;
}
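// Localization tracker bookkeeping: finished resources are kept keyed by
// local path, while in-progress entries are inverted to (proto -> path)
// when handed back to the caller.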
private LocalResourceTrackerState loadTrackerState(TrackerState ts) {
LocalResourceTrackerState result = new LocalResourceTrackerState();
result.localizedResources.addAll(ts.localizedResources.values());
for (Map.Entry<Path, LocalResourceProto> entry :
ts.inProgressMap.entrySet()) {
result.inProgressResources.put(entry.getValue(), entry.getKey());
}
return result;
}
private TrackerState getTrackerState(TrackerKey key) {
TrackerState ts = trackerStates.get(key);
if (ts == null) {
ts = new TrackerState();
trackerStates.put(key, ts);
}
return ts;
}
@Override
public synchronized RecoveredLocalizationState loadLocalizationState() {
RecoveredLocalizationState result = new RecoveredLocalizationState();
for (Map.Entry<TrackerKey, TrackerState> e : trackerStates.entrySet()) {
TrackerKey tk = e.getKey();
TrackerState ts = e.getValue();
// check what kind of tracker state we have and recover appropriately
// public trackers have user == null
// private trackers have a valid user but appId == null
// app-specific trackers have a valid user and valid appId
if (tk.user == null) {
result.publicTrackerState = loadTrackerState(ts);
} else {
RecoveredUserResources rur = result.userResources.get(tk.user);
if (rur == null) {
rur = new RecoveredUserResources();
result.userResources.put(tk.user, rur);
}
if (tk.appId == null) {
rur.privateTrackerState = loadTrackerState(ts);
} else {
rur.appTrackerStates.put(tk.appId, loadTrackerState(ts));
}
}
}
return result;
}
@Override
public synchronized void startResourceLocalization(String user,
ApplicationId appId, LocalResourceProto proto, Path localPath) {
TrackerState ts = getTrackerState(new TrackerKey(user, appId));
ts.inProgressMap.put(localPath, proto);
}
@Override
public synchronized void finishResourceLocalization(String user,
ApplicationId appId, LocalizedResourceProto proto) {
TrackerState ts = getTrackerState(new TrackerKey(user, appId));
Path localPath = new Path(proto.getLocalPath());
ts.inProgressMap.remove(localPath);
ts.localizedResources.put(localPath, proto);
}
@Override
public synchronized void removeLocalizedResource(String user,
ApplicationId appId, Path localPath) {
TrackerState ts = trackerStates.get(new TrackerKey(user, appId));
if (ts != null) {
ts.inProgressMap.remove(localPath);
ts.localizedResources.remove(localPath);
}
}
@Override
public synchronized RecoveredDeletionServiceState loadDeletionServiceState()
throws IOException {
RecoveredDeletionServiceState result =
new RecoveredDeletionServiceState();
result.tasks = new ArrayList<DeletionServiceDeleteTaskProto>(
deleteTasks.values());
return result;
}
@Override
public synchronized void storeDeletionTask(int taskId,
DeletionServiceDeleteTaskProto taskProto) throws IOException {
deleteTasks.put(taskId, taskProto);
}
@Override
public synchronized void removeDeletionTask(int taskId) throws IOException {
deleteTasks.remove(taskId);
}
@Override
public synchronized RecoveredNMTokensState loadNMTokensState()
throws IOException {
// return a copy so caller can't modify our state
RecoveredNMTokensState result = new RecoveredNMTokensState();
result.currentMasterKey = nmTokenState.currentMasterKey;
result.previousMasterKey = nmTokenState.previousMasterKey;
result.applicationMasterKeys =
new HashMap<ApplicationAttemptId, MasterKey>(
nmTokenState.applicationMasterKeys);
return result;
}
@Override
public synchronized void storeNMTokenCurrentMasterKey(MasterKey key)
throws IOException {
MasterKeyPBImpl keypb = (MasterKeyPBImpl) key;
nmTokenState.currentMasterKey = new MasterKeyPBImpl(keypb.getProto());
}
@Override
public synchronized void storeNMTokenPreviousMasterKey(MasterKey key)
throws IOException {
MasterKeyPBImpl keypb = (MasterKeyPBImpl) key;
nmTokenState.previousMasterKey = new MasterKeyPBImpl(keypb.getProto());
}
@Override
public synchronized void storeNMTokenApplicationMasterKey(
ApplicationAttemptId attempt, MasterKey key) throws IOException {
MasterKeyPBImpl keypb = (MasterKeyPBImpl) key;
nmTokenState.applicationMasterKeys.put(attempt,
new MasterKeyPBImpl(keypb.getProto()));
}
@Override
public synchronized void removeNMTokenApplicationMasterKey(
ApplicationAttemptId attempt) throws IOException {
nmTokenState.applicationMasterKeys.remove(attempt);
}
@Override
public synchronized RecoveredContainerTokensState loadContainerTokensState()
throws IOException {
// return a copy so caller can't modify our state
RecoveredContainerTokensState result =
new RecoveredContainerTokensState();
result.currentMasterKey = containerTokenState.currentMasterKey;
result.previousMasterKey = containerTokenState.previousMasterKey;
result.activeTokens =
new HashMap<ContainerId, Long>(containerTokenState.activeTokens);
return result;
}
@Override
public synchronized void storeContainerTokenCurrentMasterKey(MasterKey key)
throws IOException {
MasterKeyPBImpl keypb = (MasterKeyPBImpl) key;
containerTokenState.currentMasterKey =
new MasterKeyPBImpl(keypb.getProto());
}
@Override
public synchronized void storeContainerTokenPreviousMasterKey(MasterKey key)
throws IOException {
MasterKeyPBImpl keypb = (MasterKeyPBImpl) key;
containerTokenState.previousMasterKey =
new MasterKeyPBImpl(keypb.getProto());
}
@Override
public synchronized void storeContainerToken(ContainerId containerId,
Long expirationTime) throws IOException {
containerTokenState.activeTokens.put(containerId, expirationTime);
}
@Override
public synchronized void removeContainerToken(ContainerId containerId)
throws IOException {
containerTokenState.activeTokens.remove(containerId);
}
@Override
public synchronized RecoveredLogDeleterState loadLogDeleterState()
throws IOException {
RecoveredLogDeleterState state = new RecoveredLogDeleterState();
state.logDeleterMap = new HashMap<ApplicationId,LogDeleterProto>(
logDeleterState);
return state;
}
@Override
public synchronized void storeLogDeleter(ApplicationId appId,
LogDeleterProto proto)
throws IOException {
logDeleterState.put(appId, proto);
}
@Override
public synchronized void removeLogDeleter(ApplicationId appId)
throws IOException {
logDeleterState.remove(appId);
}
private static class TrackerState {
Map<Path, LocalResourceProto> inProgressMap =
new HashMap<Path, LocalResourceProto>();
Map<Path, LocalizedResourceProto> localizedResources =
new HashMap<Path, LocalizedResourceProto>();
}
private static class TrackerKey {
String user;
ApplicationId appId;
public TrackerKey(String user, ApplicationId appId) {
this.user = user;
this.appId = appId;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((appId == null) ? 0 : appId.hashCode());
result = prime * result + ((user == null) ? 0 : user.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (!(obj instanceof TrackerKey))
return false;
TrackerKey other = (TrackerKey) obj;
if (appId == null) {
if (other.appId != null)
return false;
} else if (!appId.equals(other.appId))
return false;
if (user == null) {
if (other.user != null)
return false;
} else if (!user.equals(other.user))
return false;
return true;
}
}
}
| 15,219 | 33.908257 | 105 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager.recovery;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertFalse;
import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.service.ServiceStateException;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.api.records.URL;
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.LocalResourcePBImpl;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto;
import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.ContainerManagerApplicationProto;
import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto;
import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LocalizedResourceProto;
import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LogDeleterProto;
import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
import org.apache.hadoop.yarn.server.api.records.MasterKey;
import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.LocalResourceTrackerState;
import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredApplicationsState;
import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredContainerState;
import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredContainerStatus;
import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredContainerTokensState;
import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredDeletionServiceState;
import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredLocalizationState;
import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredLogDeleterState;
import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredNMTokensState;
import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredUserResources;
import org.apache.hadoop.yarn.server.records.Version;
import org.apache.hadoop.yarn.server.security.BaseContainerTokenSecretManager;
import org.apache.hadoop.yarn.server.security.BaseNMTokenSecretManager;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
public class TestNMLeveldbStateStoreService {
private static final File TMP_DIR = new File(
System.getProperty("test.build.data",
System.getProperty("java.io.tmpdir")),
TestNMLeveldbStateStoreService.class.getName());
YarnConfiguration conf;
NMLeveldbStateStoreService stateStore;
@Before
public void setup() throws IOException {
FileUtil.fullyDelete(TMP_DIR);
conf = new YarnConfiguration();
conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, true);
conf.set(YarnConfiguration.NM_RECOVERY_DIR, TMP_DIR.toString());
restartStateStore();
}
@After
public void cleanup() throws IOException {
if (stateStore != null) {
stateStore.close();
}
FileUtil.fullyDelete(TMP_DIR);
}
private void restartStateStore() throws IOException {
// need to close so leveldb releases database lock
if (stateStore != null) {
stateStore.close();
}
stateStore = new NMLeveldbStateStoreService();
stateStore.init(conf);
stateStore.start();
}
private void verifyEmptyState() throws IOException {
RecoveredLocalizationState state = stateStore.loadLocalizationState();
assertNotNull(state);
LocalResourceTrackerState pubts = state.getPublicTrackerState();
assertNotNull(pubts);
assertTrue(pubts.getLocalizedResources().isEmpty());
assertTrue(pubts.getInProgressResources().isEmpty());
assertTrue(state.getUserResources().isEmpty());
}
@Test
public void testIsNewlyCreated() throws IOException {
assertTrue(stateStore.isNewlyCreated());
restartStateStore();
assertFalse(stateStore.isNewlyCreated());
}
@Test
public void testEmptyState() throws IOException {
assertTrue(stateStore.canRecover());
verifyEmptyState();
}
@Test
public void testCheckVersion() throws IOException {
// default version
Version defaultVersion = stateStore.getCurrentVersion();
Assert.assertEquals(defaultVersion, stateStore.loadVersion());
// compatible version
Version compatibleVersion =
Version.newInstance(defaultVersion.getMajorVersion(),
defaultVersion.getMinorVersion() + 2);
stateStore.storeVersion(compatibleVersion);
Assert.assertEquals(compatibleVersion, stateStore.loadVersion());
restartStateStore();
// overwrite the compatible version
Assert.assertEquals(defaultVersion, stateStore.loadVersion());
// incompatible version
Version incompatibleVersion =
Version.newInstance(defaultVersion.getMajorVersion() + 1,
defaultVersion.getMinorVersion());
stateStore.storeVersion(incompatibleVersion);
try {
restartStateStore();
Assert.fail("Incompatible version, should expect fail here.");
} catch (ServiceStateException e) {
Assert.assertTrue("Exception message mismatch",
e.getMessage().contains("Incompatible version for NM state:"));
}
}
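  // Application protos are stored by id; finished applications are
  // tracked separately, and both must survive restarts until removed.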
@Test
public void testApplicationStorage() throws IOException {
// test empty when no state
RecoveredApplicationsState state = stateStore.loadApplicationsState();
assertTrue(state.getApplications().isEmpty());
assertTrue(state.getFinishedApplications().isEmpty());
// store an application and verify recovered
final ApplicationId appId1 = ApplicationId.newInstance(1234, 1);
ContainerManagerApplicationProto.Builder builder =
ContainerManagerApplicationProto.newBuilder();
builder.setId(((ApplicationIdPBImpl) appId1).getProto());
builder.setUser("user1");
ContainerManagerApplicationProto appProto1 = builder.build();
stateStore.storeApplication(appId1, appProto1);
restartStateStore();
state = stateStore.loadApplicationsState();
assertEquals(1, state.getApplications().size());
assertEquals(appProto1, state.getApplications().get(0));
assertTrue(state.getFinishedApplications().isEmpty());
// finish an application and add a new one
stateStore.storeFinishedApplication(appId1);
final ApplicationId appId2 = ApplicationId.newInstance(1234, 2);
builder = ContainerManagerApplicationProto.newBuilder();
builder.setId(((ApplicationIdPBImpl) appId2).getProto());
builder.setUser("user2");
ContainerManagerApplicationProto appProto2 = builder.build();
stateStore.storeApplication(appId2, appProto2);
restartStateStore();
state = stateStore.loadApplicationsState();
assertEquals(2, state.getApplications().size());
assertTrue(state.getApplications().contains(appProto1));
assertTrue(state.getApplications().contains(appProto2));
assertEquals(1, state.getFinishedApplications().size());
assertEquals(appId1, state.getFinishedApplications().get(0));
// test removing an application
stateStore.storeFinishedApplication(appId2);
stateStore.removeApplication(appId2);
restartStateStore();
state = stateStore.loadApplicationsState();
assertEquals(1, state.getApplications().size());
assertEquals(appProto1, state.getApplications().get(0));
assertEquals(1, state.getFinishedApplications().size());
assertEquals(appId1, state.getFinishedApplications().get(0));
}
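  // Walks a container through its recovered lifecycle
  // (REQUESTED -> LAUNCHED -> killed -> COMPLETED), restarting the store
  // at each step; a launch record stored without a matching start request
  // is expected to be dropped on recovery.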
@Test
public void testContainerStorage() throws IOException {
// test empty when no state
List<RecoveredContainerState> recoveredContainers =
stateStore.loadContainersState();
assertTrue(recoveredContainers.isEmpty());
// create a container request
ApplicationId appId = ApplicationId.newInstance(1234, 3);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 4);
ContainerId containerId = ContainerId.newContainerId(appAttemptId, 5);
LocalResource lrsrc = LocalResource.newInstance(
URL.newInstance("hdfs", "somehost", 12345, "/some/path/to/rsrc"),
LocalResourceType.FILE, LocalResourceVisibility.APPLICATION, 123L,
1234567890L);
Map<String, LocalResource> localResources =
new HashMap<String, LocalResource>();
localResources.put("rsrc", lrsrc);
Map<String, String> env = new HashMap<String, String>();
env.put("somevar", "someval");
List<String> containerCmds = new ArrayList<String>();
containerCmds.add("somecmd");
containerCmds.add("somearg");
Map<String, ByteBuffer> serviceData = new HashMap<String, ByteBuffer>();
serviceData.put("someservice",
ByteBuffer.wrap(new byte[] { 0x1, 0x2, 0x3 }));
ByteBuffer containerTokens =
ByteBuffer.wrap(new byte[] { 0x7, 0x8, 0x9, 0xa });
Map<ApplicationAccessType, String> acls =
new HashMap<ApplicationAccessType, String>();
acls.put(ApplicationAccessType.VIEW_APP, "viewuser");
acls.put(ApplicationAccessType.MODIFY_APP, "moduser");
ContainerLaunchContext clc = ContainerLaunchContext.newInstance(
localResources, env, containerCmds, serviceData, containerTokens,
acls);
Resource containerRsrc = Resource.newInstance(1357, 3);
ContainerTokenIdentifier containerTokenId =
new ContainerTokenIdentifier(containerId, "host", "user",
containerRsrc, 9876543210L, 42, 2468, Priority.newInstance(7),
13579);
Token containerToken = Token.newInstance(containerTokenId.getBytes(),
ContainerTokenIdentifier.KIND.toString(), "password".getBytes(),
"tokenservice");
StartContainerRequest containerReq =
StartContainerRequest.newInstance(clc, containerToken);
// store a container and verify recovered
stateStore.storeContainer(containerId, containerReq);
restartStateStore();
recoveredContainers = stateStore.loadContainersState();
assertEquals(1, recoveredContainers.size());
RecoveredContainerState rcs = recoveredContainers.get(0);
assertEquals(RecoveredContainerStatus.REQUESTED, rcs.getStatus());
assertEquals(ContainerExitStatus.INVALID, rcs.getExitCode());
    assertFalse(rcs.getKilled());
assertEquals(containerReq, rcs.getStartRequest());
assertTrue(rcs.getDiagnostics().isEmpty());
// store a new container record without StartContainerRequest
ContainerId containerId1 = ContainerId.newContainerId(appAttemptId, 6);
stateStore.storeContainerLaunched(containerId1);
recoveredContainers = stateStore.loadContainersState();
    // the record without a start request should be discarded on load
assertEquals(1, recoveredContainers.size());
// launch the container, add some diagnostics, and verify recovered
StringBuilder diags = new StringBuilder();
stateStore.storeContainerLaunched(containerId);
diags.append("some diags for container");
stateStore.storeContainerDiagnostics(containerId, diags);
restartStateStore();
recoveredContainers = stateStore.loadContainersState();
assertEquals(1, recoveredContainers.size());
rcs = recoveredContainers.get(0);
assertEquals(RecoveredContainerStatus.LAUNCHED, rcs.getStatus());
assertEquals(ContainerExitStatus.INVALID, rcs.getExitCode());
    assertFalse(rcs.getKilled());
assertEquals(containerReq, rcs.getStartRequest());
assertEquals(diags.toString(), rcs.getDiagnostics());
// mark the container killed, add some more diags, and verify recovered
diags.append("some more diags for container");
stateStore.storeContainerDiagnostics(containerId, diags);
stateStore.storeContainerKilled(containerId);
restartStateStore();
recoveredContainers = stateStore.loadContainersState();
assertEquals(1, recoveredContainers.size());
rcs = recoveredContainers.get(0);
assertEquals(RecoveredContainerStatus.LAUNCHED, rcs.getStatus());
assertEquals(ContainerExitStatus.INVALID, rcs.getExitCode());
assertTrue(rcs.getKilled());
assertEquals(containerReq, rcs.getStartRequest());
assertEquals(diags.toString(), rcs.getDiagnostics());
// add yet more diags, mark container completed, and verify recovered
diags.append("some final diags");
stateStore.storeContainerDiagnostics(containerId, diags);
stateStore.storeContainerCompleted(containerId, 21);
restartStateStore();
recoveredContainers = stateStore.loadContainersState();
assertEquals(1, recoveredContainers.size());
rcs = recoveredContainers.get(0);
assertEquals(RecoveredContainerStatus.COMPLETED, rcs.getStatus());
assertEquals(21, rcs.getExitCode());
assertTrue(rcs.getKilled());
assertEquals(containerReq, rcs.getStartRequest());
assertEquals(diags.toString(), rcs.getDiagnostics());
// remove the container and verify not recovered
stateStore.removeContainer(containerId);
restartStateStore();
recoveredContainers = stateStore.loadContainersState();
assertTrue(recoveredContainers.isEmpty());
}
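  // Tracker scoping convention for startResourceLocalization:
  // (null user, null app) is the public tracker, (user, null app) the
  // user's private tracker, and (user, appId) the per-application tracker.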
@Test
public void testStartResourceLocalization() throws IOException {
String user = "somebody";
ApplicationId appId = ApplicationId.newInstance(1, 1);
// start a local resource for an application
Path appRsrcPath = new Path("hdfs://some/app/resource");
LocalResourcePBImpl rsrcPb = (LocalResourcePBImpl)
LocalResource.newInstance(
ConverterUtils.getYarnUrlFromPath(appRsrcPath),
LocalResourceType.ARCHIVE, LocalResourceVisibility.APPLICATION,
123L, 456L);
LocalResourceProto appRsrcProto = rsrcPb.getProto();
Path appRsrcLocalPath = new Path("/some/local/dir/for/apprsrc");
stateStore.startResourceLocalization(user, appId, appRsrcProto,
appRsrcLocalPath);
// restart and verify only app resource is marked in-progress
restartStateStore();
RecoveredLocalizationState state = stateStore.loadLocalizationState();
LocalResourceTrackerState pubts = state.getPublicTrackerState();
assertTrue(pubts.getLocalizedResources().isEmpty());
assertTrue(pubts.getInProgressResources().isEmpty());
Map<String, RecoveredUserResources> userResources =
state.getUserResources();
assertEquals(1, userResources.size());
RecoveredUserResources rur = userResources.get(user);
LocalResourceTrackerState privts = rur.getPrivateTrackerState();
assertNotNull(privts);
assertTrue(privts.getLocalizedResources().isEmpty());
assertTrue(privts.getInProgressResources().isEmpty());
assertEquals(1, rur.getAppTrackerStates().size());
LocalResourceTrackerState appts = rur.getAppTrackerStates().get(appId);
assertNotNull(appts);
assertTrue(appts.getLocalizedResources().isEmpty());
assertEquals(1, appts.getInProgressResources().size());
assertEquals(appRsrcLocalPath,
appts.getInProgressResources().get(appRsrcProto));
// start some public and private resources
Path pubRsrcPath1 = new Path("hdfs://some/public/resource1");
rsrcPb = (LocalResourcePBImpl) LocalResource.newInstance(
ConverterUtils.getYarnUrlFromPath(pubRsrcPath1),
LocalResourceType.FILE, LocalResourceVisibility.PUBLIC,
789L, 135L);
LocalResourceProto pubRsrcProto1 = rsrcPb.getProto();
Path pubRsrcLocalPath1 = new Path("/some/local/dir/for/pubrsrc1");
stateStore.startResourceLocalization(null, null, pubRsrcProto1,
pubRsrcLocalPath1);
Path pubRsrcPath2 = new Path("hdfs://some/public/resource2");
rsrcPb = (LocalResourcePBImpl) LocalResource.newInstance(
ConverterUtils.getYarnUrlFromPath(pubRsrcPath2),
LocalResourceType.FILE, LocalResourceVisibility.PUBLIC,
789L, 135L);
LocalResourceProto pubRsrcProto2 = rsrcPb.getProto();
Path pubRsrcLocalPath2 = new Path("/some/local/dir/for/pubrsrc2");
stateStore.startResourceLocalization(null, null, pubRsrcProto2,
pubRsrcLocalPath2);
Path privRsrcPath = new Path("hdfs://some/private/resource");
rsrcPb = (LocalResourcePBImpl) LocalResource.newInstance(
ConverterUtils.getYarnUrlFromPath(privRsrcPath),
LocalResourceType.PATTERN, LocalResourceVisibility.PRIVATE,
789L, 680L, "*pattern*");
LocalResourceProto privRsrcProto = rsrcPb.getProto();
Path privRsrcLocalPath = new Path("/some/local/dir/for/privrsrc");
stateStore.startResourceLocalization(user, null, privRsrcProto,
privRsrcLocalPath);
// restart and verify resources are marked in-progress
restartStateStore();
state = stateStore.loadLocalizationState();
pubts = state.getPublicTrackerState();
assertTrue(pubts.getLocalizedResources().isEmpty());
assertEquals(2, pubts.getInProgressResources().size());
assertEquals(pubRsrcLocalPath1,
pubts.getInProgressResources().get(pubRsrcProto1));
assertEquals(pubRsrcLocalPath2,
pubts.getInProgressResources().get(pubRsrcProto2));
userResources = state.getUserResources();
assertEquals(1, userResources.size());
rur = userResources.get(user);
privts = rur.getPrivateTrackerState();
assertNotNull(privts);
assertTrue(privts.getLocalizedResources().isEmpty());
assertEquals(1, privts.getInProgressResources().size());
assertEquals(privRsrcLocalPath,
privts.getInProgressResources().get(privRsrcProto));
assertEquals(1, rur.getAppTrackerStates().size());
appts = rur.getAppTrackerStates().get(appId);
assertNotNull(appts);
assertTrue(appts.getLocalizedResources().isEmpty());
assertEquals(1, appts.getInProgressResources().size());
assertEquals(appRsrcLocalPath,
appts.getInProgressResources().get(appRsrcProto));
}
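  // Finishing localization moves a resource from a tracker's in-progress
  // map to its localized list; unfinished resources must stay in progress
  // across a restart.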
@Test
public void testFinishResourceLocalization() throws IOException {
String user = "somebody";
ApplicationId appId = ApplicationId.newInstance(1, 1);
// start and finish a local resource for an application
Path appRsrcPath = new Path("hdfs://some/app/resource");
LocalResourcePBImpl rsrcPb = (LocalResourcePBImpl)
LocalResource.newInstance(
ConverterUtils.getYarnUrlFromPath(appRsrcPath),
LocalResourceType.ARCHIVE, LocalResourceVisibility.APPLICATION,
123L, 456L);
LocalResourceProto appRsrcProto = rsrcPb.getProto();
Path appRsrcLocalPath = new Path("/some/local/dir/for/apprsrc");
stateStore.startResourceLocalization(user, appId, appRsrcProto,
appRsrcLocalPath);
LocalizedResourceProto appLocalizedProto =
LocalizedResourceProto.newBuilder()
.setResource(appRsrcProto)
.setLocalPath(appRsrcLocalPath.toString())
.setSize(1234567L)
.build();
stateStore.finishResourceLocalization(user, appId, appLocalizedProto);
// restart and verify only app resource is completed
restartStateStore();
RecoveredLocalizationState state = stateStore.loadLocalizationState();
LocalResourceTrackerState pubts = state.getPublicTrackerState();
assertTrue(pubts.getLocalizedResources().isEmpty());
assertTrue(pubts.getInProgressResources().isEmpty());
Map<String, RecoveredUserResources> userResources =
state.getUserResources();
assertEquals(1, userResources.size());
RecoveredUserResources rur = userResources.get(user);
LocalResourceTrackerState privts = rur.getPrivateTrackerState();
assertNotNull(privts);
assertTrue(privts.getLocalizedResources().isEmpty());
assertTrue(privts.getInProgressResources().isEmpty());
assertEquals(1, rur.getAppTrackerStates().size());
LocalResourceTrackerState appts = rur.getAppTrackerStates().get(appId);
assertNotNull(appts);
assertTrue(appts.getInProgressResources().isEmpty());
assertEquals(1, appts.getLocalizedResources().size());
assertEquals(appLocalizedProto,
appts.getLocalizedResources().iterator().next());
// start some public and private resources
Path pubRsrcPath1 = new Path("hdfs://some/public/resource1");
rsrcPb = (LocalResourcePBImpl) LocalResource.newInstance(
ConverterUtils.getYarnUrlFromPath(pubRsrcPath1),
LocalResourceType.FILE, LocalResourceVisibility.PUBLIC,
789L, 135L);
LocalResourceProto pubRsrcProto1 = rsrcPb.getProto();
Path pubRsrcLocalPath1 = new Path("/some/local/dir/for/pubrsrc1");
stateStore.startResourceLocalization(null, null, pubRsrcProto1,
pubRsrcLocalPath1);
Path pubRsrcPath2 = new Path("hdfs://some/public/resource2");
rsrcPb = (LocalResourcePBImpl) LocalResource.newInstance(
ConverterUtils.getYarnUrlFromPath(pubRsrcPath2),
LocalResourceType.FILE, LocalResourceVisibility.PUBLIC,
789L, 135L);
LocalResourceProto pubRsrcProto2 = rsrcPb.getProto();
Path pubRsrcLocalPath2 = new Path("/some/local/dir/for/pubrsrc2");
stateStore.startResourceLocalization(null, null, pubRsrcProto2,
pubRsrcLocalPath2);
Path privRsrcPath = new Path("hdfs://some/private/resource");
rsrcPb = (LocalResourcePBImpl) LocalResource.newInstance(
ConverterUtils.getYarnUrlFromPath(privRsrcPath),
LocalResourceType.PATTERN, LocalResourceVisibility.PRIVATE,
789L, 680L, "*pattern*");
LocalResourceProto privRsrcProto = rsrcPb.getProto();
Path privRsrcLocalPath = new Path("/some/local/dir/for/privrsrc");
stateStore.startResourceLocalization(user, null, privRsrcProto,
privRsrcLocalPath);
// finish some of the resources
LocalizedResourceProto pubLocalizedProto1 =
LocalizedResourceProto.newBuilder()
.setResource(pubRsrcProto1)
.setLocalPath(pubRsrcLocalPath1.toString())
.setSize(pubRsrcProto1.getSize())
.build();
stateStore.finishResourceLocalization(null, null, pubLocalizedProto1);
LocalizedResourceProto privLocalizedProto =
LocalizedResourceProto.newBuilder()
.setResource(privRsrcProto)
.setLocalPath(privRsrcLocalPath.toString())
.setSize(privRsrcProto.getSize())
.build();
stateStore.finishResourceLocalization(user, null, privLocalizedProto);
// restart and verify state
restartStateStore();
state = stateStore.loadLocalizationState();
pubts = state.getPublicTrackerState();
assertEquals(1, pubts.getLocalizedResources().size());
assertEquals(pubLocalizedProto1,
pubts.getLocalizedResources().iterator().next());
assertEquals(1, pubts.getInProgressResources().size());
assertEquals(pubRsrcLocalPath2,
pubts.getInProgressResources().get(pubRsrcProto2));
userResources = state.getUserResources();
assertEquals(1, userResources.size());
rur = userResources.get(user);
privts = rur.getPrivateTrackerState();
assertNotNull(privts);
assertEquals(1, privts.getLocalizedResources().size());
assertEquals(privLocalizedProto,
privts.getLocalizedResources().iterator().next());
assertTrue(privts.getInProgressResources().isEmpty());
assertEquals(1, rur.getAppTrackerStates().size());
appts = rur.getAppTrackerStates().get(appId);
assertNotNull(appts);
assertTrue(appts.getInProgressResources().isEmpty());
assertEquals(1, appts.getLocalizedResources().size());
assertEquals(appLocalizedProto,
appts.getLocalizedResources().iterator().next());
}
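  // Removal must erase a resource whether or not it finished localizing,
  // and a user with no remaining trackers should vanish from the
  // recovered state entirely.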
@Test
public void testRemoveLocalizedResource() throws IOException {
String user = "somebody";
ApplicationId appId = ApplicationId.newInstance(1, 1);
// go through the complete lifecycle for an application local resource
Path appRsrcPath = new Path("hdfs://some/app/resource");
LocalResourcePBImpl rsrcPb = (LocalResourcePBImpl)
LocalResource.newInstance(
ConverterUtils.getYarnUrlFromPath(appRsrcPath),
LocalResourceType.ARCHIVE, LocalResourceVisibility.APPLICATION,
123L, 456L);
LocalResourceProto appRsrcProto = rsrcPb.getProto();
Path appRsrcLocalPath = new Path("/some/local/dir/for/apprsrc");
stateStore.startResourceLocalization(user, appId, appRsrcProto,
appRsrcLocalPath);
LocalizedResourceProto appLocalizedProto =
LocalizedResourceProto.newBuilder()
.setResource(appRsrcProto)
.setLocalPath(appRsrcLocalPath.toString())
.setSize(1234567L)
.build();
stateStore.finishResourceLocalization(user, appId, appLocalizedProto);
stateStore.removeLocalizedResource(user, appId, appRsrcLocalPath);
restartStateStore();
verifyEmptyState();
// remove an app resource that didn't finish
stateStore.startResourceLocalization(user, appId, appRsrcProto,
appRsrcLocalPath);
stateStore.removeLocalizedResource(user, appId, appRsrcLocalPath);
restartStateStore();
verifyEmptyState();
// add public and private resources and remove some
Path pubRsrcPath1 = new Path("hdfs://some/public/resource1");
rsrcPb = (LocalResourcePBImpl) LocalResource.newInstance(
ConverterUtils.getYarnUrlFromPath(pubRsrcPath1),
LocalResourceType.FILE, LocalResourceVisibility.PUBLIC,
789L, 135L);
LocalResourceProto pubRsrcProto1 = rsrcPb.getProto();
Path pubRsrcLocalPath1 = new Path("/some/local/dir/for/pubrsrc1");
stateStore.startResourceLocalization(null, null, pubRsrcProto1,
pubRsrcLocalPath1);
LocalizedResourceProto pubLocalizedProto1 =
LocalizedResourceProto.newBuilder()
.setResource(pubRsrcProto1)
.setLocalPath(pubRsrcLocalPath1.toString())
.setSize(789L)
.build();
stateStore.finishResourceLocalization(null, null, pubLocalizedProto1);
Path pubRsrcPath2 = new Path("hdfs://some/public/resource2");
rsrcPb = (LocalResourcePBImpl) LocalResource.newInstance(
ConverterUtils.getYarnUrlFromPath(pubRsrcPath2),
LocalResourceType.FILE, LocalResourceVisibility.PUBLIC,
789L, 135L);
LocalResourceProto pubRsrcProto2 = rsrcPb.getProto();
Path pubRsrcLocalPath2 = new Path("/some/local/dir/for/pubrsrc2");
stateStore.startResourceLocalization(null, null, pubRsrcProto2,
pubRsrcLocalPath2);
LocalizedResourceProto pubLocalizedProto2 =
LocalizedResourceProto.newBuilder()
.setResource(pubRsrcProto2)
.setLocalPath(pubRsrcLocalPath2.toString())
.setSize(7654321L)
.build();
stateStore.finishResourceLocalization(null, null, pubLocalizedProto2);
stateStore.removeLocalizedResource(null, null, pubRsrcLocalPath2);
Path privRsrcPath = new Path("hdfs://some/private/resource");
rsrcPb = (LocalResourcePBImpl) LocalResource.newInstance(
ConverterUtils.getYarnUrlFromPath(privRsrcPath),
LocalResourceType.PATTERN, LocalResourceVisibility.PRIVATE,
789L, 680L, "*pattern*");
LocalResourceProto privRsrcProto = rsrcPb.getProto();
Path privRsrcLocalPath = new Path("/some/local/dir/for/privrsrc");
stateStore.startResourceLocalization(user, null, privRsrcProto,
privRsrcLocalPath);
stateStore.removeLocalizedResource(user, null, privRsrcLocalPath);
// restart and verify state
restartStateStore();
RecoveredLocalizationState state = stateStore.loadLocalizationState();
LocalResourceTrackerState pubts = state.getPublicTrackerState();
assertTrue(pubts.getInProgressResources().isEmpty());
assertEquals(1, pubts.getLocalizedResources().size());
assertEquals(pubLocalizedProto1,
pubts.getLocalizedResources().iterator().next());
Map<String, RecoveredUserResources> userResources =
state.getUserResources();
assertTrue(userResources.isEmpty());
}
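  // Deletion tasks round-trip as raw protos keyed by task id, including
  // base dirs and successor task ids.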
@Test
public void testDeletionTaskStorage() throws IOException {
// test empty when no state
RecoveredDeletionServiceState state =
stateStore.loadDeletionServiceState();
assertTrue(state.getTasks().isEmpty());
// store a deletion task and verify recovered
DeletionServiceDeleteTaskProto proto =
DeletionServiceDeleteTaskProto.newBuilder()
.setId(7)
.setUser("someuser")
.setSubdir("some/subdir")
.addBasedirs("some/dir/path")
.addBasedirs("some/other/dir/path")
.setDeletionTime(123456L)
.addSuccessorIds(8)
.addSuccessorIds(9)
.build();
stateStore.storeDeletionTask(proto.getId(), proto);
restartStateStore();
state = stateStore.loadDeletionServiceState();
assertEquals(1, state.getTasks().size());
assertEquals(proto, state.getTasks().get(0));
// store another deletion task
DeletionServiceDeleteTaskProto proto2 =
DeletionServiceDeleteTaskProto.newBuilder()
.setId(8)
.setUser("user2")
.setSubdir("subdir2")
.setDeletionTime(789L)
.build();
stateStore.storeDeletionTask(proto2.getId(), proto2);
restartStateStore();
state = stateStore.loadDeletionServiceState();
assertEquals(2, state.getTasks().size());
assertTrue(state.getTasks().contains(proto));
assertTrue(state.getTasks().contains(proto2));
// delete a task and verify gone after recovery
stateStore.removeDeletionTask(proto2.getId());
restartStateStore();
state = stateStore.loadDeletionServiceState();
assertEquals(1, state.getTasks().size());
assertEquals(proto, state.getTasks().get(0));
// delete the last task and verify none left
stateStore.removeDeletionTask(proto.getId());
restartStateStore();
state = stateStore.loadDeletionServiceState();
assertTrue(state.getTasks().isEmpty());
}
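  // Master-key rollover as exercised below: the current key is demoted to
  // the previous key before a new current key is stored, and per-attempt
  // keys can be added, replaced, or removed independently.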
@Test
public void testNMTokenStorage() throws IOException {
// test empty when no state
RecoveredNMTokensState state = stateStore.loadNMTokensState();
assertNull(state.getCurrentMasterKey());
assertNull(state.getPreviousMasterKey());
assertTrue(state.getApplicationMasterKeys().isEmpty());
// store a master key and verify recovered
NMTokenSecretManagerForTest secretMgr = new NMTokenSecretManagerForTest();
MasterKey currentKey = secretMgr.generateKey();
stateStore.storeNMTokenCurrentMasterKey(currentKey);
restartStateStore();
state = stateStore.loadNMTokensState();
assertEquals(currentKey, state.getCurrentMasterKey());
assertNull(state.getPreviousMasterKey());
assertTrue(state.getApplicationMasterKeys().isEmpty());
// store a previous key and verify recovered
MasterKey prevKey = secretMgr.generateKey();
stateStore.storeNMTokenPreviousMasterKey(prevKey);
restartStateStore();
state = stateStore.loadNMTokensState();
assertEquals(currentKey, state.getCurrentMasterKey());
assertEquals(prevKey, state.getPreviousMasterKey());
assertTrue(state.getApplicationMasterKeys().isEmpty());
// store a few application keys and verify recovered
ApplicationAttemptId attempt1 = ApplicationAttemptId.newInstance(
ApplicationId.newInstance(1, 1), 1);
MasterKey attemptKey1 = secretMgr.generateKey();
stateStore.storeNMTokenApplicationMasterKey(attempt1, attemptKey1);
ApplicationAttemptId attempt2 = ApplicationAttemptId.newInstance(
ApplicationId.newInstance(2, 3), 4);
MasterKey attemptKey2 = secretMgr.generateKey();
stateStore.storeNMTokenApplicationMasterKey(attempt2, attemptKey2);
restartStateStore();
state = stateStore.loadNMTokensState();
assertEquals(currentKey, state.getCurrentMasterKey());
assertEquals(prevKey, state.getPreviousMasterKey());
Map<ApplicationAttemptId, MasterKey> loadedAppKeys =
state.getApplicationMasterKeys();
assertEquals(2, loadedAppKeys.size());
assertEquals(attemptKey1, loadedAppKeys.get(attempt1));
assertEquals(attemptKey2, loadedAppKeys.get(attempt2));
// add/update/remove keys and verify recovered
ApplicationAttemptId attempt3 = ApplicationAttemptId.newInstance(
ApplicationId.newInstance(5, 6), 7);
MasterKey attemptKey3 = secretMgr.generateKey();
stateStore.storeNMTokenApplicationMasterKey(attempt3, attemptKey3);
stateStore.removeNMTokenApplicationMasterKey(attempt1);
attemptKey2 = prevKey;
stateStore.storeNMTokenApplicationMasterKey(attempt2, attemptKey2);
prevKey = currentKey;
stateStore.storeNMTokenPreviousMasterKey(prevKey);
currentKey = secretMgr.generateKey();
stateStore.storeNMTokenCurrentMasterKey(currentKey);
restartStateStore();
state = stateStore.loadNMTokensState();
assertEquals(currentKey, state.getCurrentMasterKey());
assertEquals(prevKey, state.getPreviousMasterKey());
loadedAppKeys = state.getApplicationMasterKeys();
assertEquals(2, loadedAppKeys.size());
assertNull(loadedAppKeys.get(attempt1));
assertEquals(attemptKey2, loadedAppKeys.get(attempt2));
assertEquals(attemptKey3, loadedAppKeys.get(attempt3));
}
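  // Mirrors the NM token test for container tokens: key rollover plus an
  // active-token map keyed by container id with expiration timestamps.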
@Test
public void testContainerTokenStorage() throws IOException {
// test empty when no state
RecoveredContainerTokensState state =
stateStore.loadContainerTokensState();
assertNull(state.getCurrentMasterKey());
assertNull(state.getPreviousMasterKey());
assertTrue(state.getActiveTokens().isEmpty());
// store a master key and verify recovered
ContainerTokenKeyGeneratorForTest keygen =
new ContainerTokenKeyGeneratorForTest(new YarnConfiguration());
MasterKey currentKey = keygen.generateKey();
stateStore.storeContainerTokenCurrentMasterKey(currentKey);
restartStateStore();
state = stateStore.loadContainerTokensState();
assertEquals(currentKey, state.getCurrentMasterKey());
assertNull(state.getPreviousMasterKey());
assertTrue(state.getActiveTokens().isEmpty());
// store a previous key and verify recovered
MasterKey prevKey = keygen.generateKey();
stateStore.storeContainerTokenPreviousMasterKey(prevKey);
restartStateStore();
state = stateStore.loadContainerTokensState();
assertEquals(currentKey, state.getCurrentMasterKey());
assertEquals(prevKey, state.getPreviousMasterKey());
assertTrue(state.getActiveTokens().isEmpty());
// store a few container tokens and verify recovered
ContainerId cid1 = BuilderUtils.newContainerId(1, 1, 1, 1);
Long expTime1 = 1234567890L;
ContainerId cid2 = BuilderUtils.newContainerId(2, 2, 2, 2);
Long expTime2 = 9876543210L;
stateStore.storeContainerToken(cid1, expTime1);
stateStore.storeContainerToken(cid2, expTime2);
restartStateStore();
state = stateStore.loadContainerTokensState();
assertEquals(currentKey, state.getCurrentMasterKey());
assertEquals(prevKey, state.getPreviousMasterKey());
Map<ContainerId, Long> loadedActiveTokens =
state.getActiveTokens();
assertEquals(2, loadedActiveTokens.size());
assertEquals(expTime1, loadedActiveTokens.get(cid1));
assertEquals(expTime2, loadedActiveTokens.get(cid2));
// add/update/remove tokens and verify recovered
ContainerId cid3 = BuilderUtils.newContainerId(3, 3, 3, 3);
Long expTime3 = 135798642L;
stateStore.storeContainerToken(cid3, expTime3);
stateStore.removeContainerToken(cid1);
expTime2 += 246897531L;
stateStore.storeContainerToken(cid2, expTime2);
prevKey = currentKey;
stateStore.storeContainerTokenPreviousMasterKey(prevKey);
currentKey = keygen.generateKey();
stateStore.storeContainerTokenCurrentMasterKey(currentKey);
restartStateStore();
state = stateStore.loadContainerTokensState();
assertEquals(currentKey, state.getCurrentMasterKey());
assertEquals(prevKey, state.getPreviousMasterKey());
loadedActiveTokens = state.getActiveTokens();
assertEquals(2, loadedActiveTokens.size());
assertNull(loadedActiveTokens.get(cid1));
assertEquals(expTime2, loadedActiveTokens.get(cid2));
assertEquals(expTime3, loadedActiveTokens.get(cid3));
}
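  // Log deleter records are keyed by application id and carry the user
  // and scheduled deletion time; removals must survive a restart.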
@Test
public void testLogDeleterStorage() throws IOException {
// test empty when no state
RecoveredLogDeleterState state = stateStore.loadLogDeleterState();
assertTrue(state.getLogDeleterMap().isEmpty());
// store log deleter state
final ApplicationId appId1 = ApplicationId.newInstance(1, 1);
LogDeleterProto proto1 = LogDeleterProto.newBuilder()
.setUser("user1")
.setDeletionTime(1234)
.build();
stateStore.storeLogDeleter(appId1, proto1);
// restart state store and verify recovered
restartStateStore();
state = stateStore.loadLogDeleterState();
assertEquals(1, state.getLogDeleterMap().size());
assertEquals(proto1, state.getLogDeleterMap().get(appId1));
// store another log deleter
final ApplicationId appId2 = ApplicationId.newInstance(2, 2);
LogDeleterProto proto2 = LogDeleterProto.newBuilder()
.setUser("user2")
.setDeletionTime(5678)
.build();
stateStore.storeLogDeleter(appId2, proto2);
// restart state store and verify recovered
restartStateStore();
state = stateStore.loadLogDeleterState();
assertEquals(2, state.getLogDeleterMap().size());
assertEquals(proto1, state.getLogDeleterMap().get(appId1));
assertEquals(proto2, state.getLogDeleterMap().get(appId2));
// remove a deleter and verify removed after restart and recovery
stateStore.removeLogDeleter(appId1);
restartStateStore();
state = stateStore.loadLogDeleterState();
assertEquals(1, state.getLogDeleterMap().size());
assertEquals(proto2, state.getLogDeleterMap().get(appId2));
// remove last deleter and verify empty after restart and recovery
stateStore.removeLogDeleter(appId2);
restartStateStore();
state = stateStore.loadLogDeleterState();
assertTrue(state.getLogDeleterMap().isEmpty());
}
private static class NMTokenSecretManagerForTest extends
BaseNMTokenSecretManager {
public MasterKey generateKey() {
return createNewMasterKey().getMasterKey();
}
}
private static class ContainerTokenKeyGeneratorForTest extends
BaseContainerTokenSecretManager {
public ContainerTokenKeyGeneratorForTest(Configuration conf) {
super(conf);
}
public MasterKey generateKey() {
return createNewMasterKey().getMasterKey();
}
}
}
| 40,460 | 43.807309 | 108 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager.containermanager;
import static org.apache.hadoop.service.Service.STATE.INITED;
import static org.apache.hadoop.service.Service.STATE.STARTED;
import static org.apache.hadoop.service.Service.STATE.STOPPED;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.service.Service;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
import org.apache.hadoop.yarn.server.api.ApplicationInitializationContext;
import org.apache.hadoop.yarn.server.api.ApplicationTerminationContext;
import org.apache.hadoop.yarn.server.api.AuxiliaryService;
import org.apache.hadoop.yarn.server.api.ContainerInitializationContext;
import org.apache.hadoop.yarn.server.api.ContainerTerminationContext;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerImpl;
import org.junit.Assert;
import org.junit.Test;
public class TestAuxServices {
private static final Log LOG = LogFactory.getLog(TestAuxServices.class);
private static final File TEST_DIR = new File(
System.getProperty("test.build.data",
System.getProperty("java.io.tmpdir")),
TestAuxServices.class.getName());
  static class LightService extends AuxiliaryService implements Service {
private final char idef;
private final int expected_appId;
private int remaining_init;
private int remaining_stop;
private ByteBuffer meta = null;
private ArrayList<Integer> stoppedApps;
private ContainerId containerId;
private Resource resource;
LightService(String name, char idef, int expected_appId) {
this(name, idef, expected_appId, null);
}
LightService(String name, char idef, int expected_appId, ByteBuffer meta) {
super(name);
this.idef = idef;
this.expected_appId = expected_appId;
this.meta = meta;
this.stoppedApps = new ArrayList<Integer>();
}
public ArrayList<Integer> getAppIdsStopped() {
return (ArrayList<Integer>)this.stoppedApps.clone();
}
@Override
protected void serviceInit(Configuration conf) throws Exception {
remaining_init = conf.getInt(idef + ".expected.init", 0);
remaining_stop = conf.getInt(idef + ".expected.stop", 0);
super.serviceInit(conf);
}
@Override
protected void serviceStop() throws Exception {
assertEquals(0, remaining_init);
assertEquals(0, remaining_stop);
super.serviceStop();
}
@Override
public void initializeApplication(ApplicationInitializationContext context) {
ByteBuffer data = context.getApplicationDataForService();
assertEquals(idef, data.getChar());
assertEquals(expected_appId, data.getInt());
assertEquals(expected_appId, context.getApplicationId().getId());
}
@Override
public void stopApplication(ApplicationTerminationContext context) {
stoppedApps.add(context.getApplicationId().getId());
}
@Override
public ByteBuffer getMetaData() {
return meta;
}
@Override
public void initializeContainer(
ContainerInitializationContext initContainerContext) {
containerId = initContainerContext.getContainerId();
resource = initContainerContext.getResource();
}
@Override
public void stopContainer(
ContainerTerminationContext stopContainerContext) {
containerId = stopContainerContext.getContainerId();
resource = stopContainerContext.getResource();
}
}
static class ServiceA extends LightService {
public ServiceA() {
super("A", 'A', 65, ByteBuffer.wrap("A".getBytes()));
}
}
static class ServiceB extends LightService {
public ServiceB() {
super("B", 'B', 66, ByteBuffer.wrap("B".getBytes()));
}
}
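  // Verifies dispatch of aux service events: init payloads are routed to
  // the named service, while stop and container events fan out to every
  // registered service.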
@Test
public void testAuxEventDispatch() {
Configuration conf = new Configuration();
conf.setStrings(YarnConfiguration.NM_AUX_SERVICES, new String[] { "Asrv", "Bsrv" });
conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "Asrv"),
ServiceA.class, Service.class);
conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "Bsrv"),
ServiceB.class, Service.class);
conf.setInt("A.expected.init", 1);
conf.setInt("B.expected.stop", 1);
final AuxServices aux = new AuxServices();
aux.init(conf);
aux.start();
ApplicationId appId1 = ApplicationId.newInstance(0, 65);
ByteBuffer buf = ByteBuffer.allocate(6);
buf.putChar('A');
buf.putInt(65);
buf.flip();
AuxServicesEvent event = new AuxServicesEvent(
AuxServicesEventType.APPLICATION_INIT, "user0", appId1, "Asrv", buf);
aux.handle(event);
ApplicationId appId2 = ApplicationId.newInstance(0, 66);
event = new AuxServicesEvent(
AuxServicesEventType.APPLICATION_STOP, "user0", appId2, "Bsrv", null);
// verify all services got the stop event
aux.handle(event);
Collection<AuxiliaryService> servs = aux.getServices();
for (AuxiliaryService serv: servs) {
ArrayList<Integer> appIds = ((LightService)serv).getAppIdsStopped();
assertEquals("app not properly stopped", 1, appIds.size());
assertTrue("wrong app stopped", appIds.contains((Integer)66));
}
for (AuxiliaryService serv : servs) {
assertNull(((LightService) serv).containerId);
assertNull(((LightService) serv).resource);
}
ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId1, 1);
ContainerTokenIdentifier cti = new ContainerTokenIdentifier(
ContainerId.newContainerId(attemptId, 1), "", "",
        Resource.newInstance(1, 1), 0, 0, 0, Priority.newInstance(0), 0);
Container container = new ContainerImpl(null, null, null, null, null,
null, cti);
ContainerId containerId = container.getContainerId();
Resource resource = container.getResource();
event = new AuxServicesEvent(AuxServicesEventType.CONTAINER_INIT,container);
aux.handle(event);
for (AuxiliaryService serv : servs) {
assertEquals(containerId, ((LightService) serv).containerId);
assertEquals(resource, ((LightService) serv).resource);
((LightService) serv).containerId = null;
((LightService) serv).resource = null;
}
event = new AuxServicesEvent(AuxServicesEventType.CONTAINER_STOP, container);
aux.handle(event);
for (AuxiliaryService serv : servs) {
assertEquals(containerId, ((LightService) serv).containerId);
assertEquals(resource, ((LightService) serv).resource);
}
}
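  // The latch uses distinct primes (ServiceA multiplies by 2, ServiceB by
  // 3), so a product of exactly 6 proves one instance of each service was
  // created and nothing else.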
@Test
public void testAuxServices() {
Configuration conf = new Configuration();
conf.setStrings(YarnConfiguration.NM_AUX_SERVICES, new String[] { "Asrv", "Bsrv" });
conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "Asrv"),
ServiceA.class, Service.class);
conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "Bsrv"),
ServiceB.class, Service.class);
final AuxServices aux = new AuxServices();
aux.init(conf);
int latch = 1;
for (Service s : aux.getServices()) {
assertEquals(INITED, s.getServiceState());
if (s instanceof ServiceA) { latch *= 2; }
else if (s instanceof ServiceB) { latch *= 3; }
else fail("Unexpected service type " + s.getClass());
}
assertEquals("Invalid mix of services", 6, latch);
aux.start();
for (Service s : aux.getServices()) {
assertEquals(STARTED, s.getServiceState());
}
aux.stop();
for (Service s : aux.getServices()) {
assertEquals(STOPPED, s.getServiceState());
}
}
@Test
public void testAuxServicesMeta() {
Configuration conf = new Configuration();
conf.setStrings(YarnConfiguration.NM_AUX_SERVICES, new String[] { "Asrv", "Bsrv" });
conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "Asrv"),
ServiceA.class, Service.class);
conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "Bsrv"),
ServiceB.class, Service.class);
final AuxServices aux = new AuxServices();
aux.init(conf);
int latch = 1;
for (Service s : aux.getServices()) {
assertEquals(INITED, s.getServiceState());
if (s instanceof ServiceA) { latch *= 2; }
else if (s instanceof ServiceB) { latch *= 3; }
else fail("Unexpected service type " + s.getClass());
}
assertEquals("Invalid mix of services", 6, latch);
aux.start();
for (Service s : aux.getServices()) {
assertEquals(STARTED, s.getServiceState());
}
Map<String, ByteBuffer> meta = aux.getMetaData();
assertEquals(2, meta.size());
assertEquals("A", new String(meta.get("Asrv").array()));
assertEquals("B", new String(meta.get("Bsrv").array()));
aux.stop();
for (Service s : aux.getServices()) {
assertEquals(STOPPED, s.getServiceState());
}
}
@Test
public void testAuxUnexpectedStop() {
Configuration conf = new Configuration();
conf.setStrings(YarnConfiguration.NM_AUX_SERVICES, new String[] { "Asrv", "Bsrv" });
conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "Asrv"),
ServiceA.class, Service.class);
conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "Bsrv"),
ServiceB.class, Service.class);
final AuxServices aux = new AuxServices();
aux.init(conf);
aux.start();
Service s = aux.getServices().iterator().next();
s.stop();
assertEquals("Auxiliary service stopped, but AuxService unaffected.",
STOPPED, aux.getServiceState());
assertTrue(aux.getServices().isEmpty());
}
@Test
public void testValidAuxServiceName() {
final AuxServices aux = new AuxServices();
Configuration conf = new Configuration();
conf.setStrings(YarnConfiguration.NM_AUX_SERVICES, new String[] {"Asrv1", "Bsrv_2"});
conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "Asrv1"),
ServiceA.class, Service.class);
conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "Bsrv_2"),
ServiceB.class, Service.class);
try {
aux.init(conf);
} catch (Exception ex) {
Assert.fail("Should not receive the exception.");
}
    // Test a bad aux service name
final AuxServices aux1 = new AuxServices();
conf.setStrings(YarnConfiguration.NM_AUX_SERVICES, new String[] {"1Asrv1"});
conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "1Asrv1"),
ServiceA.class, Service.class);
try {
aux1.init(conf);
Assert.fail("Should receive the exception.");
} catch (Exception ex) {
assertTrue(ex.getMessage().contains("The ServiceName: 1Asrv1 set in " +
"yarn.nodemanager.aux-services is invalid.The valid service name " +
"should only contain a-zA-Z0-9_ and can not start with numbers"));
}
}
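  // With NM recovery enabled, each aux service should get its own storage
  // directory under the state store root at init time.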
@Test
public void testAuxServiceRecoverySetup() throws IOException {
Configuration conf = new YarnConfiguration();
conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, true);
conf.set(YarnConfiguration.NM_RECOVERY_DIR, TEST_DIR.toString());
conf.setStrings(YarnConfiguration.NM_AUX_SERVICES,
new String[] { "Asrv", "Bsrv" });
conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "Asrv"),
RecoverableServiceA.class, Service.class);
conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "Bsrv"),
RecoverableServiceB.class, Service.class);
try {
final AuxServices aux = new AuxServices();
aux.init(conf);
Assert.assertEquals(2, aux.getServices().size());
File auxStorageDir = new File(TEST_DIR,
AuxServices.STATE_STORE_ROOT_NAME);
Assert.assertEquals(2, auxStorageDir.listFiles().length);
aux.close();
} finally {
FileUtil.fullyDelete(TEST_DIR);
}
}
static class RecoverableAuxService extends AuxiliaryService {
static final FsPermission RECOVERY_PATH_PERMS =
new FsPermission((short)0700);
String auxName;
RecoverableAuxService(String name, String auxName) {
super(name);
this.auxName = auxName;
}
@Override
protected void serviceInit(Configuration conf) throws Exception {
super.serviceInit(conf);
Path storagePath = getRecoveryPath();
Assert.assertNotNull("Recovery path not present when aux service inits",
storagePath);
Assert.assertTrue(storagePath.toString().contains(auxName));
FileSystem fs = FileSystem.getLocal(conf);
Assert.assertTrue("Recovery path does not exist",
fs.exists(storagePath));
Assert.assertEquals("Recovery path has wrong permissions",
new FsPermission((short)0700),
fs.getFileStatus(storagePath).getPermission());
}
@Override
public void initializeApplication(
ApplicationInitializationContext initAppContext) {
}
@Override
public void stopApplication(ApplicationTerminationContext stopAppContext) {
}
@Override
public ByteBuffer getMetaData() {
return null;
}
}
static class RecoverableServiceA extends RecoverableAuxService {
RecoverableServiceA() {
super("RecoverableServiceA", "Asrv");
}
}
static class RecoverableServiceB extends RecoverableAuxService {
RecoverableServiceB() {
super("RecoverableServiceB", "Bsrv");
}
}
}
| 15,139 | 36.017115 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager.containermanager;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.io.retry.UnreliableInterface;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.yarn.api.ContainerManagementProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesResponse;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainersResponse;
import org.apache.hadoop.yarn.api.protocolrecords.StopContainersRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StopContainersResponse;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.client.NMProxy;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.apache.hadoop.yarn.security.NMTokenIdentifier;
import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.util.Records;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
public class TestNMProxy extends BaseContainerManagerTest {
public TestNMProxy() throws UnsupportedFileSystemException {
super();
}
int retryCount = 0;
boolean shouldThrowNMNotYetReadyException = false;
@Before
public void setUp() throws Exception {
containerManager.start();
containerManager.setBlockNewContainerRequests(false);
}
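  // Failure injection: each overridden call below fails for its first
  // five invocations (connect exceptions, or NMNotYetReadyException when
  // the flag is set), so the NMProxy retry policy is expected to absorb
  // exactly five retries before succeeding.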
@Override
protected ContainerManagerImpl
createContainerManager(DeletionService delSrvc) {
return new ContainerManagerImpl(context, exec, delSrvc, nodeStatusUpdater,
metrics, new ApplicationACLsManager(conf), dirsHandler) {
@Override
public StartContainersResponse startContainers(
StartContainersRequest requests) throws YarnException, IOException {
if (retryCount < 5) {
retryCount++;
if (shouldThrowNMNotYetReadyException) {
// This causes super to throw an NMNotYetReadyException
containerManager.setBlockNewContainerRequests(true);
} else {
if (isRetryPolicyRetryForEver()) {
            // Throw a non-network exception; the forever-retry policy
            // should not retry it
throw new IOException(
new UnreliableInterface.UnreliableException());
} else {
throw new java.net.ConnectException("start container exception");
}
}
} else {
// This stops super from throwing an NMNotYetReadyException
containerManager.setBlockNewContainerRequests(false);
}
return super.startContainers(requests);
}
private boolean isRetryPolicyRetryForEver() {
return conf.getLong(
YarnConfiguration.CLIENT_NM_CONNECT_MAX_WAIT_MS, 1000) == -1;
}
@Override
public StopContainersResponse stopContainers(
StopContainersRequest requests) throws YarnException, IOException {
if (retryCount < 5) {
retryCount++;
throw new java.net.ConnectException("stop container exception");
}
return super.stopContainers(requests);
}
@Override
public GetContainerStatusesResponse getContainerStatuses(
GetContainerStatusesRequest request) throws YarnException,
IOException {
if (retryCount < 5) {
retryCount++;
throw new java.net.ConnectException("get container status exception");
}
return super.getContainerStatuses(request);
}
};
}
@Test(timeout = 20000)
public void testNMProxyRetry() throws Exception {
conf.setLong(YarnConfiguration.CLIENT_NM_CONNECT_MAX_WAIT_MS, 10000);
conf.setLong(YarnConfiguration.CLIENT_NM_CONNECT_RETRY_INTERVAL_MS, 100);
StartContainersRequest allRequests =
Records.newRecord(StartContainersRequest.class);
ContainerManagementProtocol proxy = getNMProxy();
retryCount = 0;
shouldThrowNMNotYetReadyException = false;
proxy.startContainers(allRequests);
Assert.assertEquals(5, retryCount);
retryCount = 0;
shouldThrowNMNotYetReadyException = false;
proxy.stopContainers(Records.newRecord(StopContainersRequest.class));
Assert.assertEquals(5, retryCount);
retryCount = 0;
shouldThrowNMNotYetReadyException = false;
proxy.getContainerStatuses(Records
.newRecord(GetContainerStatusesRequest.class));
Assert.assertEquals(5, retryCount);
retryCount = 0;
shouldThrowNMNotYetReadyException = true;
proxy.startContainers(allRequests);
Assert.assertEquals(5, retryCount);
}
@Test(timeout = 20000, expected = IOException.class)
public void testShouldNotRetryForeverForNonNetworkExceptionsOnNMConnections()
throws Exception {
conf.setLong(YarnConfiguration.CLIENT_NM_CONNECT_MAX_WAIT_MS, -1);
StartContainersRequest allRequests =
Records.newRecord(StartContainersRequest.class);
ContainerManagementProtocol proxy = getNMProxy();
shouldThrowNMNotYetReadyException = false;
retryCount = 0;
proxy.startContainers(allRequests);
}
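  // Builds a client-side proxy the way an AM would: mint an NMToken for
  // the attempt, convert it to a token bound to the NM address, and
  // attach it to a remote UGI before creating the proxy.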
private ContainerManagementProtocol getNMProxy() {
ApplicationId appId = ApplicationId.newInstance(1, 1);
ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
org.apache.hadoop.yarn.api.records.Token nmToken =
context.getNMTokenSecretManager().createNMToken(attemptId,
context.getNodeId(), user);
final InetSocketAddress address =
conf.getSocketAddr(YarnConfiguration.NM_BIND_HOST,
YarnConfiguration.NM_ADDRESS, YarnConfiguration.DEFAULT_NM_ADDRESS,
YarnConfiguration.DEFAULT_NM_PORT);
Token<NMTokenIdentifier> token =
ConverterUtils.convertFromYarn(nmToken,
SecurityUtil.buildTokenService(address));
UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
ugi.addToken(token);
return NMProxy.createNMProxy(conf, ContainerManagementProtocol.class, ugi,
YarnRPC.create(conf), address);
}
}
| 7,369 | 37.994709 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager.containermanager;
import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.yarn.server.nodemanager.executor.DeletionAsUserContext;
import org.junit.Assert;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.util.NodeHealthScriptRunner;
import org.apache.hadoop.yarn.api.ContainerManagementProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
import org.apache.hadoop.yarn.security.NMTokenIdentifier;
import org.apache.hadoop.yarn.server.api.ResourceTracker;
import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;
import org.apache.hadoop.yarn.server.nodemanager.Context;
import org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor;
import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
import org.apache.hadoop.yarn.server.nodemanager.LocalRMInterface;
import org.apache.hadoop.yarn.server.nodemanager.NodeHealthCheckerService;
import org.apache.hadoop.yarn.server.nodemanager.NodeManager;
import org.apache.hadoop.yarn.server.nodemanager.NodeManager.NMContext;
import org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdater;
import org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationState;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics;
import org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService;
import org.apache.hadoop.yarn.server.nodemanager.security.NMContainerTokenSecretManager;
import org.apache.hadoop.yarn.server.nodemanager.security.NMTokenSecretManagerInNM;
import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
import org.junit.After;
import org.junit.Before;
public abstract class BaseContainerManagerTest {
protected static RecordFactory recordFactory = RecordFactoryProvider
.getRecordFactory(null);
protected static FileContext localFS;
protected static File localDir;
protected static File localLogDir;
protected static File remoteLogDir;
protected static File tmpDir;
protected final NodeManagerMetrics metrics = NodeManagerMetrics.create();
public BaseContainerManagerTest() throws UnsupportedFileSystemException {
localFS = FileContext.getLocalFSFileContext();
localDir =
new File("target", this.getClass().getSimpleName() + "-localDir")
.getAbsoluteFile();
localLogDir =
new File("target", this.getClass().getSimpleName() + "-localLogDir")
.getAbsoluteFile();
remoteLogDir =
new File("target", this.getClass().getSimpleName() + "-remoteLogDir")
.getAbsoluteFile();
tmpDir = new File("target", this.getClass().getSimpleName() + "-tmpDir");
}
protected static Log LOG = LogFactory
.getLog(BaseContainerManagerTest.class);
protected static final int HTTP_PORT = 5412;
protected Configuration conf = new YarnConfiguration();
protected Context context = new NMContext(new NMContainerTokenSecretManager(
conf), new NMTokenSecretManagerInNM(), null,
new ApplicationACLsManager(conf), new NMNullStateStoreService()) {
public int getHttpPort() {
return HTTP_PORT;
};
};
protected ContainerExecutor exec;
protected DeletionService delSrvc;
protected String user = "nobody";
protected NodeHealthCheckerService nodeHealthChecker;
protected LocalDirsHandlerService dirsHandler;
protected final long DUMMY_RM_IDENTIFIER = 1234;
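  // Status updater stub: talks to a local in-process RM interface, never
  // starts the heartbeat thread, and reports a fixed RM identifier.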
protected NodeStatusUpdater nodeStatusUpdater = new NodeStatusUpdaterImpl(
context, new AsyncDispatcher(), null, metrics) {
@Override
protected ResourceTracker getRMClient() {
return new LocalRMInterface();
    }
@Override
protected void stopRMProxy() {
return;
}
@Override
protected void startStatusUpdater() {
return; // Don't start any updating thread.
}
@Override
public long getRMIdentifier() {
// There is no real RM registration, simulate and set RMIdentifier
return DUMMY_RM_IDENTIFIER;
}
};
protected ContainerManagerImpl containerManager = null;
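
  // Subclasses may override this to exercise a different executor; the
  // default runs containers as the current user via DefaultContainerExecutor.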
protected ContainerExecutor createContainerExecutor() {
DefaultContainerExecutor exec = new DefaultContainerExecutor();
exec.setConf(conf);
return exec;
}
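
  /**
   * Recreates the scratch directories, points the NM configuration at them,
   * and wires up the executor, deletion service, health checker and container
   * manager before each test.
   */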
@Before
public void setup() throws IOException {
localFS.delete(new Path(localDir.getAbsolutePath()), true);
localFS.delete(new Path(tmpDir.getAbsolutePath()), true);
localFS.delete(new Path(localLogDir.getAbsolutePath()), true);
localFS.delete(new Path(remoteLogDir.getAbsolutePath()), true);
localDir.mkdir();
tmpDir.mkdir();
localLogDir.mkdir();
remoteLogDir.mkdir();
LOG.info("Created localDir in " + localDir.getAbsolutePath());
LOG.info("Created tmpDir in " + tmpDir.getAbsolutePath());
String bindAddress = "0.0.0.0:12345";
conf.set(YarnConfiguration.NM_ADDRESS, bindAddress);
conf.set(YarnConfiguration.NM_LOCAL_DIRS, localDir.getAbsolutePath());
conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDir.getAbsolutePath());
conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, remoteLogDir.getAbsolutePath());
conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS, 1);
// Default delSrvc
delSrvc = createDeletionService();
delSrvc.init(conf);
exec = createContainerExecutor();
dirsHandler = new LocalDirsHandlerService();
nodeHealthChecker = new NodeHealthCheckerService(
NodeManager.getNodeHealthScriptRunner(conf), dirsHandler);
nodeHealthChecker.init(conf);
containerManager = createContainerManager(delSrvc);
((NMContext)context).setContainerManager(containerManager);
nodeStatusUpdater.init(conf);
containerManager.init(conf);
nodeStatusUpdater.start();
}
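
  /**
   * Creates a {@link ContainerManagerImpl} with all token and ACL
   * authorization checks stubbed out, so tests can drive the container
   * protocol without minting real NM or container tokens.
   */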
protected ContainerManagerImpl
createContainerManager(DeletionService delSrvc) {
return new ContainerManagerImpl(context, exec, delSrvc, nodeStatusUpdater,
metrics, new ApplicationACLsManager(conf), dirsHandler) {
@Override
public void
setBlockNewContainerRequests(boolean blockNewContainerRequests) {
// do nothing
}
@Override
      protected void authorizeGetAndStopContainerRequest(ContainerId containerId,
          Container container, boolean stopRequest, NMTokenIdentifier identifier)
          throws YarnException {
// do nothing
}
@Override
protected void authorizeUser(UserGroupInformation remoteUgi,
NMTokenIdentifier nmTokenIdentifier) {
// do nothing
}
@Override
protected void authorizeStartRequest(
NMTokenIdentifier nmTokenIdentifier,
ContainerTokenIdentifier containerTokenIdentifier) throws YarnException {
// do nothing
}
@Override
protected void updateNMTokenIdentifier(
NMTokenIdentifier nmTokenIdentifier) throws InvalidToken {
// Do nothing
}
@Override
public Map<String, ByteBuffer> getAuxServiceMetaData() {
Map<String, ByteBuffer> serviceData = new HashMap<String, ByteBuffer>();
serviceData.put("AuxService1",
ByteBuffer.wrap("AuxServiceMetaData1".getBytes()));
serviceData.put("AuxService2",
ByteBuffer.wrap("AuxServiceMetaData2".getBytes()));
return serviceData;
}
@Override
protected NMTokenIdentifier selectNMTokenIdentifier(
UserGroupInformation remoteUgi) {
return new NMTokenIdentifier();
}
};
}
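
  /**
   * Deletion service that only logs what it would have deleted, leaving
   * container directories on disk for post-test inspection.
   */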
protected DeletionService createDeletionService() {
return new DeletionService(exec) {
@Override
public void delete(String user, Path subDir, Path... baseDirs) {
// Don't do any deletions.
LOG.info("Psuedo delete: user - " + user + ", subDir - " + subDir
+ ", baseDirs - " + baseDirs);
};
};
}
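
  // Stop the container manager and scrub the local dir as the test user.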
@After
public void tearDown() throws IOException, InterruptedException {
if (containerManager != null) {
containerManager.stop();
}
createContainerExecutor().deleteAsUser(new DeletionAsUserContext.Builder()
.setUser(user)
.setSubDir(new Path(localDir.getAbsolutePath()))
.setBasedirs(new Path[] {})
.build());
}
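
  /**
   * Polls the container status once per second until it reaches
   * {@code finalState}, failing the test once the timeout (20 seconds by
   * default) expires.
   */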
public static void waitForContainerState(ContainerManagementProtocol containerManager,
ContainerId containerID, ContainerState finalState)
throws InterruptedException, YarnException, IOException {
waitForContainerState(containerManager, containerID, finalState, 20);
}
public static void waitForContainerState(ContainerManagementProtocol containerManager,
ContainerId containerID, ContainerState finalState, int timeOutMax)
throws InterruptedException, YarnException, IOException {
List<ContainerId> list = new ArrayList<ContainerId>();
list.add(containerID);
GetContainerStatusesRequest request =
GetContainerStatusesRequest.newInstance(list);
ContainerStatus containerStatus =
containerManager.getContainerStatuses(request).getContainerStatuses()
.get(0);
int timeoutSecs = 0;
while (!containerStatus.getState().equals(finalState)
&& timeoutSecs++ < timeOutMax) {
Thread.sleep(1000);
LOG.info("Waiting for container to get into state " + finalState
+ ". Current state is " + containerStatus.getState());
containerStatus = containerManager.getContainerStatuses(request).getContainerStatuses().get(0);
}
LOG.info("Container state is " + containerStatus.getState());
Assert.assertEquals("ContainerState is not correct (timedout)",
finalState, containerStatus.getState());
}
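
  /**
   * Polls the application state once per second until it reaches
   * {@code finalState}, failing the test after 15 seconds.
   */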
static void waitForApplicationState(ContainerManagerImpl containerManager,
ApplicationId appID, ApplicationState finalState)
throws InterruptedException {
// Wait for app-finish
Application app =
containerManager.getContext().getApplications().get(appID);
int timeout = 0;
while (!(app.getApplicationState().equals(finalState))
&& timeout++ < 15) {
LOG.info("Waiting for app to reach " + finalState
+ ".. Current state is "
+ app.getApplicationState());
Thread.sleep(1000);
}
Assert.assertTrue("App is not in " + finalState + " yet!! Timedout!!",
app.getApplicationState().equals(finalState));
}
}