repo (string, 1–191 chars, nullable ⌀) | file (string, 23–351 chars) | code (string, 0–5.32M chars) | file_length (int64, 0–5.32M) | avg_line_length (float64, 0–2.9k) | max_line_length (int64, 0–288k) | extension_type (string, 1 class: java) |
---|---|---|---|---|---|---|
hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ClientSCMProtocolPBClientImpl.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.api.impl.pb.client;
import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.yarn.api.ClientSCMProtocol;
import org.apache.hadoop.yarn.api.ClientSCMProtocolPB;
import org.apache.hadoop.yarn.api.protocolrecords.ReleaseSharedCacheResourceRequest;
import org.apache.hadoop.yarn.api.protocolrecords.ReleaseSharedCacheResourceResponse;
import org.apache.hadoop.yarn.api.protocolrecords.UseSharedCacheResourceRequest;
import org.apache.hadoop.yarn.api.protocolrecords.UseSharedCacheResourceResponse;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.ReleaseSharedCacheResourceRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.ReleaseSharedCacheResourceResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.UseSharedCacheResourceRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.UseSharedCacheResourceResponsePBImpl;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.ipc.RPCUtil;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.ReleaseSharedCacheResourceRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.UseSharedCacheResourceRequestProto;
import com.google.protobuf.ServiceException;
public class ClientSCMProtocolPBClientImpl implements ClientSCMProtocol,
Closeable {
private ClientSCMProtocolPB proxy;
public ClientSCMProtocolPBClientImpl(long clientVersion,
InetSocketAddress addr, Configuration conf) throws IOException {
RPC.setProtocolEngine(conf, ClientSCMProtocolPB.class,
ProtobufRpcEngine.class);
proxy = RPC.getProxy(ClientSCMProtocolPB.class, clientVersion, addr, conf);
}
@Override
public void close() {
if (this.proxy != null) {
RPC.stopProxy(this.proxy);
this.proxy = null;
}
}
@Override
public UseSharedCacheResourceResponse use(
UseSharedCacheResourceRequest request) throws YarnException, IOException {
UseSharedCacheResourceRequestProto requestProto =
((UseSharedCacheResourceRequestPBImpl) request).getProto();
try {
return new UseSharedCacheResourceResponsePBImpl(proxy.use(null,
requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public ReleaseSharedCacheResourceResponse release(
ReleaseSharedCacheResourceRequest request) throws YarnException,
IOException {
ReleaseSharedCacheResourceRequestProto requestProto =
((ReleaseSharedCacheResourceRequestPBImpl) request).getProto();
try {
return new ReleaseSharedCacheResourceResponsePBImpl(proxy.release(null,
requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
}
| 3,789 | 39.319149 | 99 | java |
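A minimal usage sketch for the client above, assuming the shared cache manager listens on its stock client port (8045) and that protocol version 1 is acceptable, as YARN's PB clients conventionally use; the host name, application id, and resource checksum below are placeholders, not values from this file.

```java
import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.impl.pb.client.ClientSCMProtocolPBClientImpl;
import org.apache.hadoop.yarn.api.protocolrecords.UseSharedCacheResourceRequest;
import org.apache.hadoop.yarn.api.protocolrecords.UseSharedCacheResourceResponse;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.util.Records;

public class ScmClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder: the shared cache manager's client address.
    InetSocketAddress scmAddr = new InetSocketAddress("scm.example.com", 8045);
    // Version 1, mirroring how YARN's RPC factories instantiate PB clients.
    try (ClientSCMProtocolPBClientImpl client =
        new ClientSCMProtocolPBClientImpl(1L, scmAddr, conf)) {
      UseSharedCacheResourceRequest request =
          Records.newRecord(UseSharedCacheResourceRequest.class);
      request.setAppId(ApplicationId.newInstance(System.currentTimeMillis(), 1));
      request.setResourceKey("0123456789abcdef"); // placeholder checksum key
      UseSharedCacheResourceResponse response = client.use(request);
      System.out.println("Path in shared cache: " + response.getPath());
    }
  }
}
```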
hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ApplicationClientProtocolPBClientImpl.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.api.impl.pb.client;
import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto;
import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto;
import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto;
import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
import org.apache.hadoop.yarn.api.ApplicationClientProtocolPB;
import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest;
import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainersResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetLabelsToNodesRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetLabelsToNodesResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoResponse;
import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest;
import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse;
import org.apache.hadoop.yarn.api.protocolrecords.MoveApplicationAcrossQueuesRequest;
import org.apache.hadoop.yarn.api.protocolrecords.MoveApplicationAcrossQueuesResponse;
import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenRequest;
import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenResponse;
import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteRequest;
import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteResponse;
import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionRequest;
import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionResponse;
import org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateRequest;
import org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateResponse;
import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.CancelDelegationTokenRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.CancelDelegationTokenResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationAttemptReportRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationAttemptReportResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationAttemptsRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationAttemptsResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationReportRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationReportResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationsRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationsResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterMetricsRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterMetricsResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterNodeLabelsRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterNodeLabelsResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterNodesRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterNodesResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainerReportRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainerReportResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainersRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainersResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetDelegationTokenRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetDelegationTokenResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetLabelsToNodesRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetLabelsToNodesResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNewApplicationRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNewApplicationResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNodesToLabelsRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNodesToLabelsResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueInfoRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueInfoResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueUserAclsInfoRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueUserAclsInfoResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.KillApplicationRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.KillApplicationResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.MoveApplicationAcrossQueuesRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.MoveApplicationAcrossQueuesResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RenewDelegationTokenRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RenewDelegationTokenResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.ReservationDeleteRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.ReservationDeleteResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.ReservationSubmissionRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.ReservationSubmissionResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.ReservationUpdateRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.ReservationUpdateResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SubmitApplicationRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SubmitApplicationResponsePBImpl;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.ipc.RPCUtil;
import org.apache.hadoop.yarn.proto.YarnServiceProtos;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationAttemptReportRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationAttemptsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationReportRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterMetricsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterNodeLabelsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterNodesRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainerReportRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainersRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNewApplicationRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueInfoRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueUserAclsInfoRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.KillApplicationRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.MoveApplicationAcrossQueuesRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.ReservationDeleteRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.ReservationSubmissionRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.ReservationUpdateRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.SubmitApplicationRequestProto;
import com.google.protobuf.ServiceException;
@Private
public class ApplicationClientProtocolPBClientImpl implements ApplicationClientProtocol,
Closeable {
private ApplicationClientProtocolPB proxy;
public ApplicationClientProtocolPBClientImpl(long clientVersion,
InetSocketAddress addr, Configuration conf) throws IOException {
RPC.setProtocolEngine(conf, ApplicationClientProtocolPB.class,
ProtobufRpcEngine.class);
proxy = RPC.getProxy(ApplicationClientProtocolPB.class, clientVersion, addr, conf);
}
@Override
public void close() {
if (this.proxy != null) {
RPC.stopProxy(this.proxy);
}
}
@Override
public KillApplicationResponse forceKillApplication(
KillApplicationRequest request) throws YarnException, IOException {
KillApplicationRequestProto requestProto =
((KillApplicationRequestPBImpl) request).getProto();
try {
return new KillApplicationResponsePBImpl(proxy.forceKillApplication(null,
requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public GetApplicationReportResponse getApplicationReport(
GetApplicationReportRequest request) throws YarnException,
IOException {
GetApplicationReportRequestProto requestProto =
((GetApplicationReportRequestPBImpl) request).getProto();
try {
return new GetApplicationReportResponsePBImpl(proxy.getApplicationReport(
null, requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public GetClusterMetricsResponse getClusterMetrics(
GetClusterMetricsRequest request) throws YarnException,
IOException {
GetClusterMetricsRequestProto requestProto =
((GetClusterMetricsRequestPBImpl) request).getProto();
try {
return new GetClusterMetricsResponsePBImpl(proxy.getClusterMetrics(null,
requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public GetNewApplicationResponse getNewApplication(
GetNewApplicationRequest request) throws YarnException,
IOException {
GetNewApplicationRequestProto requestProto =
((GetNewApplicationRequestPBImpl) request).getProto();
try {
return new GetNewApplicationResponsePBImpl(proxy.getNewApplication(null,
requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public SubmitApplicationResponse submitApplication(
SubmitApplicationRequest request) throws YarnException,
IOException {
SubmitApplicationRequestProto requestProto =
((SubmitApplicationRequestPBImpl) request).getProto();
try {
return new SubmitApplicationResponsePBImpl(proxy.submitApplication(null,
requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public GetApplicationsResponse getApplications(
GetApplicationsRequest request) throws YarnException,
IOException {
GetApplicationsRequestProto requestProto =
((GetApplicationsRequestPBImpl) request).getProto();
try {
return new GetApplicationsResponsePBImpl(proxy.getApplications(
null, requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public GetClusterNodesResponse
getClusterNodes(GetClusterNodesRequest request)
throws YarnException, IOException {
GetClusterNodesRequestProto requestProto =
((GetClusterNodesRequestPBImpl) request).getProto();
try {
return new GetClusterNodesResponsePBImpl(proxy.getClusterNodes(null,
requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public GetQueueInfoResponse getQueueInfo(GetQueueInfoRequest request)
throws YarnException, IOException {
GetQueueInfoRequestProto requestProto =
((GetQueueInfoRequestPBImpl) request).getProto();
try {
return new GetQueueInfoResponsePBImpl(proxy.getQueueInfo(null,
requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public GetQueueUserAclsInfoResponse getQueueUserAcls(
GetQueueUserAclsInfoRequest request) throws YarnException,
IOException {
GetQueueUserAclsInfoRequestProto requestProto =
((GetQueueUserAclsInfoRequestPBImpl) request).getProto();
try {
return new GetQueueUserAclsInfoResponsePBImpl(proxy.getQueueUserAcls(
null, requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public GetDelegationTokenResponse getDelegationToken(
GetDelegationTokenRequest request) throws YarnException,
IOException {
GetDelegationTokenRequestProto requestProto =
((GetDelegationTokenRequestPBImpl) request).getProto();
try {
return new GetDelegationTokenResponsePBImpl(proxy.getDelegationToken(
null, requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public RenewDelegationTokenResponse renewDelegationToken(
RenewDelegationTokenRequest request) throws YarnException,
IOException {
RenewDelegationTokenRequestProto requestProto =
((RenewDelegationTokenRequestPBImpl) request).getProto();
try {
return new RenewDelegationTokenResponsePBImpl(proxy.renewDelegationToken(
null, requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public CancelDelegationTokenResponse cancelDelegationToken(
CancelDelegationTokenRequest request) throws YarnException,
IOException {
CancelDelegationTokenRequestProto requestProto =
((CancelDelegationTokenRequestPBImpl) request).getProto();
try {
return new CancelDelegationTokenResponsePBImpl(
proxy.cancelDelegationToken(null, requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public MoveApplicationAcrossQueuesResponse moveApplicationAcrossQueues(
MoveApplicationAcrossQueuesRequest request) throws YarnException,
IOException {
MoveApplicationAcrossQueuesRequestProto requestProto =
((MoveApplicationAcrossQueuesRequestPBImpl) request).getProto();
try {
return new MoveApplicationAcrossQueuesResponsePBImpl(
proxy.moveApplicationAcrossQueues(null, requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public GetApplicationAttemptReportResponse getApplicationAttemptReport(
GetApplicationAttemptReportRequest request) throws YarnException,
IOException {
GetApplicationAttemptReportRequestProto requestProto =
((GetApplicationAttemptReportRequestPBImpl) request).getProto();
try {
return new GetApplicationAttemptReportResponsePBImpl(
proxy.getApplicationAttemptReport(null, requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public GetApplicationAttemptsResponse getApplicationAttempts(
GetApplicationAttemptsRequest request) throws YarnException, IOException {
GetApplicationAttemptsRequestProto requestProto =
((GetApplicationAttemptsRequestPBImpl) request).getProto();
try {
return new GetApplicationAttemptsResponsePBImpl(
proxy.getApplicationAttempts(null, requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public GetContainerReportResponse getContainerReport(
GetContainerReportRequest request) throws YarnException, IOException {
GetContainerReportRequestProto requestProto =
((GetContainerReportRequestPBImpl) request).getProto();
try {
return new GetContainerReportResponsePBImpl(proxy.getContainerReport(
null, requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public GetContainersResponse getContainers(GetContainersRequest request)
throws YarnException, IOException {
GetContainersRequestProto requestProto =
((GetContainersRequestPBImpl) request).getProto();
try {
return new GetContainersResponsePBImpl(proxy.getContainers(null,
requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public ReservationSubmissionResponse submitReservation(ReservationSubmissionRequest request)
throws YarnException, IOException {
ReservationSubmissionRequestProto requestProto =
((ReservationSubmissionRequestPBImpl) request).getProto();
try {
return new ReservationSubmissionResponsePBImpl(proxy.submitReservation(null,
requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public ReservationUpdateResponse updateReservation(ReservationUpdateRequest request)
throws YarnException, IOException {
ReservationUpdateRequestProto requestProto =
((ReservationUpdateRequestPBImpl) request).getProto();
try {
return new ReservationUpdateResponsePBImpl(proxy.updateReservation(null,
requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public ReservationDeleteResponse deleteReservation(ReservationDeleteRequest request)
throws YarnException, IOException {
ReservationDeleteRequestProto requestProto =
((ReservationDeleteRequestPBImpl) request).getProto();
try {
return new ReservationDeleteResponsePBImpl(proxy.deleteReservation(null,
requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public GetNodesToLabelsResponse getNodeToLabels(
GetNodesToLabelsRequest request)
throws YarnException, IOException {
YarnServiceProtos.GetNodesToLabelsRequestProto
requestProto =
((GetNodesToLabelsRequestPBImpl) request).getProto();
try {
return new GetNodesToLabelsResponsePBImpl(proxy.getNodeToLabels(
null, requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public GetLabelsToNodesResponse getLabelsToNodes(
GetLabelsToNodesRequest request)
throws YarnException, IOException {
YarnServiceProtos.GetLabelsToNodesRequestProto requestProto =
((GetLabelsToNodesRequestPBImpl) request).getProto();
try {
return new GetLabelsToNodesResponsePBImpl(proxy.getLabelsToNodes(
null, requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public GetClusterNodeLabelsResponse getClusterNodeLabels(
GetClusterNodeLabelsRequest request) throws YarnException, IOException {
GetClusterNodeLabelsRequestProto
requestProto =
((GetClusterNodeLabelsRequestPBImpl) request).getProto();
try {
return new GetClusterNodeLabelsResponsePBImpl(proxy.getClusterNodeLabels(
null, requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
}
| 22,972 | 43.956947 | 100 | java |
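Every method in this client follows the same four-step template: cast the abstract record to its PB implementation, extract the proto, invoke the generated stub with a null RpcController, and wrap the proto response (or unwrap the ServiceException). A sketch of driving the simplest call, getNewApplication, is below; the RM host name and port 8032 are placeholder assumptions.

```java
import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.impl.pb.client.ApplicationClientProtocolPBClientImpl;
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
import org.apache.hadoop.yarn.util.Records;

public class RmClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder: the ResourceManager client address (8032 is the stock port).
    InetSocketAddress rmAddr = new InetSocketAddress("rm.example.com", 8032);
    ApplicationClientProtocolPBClientImpl client =
        new ApplicationClientProtocolPBClientImpl(1L, rmAddr, conf);
    try {
      GetNewApplicationRequest request =
          Records.newRecord(GetNewApplicationRequest.class);
      GetNewApplicationResponse response = client.getNewApplication(request);
      System.out.println("New application id: " + response.getApplicationId());
    } finally {
      client.close();
    }
  }
}
```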
hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ApplicationMasterProtocolPBClientImpl.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.api.impl.pb.client;
import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
import org.apache.hadoop.yarn.api.ApplicationMasterProtocolPB;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.AllocateRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.AllocateResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.FinishApplicationMasterRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.FinishApplicationMasterResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RegisterApplicationMasterRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RegisterApplicationMasterResponsePBImpl;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.ipc.RPCUtil;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.FinishApplicationMasterRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.RegisterApplicationMasterRequestProto;
import com.google.protobuf.ServiceException;
@Private
public class ApplicationMasterProtocolPBClientImpl implements ApplicationMasterProtocol, Closeable {
private ApplicationMasterProtocolPB proxy;
public ApplicationMasterProtocolPBClientImpl(long clientVersion, InetSocketAddress addr,
Configuration conf) throws IOException {
RPC.setProtocolEngine(conf, ApplicationMasterProtocolPB.class, ProtobufRpcEngine.class);
proxy =
(ApplicationMasterProtocolPB) RPC.getProxy(ApplicationMasterProtocolPB.class, clientVersion,
addr, conf);
}
@Override
public void close() {
if (this.proxy != null) {
RPC.stopProxy(this.proxy);
}
}
@Override
public AllocateResponse allocate(AllocateRequest request)
throws YarnException, IOException {
AllocateRequestProto requestProto =
((AllocateRequestPBImpl) request).getProto();
try {
return new AllocateResponsePBImpl(proxy.allocate(null, requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public FinishApplicationMasterResponse finishApplicationMaster(
FinishApplicationMasterRequest request) throws YarnException,
IOException {
FinishApplicationMasterRequestProto requestProto =
((FinishApplicationMasterRequestPBImpl) request).getProto();
try {
return new FinishApplicationMasterResponsePBImpl(
proxy.finishApplicationMaster(null, requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public RegisterApplicationMasterResponse registerApplicationMaster(
RegisterApplicationMasterRequest request) throws YarnException,
IOException {
RegisterApplicationMasterRequestProto requestProto =
((RegisterApplicationMasterRequestPBImpl) request).getProto();
try {
return new RegisterApplicationMasterResponsePBImpl(
proxy.registerApplicationMaster(null, requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
}
| 4,798 | 41.096491 | 100 | java |
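The three methods above cover the full ApplicationMaster lifecycle. The sketch below strings them together in order; the host names, tracking URL, and empty ask/release lists are illustrative assumptions, not values taken from this file.

```java
import java.util.Collections;

import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.ResourceRequest;

public class AmLifecycleSketch {
  // Drives register -> allocate -> finish against any ApplicationMasterProtocol,
  // such as the PB client above.
  static void runLifecycle(ApplicationMasterProtocol am) throws Exception {
    am.registerApplicationMaster(RegisterApplicationMasterRequest.newInstance(
        "am-host.example.com", 0, "http://am-host.example.com/tracking"));

    // One empty heartbeat; a real AM fills the ask/release lists.
    AllocateRequest heartbeat = AllocateRequest.newInstance(0, 0.0f,
        Collections.<ResourceRequest>emptyList(),
        Collections.<ContainerId>emptyList(), null);
    AllocateResponse response = am.allocate(heartbeat);
    System.out.println("Containers allocated this round: "
        + response.getAllocatedContainers().size());

    am.finishApplicationMaster(FinishApplicationMasterRequest.newInstance(
        FinalApplicationStatus.SUCCEEDED, "", ""));
  }
}
```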
hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ApplicationHistoryProtocolPBClientImpl.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.api.impl.pb.client;
import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto;
import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto;
import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto;
import org.apache.hadoop.yarn.api.ApplicationClientProtocolPB;
import org.apache.hadoop.yarn.api.ApplicationHistoryProtocol;
import org.apache.hadoop.yarn.api.ApplicationHistoryProtocolPB;
import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest;
import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainersResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse;
import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenRequest;
import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenResponse;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.CancelDelegationTokenRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.CancelDelegationTokenResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationAttemptReportRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationAttemptReportResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationAttemptsRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationAttemptsResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationReportRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationReportResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationsRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationsResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainerReportRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainerReportResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainersRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainersResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetDelegationTokenRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetDelegationTokenResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RenewDelegationTokenRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RenewDelegationTokenResponsePBImpl;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.ipc.RPCUtil;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationReportRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationAttemptsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationAttemptReportRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainerReportRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainersRequestProto;
import com.google.protobuf.ServiceException;
public class ApplicationHistoryProtocolPBClientImpl implements
ApplicationHistoryProtocol, Closeable {
private ApplicationHistoryProtocolPB proxy;
public ApplicationHistoryProtocolPBClientImpl(long clientVersion,
InetSocketAddress addr, Configuration conf) throws IOException {
RPC.setProtocolEngine(conf, ApplicationHistoryProtocolPB.class,
ProtobufRpcEngine.class);
proxy =
RPC.getProxy(ApplicationHistoryProtocolPB.class, clientVersion, addr,
conf);
}
@Override
public void close() throws IOException {
if (this.proxy != null) {
RPC.stopProxy(this.proxy);
}
}
@Override
public GetApplicationReportResponse getApplicationReport(
GetApplicationReportRequest request) throws YarnException, IOException {
GetApplicationReportRequestProto requestProto =
((GetApplicationReportRequestPBImpl) request).getProto();
try {
return new GetApplicationReportResponsePBImpl(proxy.getApplicationReport(
null, requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public GetApplicationsResponse
getApplications(GetApplicationsRequest request) throws YarnException,
IOException {
GetApplicationsRequestProto requestProto =
((GetApplicationsRequestPBImpl) request).getProto();
try {
return new GetApplicationsResponsePBImpl(proxy.getApplications(null,
requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public GetApplicationAttemptReportResponse getApplicationAttemptReport(
GetApplicationAttemptReportRequest request) throws YarnException,
IOException {
GetApplicationAttemptReportRequestProto requestProto =
((GetApplicationAttemptReportRequestPBImpl) request).getProto();
try {
return new GetApplicationAttemptReportResponsePBImpl(
proxy.getApplicationAttemptReport(null, requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public GetApplicationAttemptsResponse getApplicationAttempts(
GetApplicationAttemptsRequest request) throws YarnException, IOException {
GetApplicationAttemptsRequestProto requestProto =
((GetApplicationAttemptsRequestPBImpl) request).getProto();
try {
return new GetApplicationAttemptsResponsePBImpl(
proxy.getApplicationAttempts(null, requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public GetContainerReportResponse getContainerReport(
GetContainerReportRequest request) throws YarnException, IOException {
GetContainerReportRequestProto requestProto =
((GetContainerReportRequestPBImpl) request).getProto();
try {
return new GetContainerReportResponsePBImpl(proxy.getContainerReport(
null, requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public GetContainersResponse getContainers(GetContainersRequest request)
throws YarnException, IOException {
GetContainersRequestProto requestProto =
((GetContainersRequestPBImpl) request).getProto();
try {
return new GetContainersResponsePBImpl(proxy.getContainers(null,
requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public GetDelegationTokenResponse getDelegationToken(
GetDelegationTokenRequest request) throws YarnException, IOException {
GetDelegationTokenRequestProto requestProto =
((GetDelegationTokenRequestPBImpl) request).getProto();
try {
return new GetDelegationTokenResponsePBImpl(proxy.getDelegationToken(
null, requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public RenewDelegationTokenResponse renewDelegationToken(
RenewDelegationTokenRequest request) throws YarnException, IOException {
RenewDelegationTokenRequestProto requestProto =
((RenewDelegationTokenRequestPBImpl) request).getProto();
try {
return new RenewDelegationTokenResponsePBImpl(proxy.renewDelegationToken(
null, requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public CancelDelegationTokenResponse cancelDelegationToken(
CancelDelegationTokenRequest request) throws YarnException, IOException {
CancelDelegationTokenRequestProto requestProto =
((CancelDelegationTokenRequestPBImpl) request).getProto();
try {
return new CancelDelegationTokenResponsePBImpl(
proxy.cancelDelegationToken(null, requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
}
| 10,362 | 43.861472 | 100 | java |
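A sketch of querying a finished application through this history client, assuming a timeline/history server on its stock client port (10200); the application id and host name are made-up placeholders.

```java
import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.impl.pb.client.ApplicationHistoryProtocolPBClientImpl;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;

public class AhsClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder: history server client address (10200 is the stock port).
    InetSocketAddress ahsAddr = new InetSocketAddress("ahs.example.com", 10200);
    try (ApplicationHistoryProtocolPBClientImpl client =
        new ApplicationHistoryProtocolPBClientImpl(1L, ahsAddr, conf)) {
      ApplicationId appId = ApplicationId.newInstance(1425000000000L, 7);
      ApplicationReport report = client.getApplicationReport(
          GetApplicationReportRequest.newInstance(appId))
          .getApplicationReport();
      System.out.println(appId + " finished as "
          + report.getFinalApplicationStatus());
    }
  }
}
```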
hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ContainerManagementProtocolPBClientImpl.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.api.impl.pb.client;
import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.ContainerManagementProtocol;
import org.apache.hadoop.yarn.api.ContainerManagementProtocolPB;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesResponse;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainersResponse;
import org.apache.hadoop.yarn.api.protocolrecords.StopContainersRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StopContainersResponse;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainerStatusesRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainerStatusesResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StartContainersRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StartContainersResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StopContainersRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StopContainersResponsePBImpl;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.ipc.RPCUtil;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainerStatusesRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.StartContainersRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.StopContainersRequestProto;
import com.google.protobuf.ServiceException;
@Private
public class ContainerManagementProtocolPBClientImpl implements ContainerManagementProtocol,
Closeable {
// Not a documented config. Only used for tests
static final String NM_COMMAND_TIMEOUT = YarnConfiguration.YARN_PREFIX
+ "rpc.nm-command-timeout";
/**
 * Maximum timeout of 1 minute for a node to react to the command.
 */
static final int DEFAULT_COMMAND_TIMEOUT = 60000;
private ContainerManagementProtocolPB proxy;
public ContainerManagementProtocolPBClientImpl(long clientVersion,
InetSocketAddress addr, Configuration conf) throws IOException {
RPC.setProtocolEngine(conf, ContainerManagementProtocolPB.class,
ProtobufRpcEngine.class);
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
int expireIntvl = conf.getInt(NM_COMMAND_TIMEOUT, DEFAULT_COMMAND_TIMEOUT);
proxy =
(ContainerManagementProtocolPB) RPC.getProxy(ContainerManagementProtocolPB.class,
clientVersion, addr, ugi, conf,
NetUtils.getDefaultSocketFactory(conf), expireIntvl);
}
@Override
public void close() {
if (this.proxy != null) {
RPC.stopProxy(this.proxy);
}
}
@Override
public StartContainersResponse
startContainers(StartContainersRequest requests) throws YarnException,
IOException {
StartContainersRequestProto requestProto =
((StartContainersRequestPBImpl) requests).getProto();
try {
return new StartContainersResponsePBImpl(proxy.startContainers(null,
requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public StopContainersResponse stopContainers(StopContainersRequest requests)
throws YarnException, IOException {
StopContainersRequestProto requestProto =
((StopContainersRequestPBImpl) requests).getProto();
try {
return new StopContainersResponsePBImpl(proxy.stopContainers(null,
requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public GetContainerStatusesResponse getContainerStatuses(
GetContainerStatusesRequest request) throws YarnException, IOException {
GetContainerStatusesRequestProto requestProto =
((GetContainerStatusesRequestPBImpl) request).getProto();
try {
return new GetContainerStatusesResponsePBImpl(proxy.getContainerStatuses(
null, requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
}
| 5,409 | 39.984848 | 93 | java |
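The constructor above reads an RPC timeout from a key assembled as YarnConfiguration.YARN_PREFIX ("yarn.") + "rpc.nm-command-timeout", i.e. "yarn.rpc.nm-command-timeout". A small sketch of overriding it, test-only as the comment in the class notes:

```java
import org.apache.hadoop.conf.Configuration;

public class NmTimeoutSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Key assembled in the client above; undocumented and intended for tests.
    conf.setInt("yarn.rpc.nm-command-timeout", 30000); // 30s instead of 60s
    // Any ContainerManagementProtocolPBClientImpl built with this conf now
    // applies the shorter RPC timeout to start/stop/status calls.
  }
}
```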
hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/AHSProxy.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.client;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.security.PrivilegedAction;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.ipc.YarnRPC;
@InterfaceAudience.Public
@InterfaceStability.Evolving
@SuppressWarnings("unchecked")
public class AHSProxy<T> {
private static final Log LOG = LogFactory.getLog(AHSProxy.class);
public static <T> T createAHSProxy(final Configuration conf,
final Class<T> protocol, InetSocketAddress ahsAddress) throws IOException {
LOG.info("Connecting to Application History server at " + ahsAddress);
return (T) getProxy(conf, protocol, ahsAddress);
}
protected static <T> T getProxy(final Configuration conf,
final Class<T> protocol, final InetSocketAddress rmAddress)
throws IOException {
return UserGroupInformation.getCurrentUser().doAs(
new PrivilegedAction<T>() {
@Override
public T run() {
return (T) YarnRPC.create(conf).getProxy(protocol, rmAddress, conf);
}
});
}
}
| 2,151 | 36.103448 | 81 | java |
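Using the factory above is a one-liner. In the sketch below the address is a hard-coded placeholder; production code would read it from yarn.timeline-service.address instead.

```java
import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.ApplicationHistoryProtocol;
import org.apache.hadoop.yarn.client.AHSProxy;

public class AhsProxySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder history server endpoint.
    InetSocketAddress ahsAddress =
        new InetSocketAddress("ahs.example.com", 10200);
    ApplicationHistoryProtocol history = AHSProxy.createAHSProxy(
        conf, ApplicationHistoryProtocol.class, ahsAddress);
    // The proxy runs under the current UGI; use it like any protocol handle,
    // e.g. history.getApplicationReport(...).
  }
}
```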
hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ConfiguredRMFailoverProxyProvider.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.client;
import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.yarn.conf.HAUtil;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class ConfiguredRMFailoverProxyProvider<T>
implements RMFailoverProxyProvider<T> {
private static final Log LOG =
LogFactory.getLog(ConfiguredRMFailoverProxyProvider.class);
private int currentProxyIndex = 0;
Map<String, T> proxies = new HashMap<String, T>();
private RMProxy<T> rmProxy;
private Class<T> protocol;
protected YarnConfiguration conf;
protected String[] rmServiceIds;
@Override
public void init(Configuration configuration, RMProxy<T> rmProxy,
Class<T> protocol) {
this.rmProxy = rmProxy;
this.protocol = protocol;
this.rmProxy.checkAllowedProtocols(this.protocol);
this.conf = new YarnConfiguration(configuration);
Collection<String> rmIds = HAUtil.getRMHAIds(conf);
this.rmServiceIds = rmIds.toArray(new String[rmIds.size()]);
conf.set(YarnConfiguration.RM_HA_ID, rmServiceIds[currentProxyIndex]);
conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
conf.getInt(YarnConfiguration.CLIENT_FAILOVER_RETRIES,
YarnConfiguration.DEFAULT_CLIENT_FAILOVER_RETRIES));
conf.setInt(CommonConfigurationKeysPublic.
IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
conf.getInt(YarnConfiguration.CLIENT_FAILOVER_RETRIES_ON_SOCKET_TIMEOUTS,
YarnConfiguration.DEFAULT_CLIENT_FAILOVER_RETRIES_ON_SOCKET_TIMEOUTS));
}
private T getProxyInternal() {
try {
final InetSocketAddress rmAddress = rmProxy.getRMAddress(conf, protocol);
return RMProxy.getProxy(conf, protocol, rmAddress);
} catch (IOException ioe) {
LOG.error("Unable to create proxy to the ResourceManager " +
rmServiceIds[currentProxyIndex], ioe);
return null;
}
}
@Override
public synchronized ProxyInfo<T> getProxy() {
String rmId = rmServiceIds[currentProxyIndex];
T current = proxies.get(rmId);
if (current == null) {
current = getProxyInternal();
proxies.put(rmId, current);
}
return new ProxyInfo<T>(current, rmId);
}
@Override
public synchronized void performFailover(T currentProxy) {
currentProxyIndex = (currentProxyIndex + 1) % rmServiceIds.length;
conf.set(YarnConfiguration.RM_HA_ID, rmServiceIds[currentProxyIndex]);
LOG.info("Failing over to " + rmServiceIds[currentProxyIndex]);
}
@Override
public Class<T> getInterface() {
return protocol;
}
/**
* Close all the proxy objects which have been opened over the lifetime of
* this proxy provider.
*/
@Override
public synchronized void close() throws IOException {
for (T proxy : proxies.values()) {
if (proxy instanceof Closeable) {
((Closeable)proxy).close();
} else {
RPC.stopProxy(proxy);
}
}
}
}
| 4,289 | 33.878049 | 83 | java |
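The provider above resolves its rmServiceIds from the HA id list in the configuration. A sketch of the minimal HA client setup that exercises it, with hypothetical host names; the per-RM hostname keys follow the usual yarn.resourcemanager.hostname.&lt;rm-id&gt; pattern.

```java
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class RmHaConfSketch {
  public static void main(String[] args) {
    YarnConfiguration conf = new YarnConfiguration();
    // Hypothetical two-RM cluster; the provider cycles rm1 -> rm2 -> rm1 ...
    conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
    conf.set(YarnConfiguration.RM_HA_IDS, "rm1,rm2");
    conf.set("yarn.resourcemanager.hostname.rm1", "rm1.example.com");
    conf.set("yarn.resourcemanager.hostname.rm2", "rm2.example.com");
    // Bound per-RM connection retries so failover happens promptly.
    conf.setInt(YarnConfiguration.CLIENT_FAILOVER_RETRIES, 0);
  }
}
```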
hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMHAServiceTarget.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.client;
import org.apache.hadoop.ha.BadFencingConfigurationException;
import org.apache.hadoop.ha.HAServiceTarget;
import org.apache.hadoop.ha.NodeFencer;
import org.apache.hadoop.yarn.conf.HAUtil;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import java.io.IOException;
import java.net.InetSocketAddress;
public class RMHAServiceTarget extends HAServiceTarget {
private final boolean autoFailoverEnabled;
private final InetSocketAddress haAdminServiceAddress;
public RMHAServiceTarget(YarnConfiguration conf)
throws IOException {
autoFailoverEnabled = HAUtil.isAutomaticFailoverEnabled(conf);
haAdminServiceAddress = conf.getSocketAddr(
YarnConfiguration.RM_ADMIN_ADDRESS,
YarnConfiguration.DEFAULT_RM_ADMIN_ADDRESS,
YarnConfiguration.DEFAULT_RM_ADMIN_PORT);
}
@Override
public InetSocketAddress getAddress() {
return haAdminServiceAddress;
}
@Override
public InetSocketAddress getZKFCAddress() {
// TODO (YARN-1177): ZKFC implementation
throw new UnsupportedOperationException("RMHAServiceTarget doesn't have " +
"a corresponding ZKFC address");
}
@Override
public NodeFencer getFencer() {
return null;
}
@Override
public void checkFencingConfigured() throws BadFencingConfigurationException {
throw new BadFencingConfigurationException("Fencer not configured");
}
@Override
public boolean isAutoFailoverEnabled() {
return autoFailoverEnabled;
}
}
| 2,315 | 32.085714 | 80 | java |
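A short sketch of constructing the target above and inspecting the admin endpoint it resolves; the address value is an assumption (8033 is the default admin port that getSocketAddr falls back to here).

```java
import org.apache.hadoop.yarn.client.RMHAServiceTarget;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class HaTargetSketch {
  public static void main(String[] args) throws Exception {
    YarnConfiguration conf = new YarnConfiguration();
    // Hypothetical admin endpoint for one RM in an HA pair.
    conf.set(YarnConfiguration.RM_ADMIN_ADDRESS, "rm1.example.com:8033");
    RMHAServiceTarget target = new RMHAServiceTarget(conf);
    System.out.println("HA admin endpoint: " + target.getAddress());
  }
}
```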
hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMFailoverProxyProvider.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.client;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.retry.FailoverProxyProvider;
@InterfaceAudience.Private
public interface RMFailoverProxyProvider<T> extends FailoverProxyProvider<T> {
/**
* Initialize internal data structures, invoked right after instantiation.
*
* @param conf Configuration to use
* @param proxy The {@link RMProxy} instance to use
* @param protocol The communication protocol to use
*/
public void init(Configuration conf, RMProxy<T> proxy, Class<T> protocol);
}
| 1,437 | 38.944444 | 79 | java |
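Because the interface only adds init() on top of Hadoop's FailoverProxyProvider, a custom provider reduces to five methods. Below is a skeleton for a hypothetical single-RM provider, a sketch rather than a working implementation: actual proxy creation is elided, since a real provider would build it through RMProxy/YarnRPC as ConfiguredRMFailoverProxyProvider does above.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.client.RMFailoverProxyProvider;
import org.apache.hadoop.yarn.client.RMProxy;

// Hypothetical provider that always talks to one fixed RM.
public class SingleRMFailoverProxyProvider<T>
    implements RMFailoverProxyProvider<T> {
  private Class<T> protocol;
  private T proxy;

  @Override
  public void init(Configuration conf, RMProxy<T> rmProxy, Class<T> proto) {
    this.protocol = proto;
    // A real implementation would create this.proxy here via RMProxy/YarnRPC.
  }

  @Override
  public ProxyInfo<T> getProxy() {
    return new ProxyInfo<T>(proxy, "fixed-rm");
  }

  @Override
  public void performFailover(T currentProxy) {
    // Nothing to fail over to: single RM.
  }

  @Override
  public Class<T> getInterface() {
    return protocol;
  }

  @Override
  public void close() throws IOException {
    // Release proxy resources if any were created.
  }
}
```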
hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/NMProxy.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.client;
import java.net.InetSocketAddress;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.ipc.YarnRPC;
@Public
@Unstable
public class NMProxy extends ServerProxy {
public static <T> T createNMProxy(final Configuration conf,
final Class<T> protocol, final UserGroupInformation ugi,
final YarnRPC rpc, final InetSocketAddress serverAddress) {
RetryPolicy retryPolicy =
createRetryPolicy(conf,
YarnConfiguration.CLIENT_NM_CONNECT_MAX_WAIT_MS,
YarnConfiguration.DEFAULT_CLIENT_NM_CONNECT_MAX_WAIT_MS,
YarnConfiguration.CLIENT_NM_CONNECT_RETRY_INTERVAL_MS,
YarnConfiguration.DEFAULT_CLIENT_NM_CONNECT_RETRY_INTERVAL_MS);
return createRetriableProxy(conf, protocol, ugi, rpc, serverAddress,
retryPolicy);
}
}
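// A minimal usage sketch (hypothetical call site): build a retriable
// ContainerManagementProtocol proxy to a NodeManager at a known address, with
// the retry policy derived from the client/NM connect settings shown above.
class NMProxyDemo {
  static org.apache.hadoop.yarn.api.ContainerManagementProtocol connect(
      Configuration conf, InetSocketAddress nmAddress)
      throws java.io.IOException {
    return NMProxy.createNMProxy(conf,
        org.apache.hadoop.yarn.api.ContainerManagementProtocol.class,
        UserGroupInformation.getCurrentUser(), YarnRPC.create(conf),
        nmAddress);
  }
}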
| 1,945 | 38.714286 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.client;
import java.io.EOFException;
import java.io.IOException;
import java.net.ConnectException;
import java.net.InetSocketAddress;
import java.net.NoRouteToHostException;
import java.net.SocketException;
import java.net.UnknownHostException;
import java.security.PrivilegedAction;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.io.retry.RetryProxy;
import org.apache.hadoop.ipc.RetriableException;
import org.apache.hadoop.net.ConnectTimeoutException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.yarn.conf.HAUtil;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.ipc.YarnRPC;
import com.google.common.annotations.VisibleForTesting;
@InterfaceAudience.Public
@InterfaceStability.Evolving
@SuppressWarnings("unchecked")
public class RMProxy<T> {
private static final Log LOG = LogFactory.getLog(RMProxy.class);
protected RMProxy() {}
/**
* Verify the passed protocol is supported.
*/
@Private
protected void checkAllowedProtocols(Class<?> protocol) {}
/**
* Get the ResourceManager address from the provided Configuration for the
* given protocol.
*/
@Private
protected InetSocketAddress getRMAddress(
YarnConfiguration conf, Class<?> protocol) throws IOException {
throw new UnsupportedOperationException("This method should be invoked " +
"from an instance of ClientRMProxy or ServerRMProxy");
}
/**
* Create a proxy for the specified protocol. For non-HA,
* this is a direct connection to the ResourceManager address. When HA is
* enabled, the proxy handles the failover between the ResourceManagers as
* well.
*/
@Private
protected static <T> T createRMProxy(final Configuration configuration,
final Class<T> protocol, RMProxy instance) throws IOException {
YarnConfiguration conf = (configuration instanceof YarnConfiguration)
? (YarnConfiguration) configuration
: new YarnConfiguration(configuration);
RetryPolicy retryPolicy = createRetryPolicy(conf);
if (HAUtil.isHAEnabled(conf)) {
RMFailoverProxyProvider<T> provider =
instance.createRMFailoverProxyProvider(conf, protocol);
return (T) RetryProxy.create(protocol, provider, retryPolicy);
} else {
InetSocketAddress rmAddress = instance.getRMAddress(conf, protocol);
LOG.info("Connecting to ResourceManager at " + rmAddress);
T proxy = RMProxy.<T>getProxy(conf, protocol, rmAddress);
return (T) RetryProxy.create(protocol, proxy, retryPolicy);
}
}
/**
* @deprecated
* This method is deprecated and is not used by YARN internally any more.
* To create a proxy to the RM, use ClientRMProxy#createRMProxy or
* ServerRMProxy#createRMProxy.
*
* Create a proxy to the ResourceManager at the specified address.
*
* @param conf Configuration to generate retry policy
* @param protocol Protocol for the proxy
* @param rmAddress Address of the ResourceManager
* @param <T> Type information of the proxy
* @return Proxy to the RM
* @throws IOException
*/
@Deprecated
public static <T> T createRMProxy(final Configuration conf,
final Class<T> protocol, InetSocketAddress rmAddress) throws IOException {
RetryPolicy retryPolicy = createRetryPolicy(conf);
T proxy = RMProxy.<T>getProxy(conf, protocol, rmAddress);
LOG.info("Connecting to ResourceManager at " + rmAddress);
return (T) RetryProxy.create(protocol, proxy, retryPolicy);
}
/**
* Get a proxy to the RM at the specified address. To be used to create a
* RetryProxy.
*/
@Private
static <T> T getProxy(final Configuration conf,
final Class<T> protocol, final InetSocketAddress rmAddress)
throws IOException {
return UserGroupInformation.getCurrentUser().doAs(
new PrivilegedAction<T>() {
@Override
public T run() {
return (T) YarnRPC.create(conf).getProxy(protocol, rmAddress, conf);
}
});
}
/**
* Helper method to create FailoverProxyProvider.
*/
private <T> RMFailoverProxyProvider<T> createRMFailoverProxyProvider(
Configuration conf, Class<T> protocol) {
Class<? extends RMFailoverProxyProvider<T>> defaultProviderClass;
try {
defaultProviderClass = (Class<? extends RMFailoverProxyProvider<T>>)
Class.forName(
YarnConfiguration.DEFAULT_CLIENT_FAILOVER_PROXY_PROVIDER);
} catch (Exception e) {
throw new YarnRuntimeException("Invalid default failover provider class" +
YarnConfiguration.DEFAULT_CLIENT_FAILOVER_PROXY_PROVIDER, e);
}
RMFailoverProxyProvider<T> provider = ReflectionUtils.newInstance(
conf.getClass(YarnConfiguration.CLIENT_FAILOVER_PROXY_PROVIDER,
defaultProviderClass, RMFailoverProxyProvider.class), conf);
provider.init(conf, (RMProxy<T>) this, protocol);
return provider;
}
/**
* Fetch retry policy from Configuration
*/
@Private
@VisibleForTesting
public static RetryPolicy createRetryPolicy(Configuration conf) {
long rmConnectWaitMS =
conf.getLong(
YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS,
YarnConfiguration.DEFAULT_RESOURCEMANAGER_CONNECT_MAX_WAIT_MS);
long rmConnectionRetryIntervalMS =
conf.getLong(
YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS,
YarnConfiguration
.DEFAULT_RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS);
boolean waitForEver = (rmConnectWaitMS == -1);
if (!waitForEver) {
if (rmConnectWaitMS < 0) {
throw new YarnRuntimeException("Invalid Configuration. "
+ YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS
+ " can be -1, but can not be other negative numbers");
}
// try connect once
if (rmConnectWaitMS < rmConnectionRetryIntervalMS) {
LOG.warn(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS
+ " is smaller than "
+ YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS
+ ". Only try connect once.");
rmConnectWaitMS = 0;
}
}
// Handle HA case first
if (HAUtil.isHAEnabled(conf)) {
final long failoverSleepBaseMs = conf.getLong(
YarnConfiguration.CLIENT_FAILOVER_SLEEPTIME_BASE_MS,
rmConnectionRetryIntervalMS);
final long failoverSleepMaxMs = conf.getLong(
YarnConfiguration.CLIENT_FAILOVER_SLEEPTIME_MAX_MS,
rmConnectionRetryIntervalMS);
int maxFailoverAttempts = conf.getInt(
YarnConfiguration.CLIENT_FAILOVER_MAX_ATTEMPTS, -1);
if (maxFailoverAttempts == -1) {
if (waitForEver) {
maxFailoverAttempts = Integer.MAX_VALUE;
} else {
maxFailoverAttempts = (int) (rmConnectWaitMS / failoverSleepBaseMs);
}
}
return RetryPolicies.failoverOnNetworkException(
RetryPolicies.TRY_ONCE_THEN_FAIL, maxFailoverAttempts,
failoverSleepBaseMs, failoverSleepMaxMs);
}
if (rmConnectionRetryIntervalMS < 0) {
throw new YarnRuntimeException("Invalid Configuration. " +
YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS +
" should not be negative.");
}
RetryPolicy retryPolicy = null;
if (waitForEver) {
retryPolicy = RetryPolicies.RETRY_FOREVER;
} else {
retryPolicy =
RetryPolicies.retryUpToMaximumTimeWithFixedSleep(rmConnectWaitMS,
rmConnectionRetryIntervalMS, TimeUnit.MILLISECONDS);
}
Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap =
new HashMap<Class<? extends Exception>, RetryPolicy>();
exceptionToPolicyMap.put(EOFException.class, retryPolicy);
exceptionToPolicyMap.put(ConnectException.class, retryPolicy);
exceptionToPolicyMap.put(NoRouteToHostException.class, retryPolicy);
exceptionToPolicyMap.put(UnknownHostException.class, retryPolicy);
exceptionToPolicyMap.put(ConnectTimeoutException.class, retryPolicy);
exceptionToPolicyMap.put(RetriableException.class, retryPolicy);
exceptionToPolicyMap.put(SocketException.class, retryPolicy);
return RetryPolicies.retryByException(
RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
}
}
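// A worked sketch (hypothetical settings) of how createRetryPolicy() above
// reacts to configuration in the non-HA case: a 30s max wait with a 2s
// interval yields fixed-sleep retries on the mapped connection exceptions
// for up to 30 seconds, while a max wait of -1 retries forever.
class RMRetryPolicyDemo {
  static RetryPolicy build() {
    Configuration conf = new Configuration();
    conf.setLong(
        YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS, 30 * 1000L);
    conf.setLong(
        YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS,
        2 * 1000L);
    return RMProxy.createRetryPolicy(conf);
  }
}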
| 9,736 | 36.88716 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ClientRMProxy.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.client;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
import org.apache.hadoop.yarn.conf.HAUtil;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
@InterfaceAudience.Public
@InterfaceStability.Stable
public class ClientRMProxy<T> extends RMProxy<T> {
private static final Log LOG = LogFactory.getLog(ClientRMProxy.class);
private static final ClientRMProxy INSTANCE = new ClientRMProxy();
private interface ClientRMProtocols extends ApplicationClientProtocol,
ApplicationMasterProtocol, ResourceManagerAdministrationProtocol {
// Add nothing
}
private ClientRMProxy(){
super();
}
/**
* Create a proxy to the ResourceManager for the specified protocol.
* @param configuration Configuration with all the required information.
* @param protocol Client protocol for which proxy is being requested.
* @param <T> Type of proxy.
* @return Proxy to the ResourceManager for the specified client protocol.
* @throws IOException
*/
public static <T> T createRMProxy(final Configuration configuration,
final Class<T> protocol) throws IOException {
return createRMProxy(configuration, protocol, INSTANCE);
}
private static void setAMRMTokenService(final Configuration conf)
throws IOException {
for (Token<? extends TokenIdentifier> token : UserGroupInformation
.getCurrentUser().getTokens()) {
if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
token.setService(getAMRMTokenService(conf));
}
}
}
@Private
@Override
protected InetSocketAddress getRMAddress(YarnConfiguration conf,
Class<?> protocol) throws IOException {
if (protocol == ApplicationClientProtocol.class) {
return conf.getSocketAddr(YarnConfiguration.RM_ADDRESS,
YarnConfiguration.DEFAULT_RM_ADDRESS,
YarnConfiguration.DEFAULT_RM_PORT);
} else if (protocol == ResourceManagerAdministrationProtocol.class) {
return conf.getSocketAddr(
YarnConfiguration.RM_ADMIN_ADDRESS,
YarnConfiguration.DEFAULT_RM_ADMIN_ADDRESS,
YarnConfiguration.DEFAULT_RM_ADMIN_PORT);
} else if (protocol == ApplicationMasterProtocol.class) {
setAMRMTokenService(conf);
return conf.getSocketAddr(YarnConfiguration.RM_SCHEDULER_ADDRESS,
YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS,
YarnConfiguration.DEFAULT_RM_SCHEDULER_PORT);
} else {
String message = "Unsupported protocol found when creating the proxy " +
"connection to ResourceManager: " +
          ((protocol != null) ? protocol.getName() : "null");
LOG.error(message);
throw new IllegalStateException(message);
}
}
@Private
@Override
protected void checkAllowedProtocols(Class<?> protocol) {
Preconditions.checkArgument(
protocol.isAssignableFrom(ClientRMProtocols.class),
"RM does not support this client protocol");
}
/**
* Get the token service name to be used for RMDelegationToken. Depending
* on whether HA is enabled or not, this method generates the appropriate
* service name as a comma-separated list of service addresses.
*
* @param conf Configuration corresponding to the cluster we need the
* RMDelegationToken for
* @return - Service name for RMDelegationToken
*/
@Unstable
public static Text getRMDelegationTokenService(Configuration conf) {
return getTokenService(conf, YarnConfiguration.RM_ADDRESS,
YarnConfiguration.DEFAULT_RM_ADDRESS,
YarnConfiguration.DEFAULT_RM_PORT);
}
@Unstable
public static Text getAMRMTokenService(Configuration conf) {
return getTokenService(conf, YarnConfiguration.RM_SCHEDULER_ADDRESS,
YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS,
YarnConfiguration.DEFAULT_RM_SCHEDULER_PORT);
}
@Unstable
public static Text getTokenService(Configuration conf, String address,
String defaultAddr, int defaultPort) {
if (HAUtil.isHAEnabled(conf)) {
// Build a list of service addresses to form the service name
ArrayList<String> services = new ArrayList<String>();
YarnConfiguration yarnConf = new YarnConfiguration(conf);
for (String rmId : HAUtil.getRMHAIds(conf)) {
// Set RM_ID to get the corresponding RM_ADDRESS
yarnConf.set(YarnConfiguration.RM_HA_ID, rmId);
services.add(SecurityUtil.buildTokenService(
yarnConf.getSocketAddr(address, defaultAddr, defaultPort))
.toString());
}
return new Text(Joiner.on(',').join(services));
}
// Non-HA case - no need to set RM_ID
return SecurityUtil.buildTokenService(conf.getSocketAddr(address,
defaultAddr, defaultPort));
}
}
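// A minimal usage sketch: obtain an ApplicationClientProtocol proxy through
// ClientRMProxy. With HA enabled this transparently wraps the failover
// provider; otherwise it connects directly to yarn.resourcemanager.address.
class ClientRMProxyDemo {
  static ApplicationClientProtocol connect(Configuration conf)
      throws IOException {
    return ClientRMProxy.createRMProxy(conf, ApplicationClientProtocol.class);
  }
}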
| 6,582 | 38.89697 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ServerProxy.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.client;
import java.io.EOFException;
import java.net.ConnectException;
import java.net.InetSocketAddress;
import java.net.NoRouteToHostException;
import java.net.SocketException;
import java.net.UnknownHostException;
import java.security.PrivilegedAction;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.io.retry.RetryProxy;
import org.apache.hadoop.ipc.RetriableException;
import org.apache.hadoop.net.ConnectTimeoutException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.exceptions.NMNotYetReadyException;
import org.apache.hadoop.yarn.ipc.YarnRPC;
import com.google.common.base.Preconditions;
@Public
@Unstable
public class ServerProxy {
protected static RetryPolicy createRetryPolicy(Configuration conf,
String maxWaitTimeStr, long defMaxWaitTime,
String connectRetryIntervalStr, long defRetryInterval) {
long maxWaitTime = conf.getLong(maxWaitTimeStr, defMaxWaitTime);
long retryIntervalMS =
conf.getLong(connectRetryIntervalStr, defRetryInterval);
Preconditions.checkArgument((maxWaitTime == -1 || maxWaitTime > 0),
"Invalid Configuration. " + maxWaitTimeStr + " should be either"
+ " positive value or -1.");
    Preconditions.checkArgument(retryIntervalMS > 0, "Invalid Configuration. "
        + connectRetryIntervalStr + " should be a positive value.");
RetryPolicy retryPolicy = null;
if (maxWaitTime == -1) {
// wait forever.
retryPolicy = RetryPolicies.RETRY_FOREVER;
} else {
retryPolicy =
RetryPolicies.retryUpToMaximumTimeWithFixedSleep(maxWaitTime,
retryIntervalMS, TimeUnit.MILLISECONDS);
}
Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap =
new HashMap<Class<? extends Exception>, RetryPolicy>();
exceptionToPolicyMap.put(EOFException.class, retryPolicy);
exceptionToPolicyMap.put(ConnectException.class, retryPolicy);
exceptionToPolicyMap.put(NoRouteToHostException.class, retryPolicy);
exceptionToPolicyMap.put(UnknownHostException.class, retryPolicy);
exceptionToPolicyMap.put(RetriableException.class, retryPolicy);
exceptionToPolicyMap.put(SocketException.class, retryPolicy);
exceptionToPolicyMap.put(NMNotYetReadyException.class, retryPolicy);
return RetryPolicies.retryByException(RetryPolicies.TRY_ONCE_THEN_FAIL,
exceptionToPolicyMap);
}
@SuppressWarnings("unchecked")
protected static <T> T createRetriableProxy(final Configuration conf,
final Class<T> protocol, final UserGroupInformation user,
final YarnRPC rpc, final InetSocketAddress serverAddress,
RetryPolicy retryPolicy) {
T proxy = user.doAs(new PrivilegedAction<T>() {
@Override
public T run() {
return (T) rpc.getProxy(protocol, serverAddress, conf);
}
});
return (T) RetryProxy.create(protocol, proxy, retryPolicy);
}
}
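// A minimal sketch (hypothetical subclass and property names) of how a
// server-facing client reuses createRetryPolicy(): pass the max-wait and
// retry-interval property keys with their defaults, then wrap the RPC proxy
// with the resulting policy via createRetriableProxy().
class DemoServerProxy extends ServerProxy {
  static RetryPolicy demoPolicy(Configuration conf) {
    return createRetryPolicy(conf,
        "demo.client.connect.max-wait.ms", 15 * 60 * 1000L,
        "demo.client.connect.retry-interval.ms", 10 * 1000L);
  }
}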
| 4,082 | 39.83 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/package-info.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.client.api;
| 851 | 41.6 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineClient.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.client.api;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
import org.apache.hadoop.yarn.api.records.timeline.TimelineDomain;
import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
import org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
/**
* A client library that can be used to post some information in terms of a
* number of conceptual entities.
*/
@Public
@Evolving
public abstract class TimelineClient extends AbstractService {
/**
   * Create a timeline client. The UGI that is current when the user
   * initializes the client will be used for the put and delegation token
   * operations. The current user may use {@link UserGroupInformation#doAs} to
   * impersonate another user when constructing and initializing a timeline
   * client, if the following operations are supposed to be conducted by that
   * user.
*
* @return a timeline client
*/
@Public
public static TimelineClient createTimelineClient() {
TimelineClient client = new TimelineClientImpl();
return client;
}
@Private
protected TimelineClient(String name) {
super(name);
}
/**
* <p>
* Send the information of a number of conceptual entities to the timeline
* server. It is a blocking API. The method will not return until it gets the
* response from the timeline server.
* </p>
*
* @param entities
* the collection of {@link TimelineEntity}
* @return the error information if the sent entities are not correctly stored
* @throws IOException
* @throws YarnException
*/
@Public
public abstract TimelinePutResponse putEntities(
TimelineEntity... entities) throws IOException, YarnException;
/**
* <p>
* Send the information of a domain to the timeline server. It is a
* blocking API. The method will not return until it gets the response from
* the timeline server.
* </p>
*
* @param domain
* an {@link TimelineDomain} object
* @throws IOException
* @throws YarnException
*/
@Public
public abstract void putDomain(
TimelineDomain domain) throws IOException, YarnException;
/**
* <p>
* Get a delegation token so as to be able to talk to the timeline server in a
* secure way.
* </p>
*
* @param renewer
* Address of the renewer who can renew these tokens when needed by
* securely talking to the timeline server
* @return a delegation token ({@link Token}) that can be used to talk to the
* timeline server
* @throws IOException
* @throws YarnException
*/
@Public
public abstract Token<TimelineDelegationTokenIdentifier> getDelegationToken(
String renewer) throws IOException, YarnException;
/**
* <p>
* Renew a timeline delegation token.
* </p>
*
* @param timelineDT
* the delegation token to renew
* @return the new expiration time
* @throws IOException
* @throws YarnException
*/
@Public
public abstract long renewDelegationToken(
Token<TimelineDelegationTokenIdentifier> timelineDT)
throws IOException, YarnException;
/**
* <p>
* Cancel a timeline delegation token.
* </p>
*
* @param timelineDT
* the delegation token to cancel
* @throws IOException
* @throws YarnException
*/
@Public
public abstract void cancelDelegationToken(
Token<TimelineDelegationTokenIdentifier> timelineDT)
throws IOException, YarnException;
}
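// A minimal usage sketch (hypothetical entity values): run the client through
// the standard service lifecycle, post a single entity, then inspect the
// per-entity errors reported by the server.
class TimelineClientDemo {
  static void postOne() throws IOException, YarnException {
    TimelineClient client = TimelineClient.createTimelineClient();
    client.init(new org.apache.hadoop.yarn.conf.YarnConfiguration());
    client.start();
    try {
      TimelineEntity entity = new TimelineEntity();
      entity.setEntityType("DEMO_TYPE");
      entity.setEntityId("demo_entity_1");
      entity.setStartTime(System.currentTimeMillis());
      TimelinePutResponse response = client.putEntities(entity);
      // A non-empty error list means some entities were rejected.
      System.out.println("put errors: " + response.getErrors().size());
    } finally {
      client.stop();
    }
  }
}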
| 4,830 | 32.089041 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/package-info.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.client.api.impl;
| 856 | 41.85 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.client.api.impl;
import java.io.File;
import java.io.IOException;
import java.lang.reflect.UndeclaredThrowableException;
import java.net.ConnectException;
import java.net.HttpURLConnection;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URL;
import java.net.URLConnection;
import java.security.GeneralSecurityException;
import java.security.PrivilegedExceptionAction;
import java.util.Arrays;
import javax.net.ssl.HostnameVerifier;
import javax.net.ssl.HttpsURLConnection;
import javax.net.ssl.SSLSocketFactory;
import javax.ws.rs.core.MediaType;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Options;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
import org.apache.hadoop.security.ssl.SSLFactory;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticator;
import org.apache.hadoop.security.token.delegation.web.PseudoDelegationTokenAuthenticator;
import org.apache.hadoop.yarn.api.records.timeline.TimelineDomain;
import org.apache.hadoop.yarn.api.records.timeline.TimelineDomains;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
import org.apache.hadoop.yarn.client.api.TimelineClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider;
import org.codehaus.jackson.map.ObjectMapper;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.sun.jersey.api.client.Client;
import com.sun.jersey.api.client.ClientHandlerException;
import com.sun.jersey.api.client.ClientRequest;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.WebResource;
import com.sun.jersey.api.client.config.ClientConfig;
import com.sun.jersey.api.client.config.DefaultClientConfig;
import com.sun.jersey.api.client.filter.ClientFilter;
import com.sun.jersey.client.urlconnection.HttpURLConnectionFactory;
import com.sun.jersey.client.urlconnection.URLConnectionClientHandler;
@Private
@Evolving
public class TimelineClientImpl extends TimelineClient {
private static final Log LOG = LogFactory.getLog(TimelineClientImpl.class);
private static final String RESOURCE_URI_STR = "/ws/v1/timeline/";
private static final Joiner JOINER = Joiner.on("");
public final static int DEFAULT_SOCKET_TIMEOUT = 1 * 60 * 1000; // 1 minute
private static Options opts;
private static final String ENTITY_DATA_TYPE = "entity";
private static final String DOMAIN_DATA_TYPE = "domain";
static {
opts = new Options();
opts.addOption("put", true, "Put the timeline entities/domain in a JSON file");
opts.getOption("put").setArgName("Path to the JSON file");
opts.addOption(ENTITY_DATA_TYPE, false, "Specify the JSON file contains the entities");
opts.addOption(DOMAIN_DATA_TYPE, false, "Specify the JSON file contains the domain");
opts.addOption("help", false, "Print usage");
}
private Client client;
private ConnectionConfigurator connConfigurator;
private DelegationTokenAuthenticator authenticator;
private DelegationTokenAuthenticatedURL.Token token;
private URI resURI;
private UserGroupInformation authUgi;
private String doAsUser;
@Private
@VisibleForTesting
TimelineClientConnectionRetry connectionRetry;
// Abstract class for an operation that should be retried by timeline client
private static abstract class TimelineClientRetryOp {
// The operation that should be retried
public abstract Object run() throws IOException;
// The method to indicate if we should retry given the incoming exception
public abstract boolean shouldRetryOn(Exception e);
}
// Class to handle retry
// Outside this class, only visible to tests
@Private
@VisibleForTesting
static class TimelineClientConnectionRetry {
// maxRetries < 0 means keep trying
@Private
@VisibleForTesting
public int maxRetries;
@Private
@VisibleForTesting
public long retryInterval;
// Indicates if retries happened last time. Only tests should read it.
// In unit tests, retryOn() calls should _not_ be concurrent.
private boolean retried = false;
@Private
@VisibleForTesting
    boolean getRetried() {
return retried;
}
// Constructor with default retry settings
public TimelineClientConnectionRetry(Configuration conf) {
Preconditions.checkArgument(conf.getInt(
YarnConfiguration.TIMELINE_SERVICE_CLIENT_MAX_RETRIES,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_CLIENT_MAX_RETRIES) >= -1,
"%s property value should be greater than or equal to -1",
YarnConfiguration.TIMELINE_SERVICE_CLIENT_MAX_RETRIES);
Preconditions
.checkArgument(
conf.getLong(
YarnConfiguration.TIMELINE_SERVICE_CLIENT_RETRY_INTERVAL_MS,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_CLIENT_RETRY_INTERVAL_MS) > 0,
"%s property value should be greater than zero",
YarnConfiguration.TIMELINE_SERVICE_CLIENT_RETRY_INTERVAL_MS);
maxRetries = conf.getInt(
YarnConfiguration.TIMELINE_SERVICE_CLIENT_MAX_RETRIES,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_CLIENT_MAX_RETRIES);
retryInterval = conf.getLong(
YarnConfiguration.TIMELINE_SERVICE_CLIENT_RETRY_INTERVAL_MS,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_CLIENT_RETRY_INTERVAL_MS);
}
public Object retryOn(TimelineClientRetryOp op)
throws RuntimeException, IOException {
int leftRetries = maxRetries;
retried = false;
// keep trying
while (true) {
try {
// try perform the op, if fail, keep retrying
return op.run();
} catch (IOException | RuntimeException e) {
// break if there's no retries left
if (leftRetries == 0) {
break;
}
if (op.shouldRetryOn(e)) {
logException(e, leftRetries);
} else {
throw e;
}
}
if (leftRetries > 0) {
leftRetries--;
}
retried = true;
try {
// sleep for the given time interval
Thread.sleep(retryInterval);
} catch (InterruptedException ie) {
LOG.warn("Client retry sleep interrupted! ");
}
}
throw new RuntimeException("Failed to connect to timeline server. "
+ "Connection retries limit exceeded. "
+ "The posted timeline event may be missing");
    }
private void logException(Exception e, int leftRetries) {
if (leftRetries > 0) {
LOG.info("Exception caught by TimelineClientConnectionRetry,"
+ " will try " + leftRetries + " more time(s).\nMessage: "
+ e.getMessage());
} else {
// note that maxRetries may be -1 at the very beginning
LOG.info("ConnectionException caught by TimelineClientConnectionRetry,"
+ " will keep retrying.\nMessage: "
+ e.getMessage());
}
}
}
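  // A minimal sketch (hypothetical operation) of how retryOn() is driven:
  // wrap the action in a TimelineClientRetryOp and declare which exceptions
  // are retriable; anything else is rethrown immediately.
  private Object postWithRetry(final Object payload) throws IOException {
    return connectionRetry.retryOn(new TimelineClientRetryOp() {
      @Override
      public Object run() throws IOException {
        return doPostingObject(payload, null);
      }
      @Override
      public boolean shouldRetryOn(Exception e) {
        return e instanceof ConnectException; // only network-level failures
      }
    });
  }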
private class TimelineJerseyRetryFilter extends ClientFilter {
@Override
public ClientResponse handle(final ClientRequest cr)
throws ClientHandlerException {
// Set up the retry operation
TimelineClientRetryOp jerseyRetryOp = new TimelineClientRetryOp() {
@Override
public Object run() {
// Try pass the request, if fail, keep retrying
return getNext().handle(cr);
}
@Override
public boolean shouldRetryOn(Exception e) {
// Only retry on connection exceptions
return (e instanceof ClientHandlerException)
&& (e.getCause() instanceof ConnectException);
}
};
try {
return (ClientResponse) connectionRetry.retryOn(jerseyRetryOp);
} catch (IOException e) {
throw new ClientHandlerException("Jersey retry failed!\nMessage: "
+ e.getMessage());
}
}
}
public TimelineClientImpl() {
super(TimelineClientImpl.class.getName());
}
protected void serviceInit(Configuration conf) throws Exception {
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
UserGroupInformation realUgi = ugi.getRealUser();
if (realUgi != null) {
authUgi = realUgi;
doAsUser = ugi.getShortUserName();
} else {
authUgi = ugi;
doAsUser = null;
}
ClientConfig cc = new DefaultClientConfig();
cc.getClasses().add(YarnJacksonJaxbJsonProvider.class);
connConfigurator = newConnConfigurator(conf);
if (UserGroupInformation.isSecurityEnabled()) {
authenticator = new KerberosDelegationTokenAuthenticator();
} else {
authenticator = new PseudoDelegationTokenAuthenticator();
}
authenticator.setConnectionConfigurator(connConfigurator);
token = new DelegationTokenAuthenticatedURL.Token();
connectionRetry = new TimelineClientConnectionRetry(conf);
client = new Client(new URLConnectionClientHandler(
new TimelineURLConnectionFactory()), cc);
TimelineJerseyRetryFilter retryFilter = new TimelineJerseyRetryFilter();
client.addFilter(retryFilter);
if (YarnConfiguration.useHttps(conf)) {
resURI = URI
.create(JOINER.join("https://", conf.get(
YarnConfiguration.TIMELINE_SERVICE_WEBAPP_HTTPS_ADDRESS,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_WEBAPP_HTTPS_ADDRESS),
RESOURCE_URI_STR));
} else {
resURI = URI.create(JOINER.join("http://", conf.get(
YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_WEBAPP_ADDRESS),
RESOURCE_URI_STR));
}
LOG.info("Timeline service address: " + resURI);
super.serviceInit(conf);
}
@Override
public TimelinePutResponse putEntities(
TimelineEntity... entities) throws IOException, YarnException {
TimelineEntities entitiesContainer = new TimelineEntities();
entitiesContainer.addEntities(Arrays.asList(entities));
ClientResponse resp = doPosting(entitiesContainer, null);
return resp.getEntity(TimelinePutResponse.class);
}
@Override
public void putDomain(TimelineDomain domain) throws IOException,
YarnException {
doPosting(domain, "domain");
}
private ClientResponse doPosting(final Object obj, final String path)
throws IOException, YarnException {
ClientResponse resp;
try {
resp = authUgi.doAs(new PrivilegedExceptionAction<ClientResponse>() {
@Override
public ClientResponse run() throws Exception {
return doPostingObject(obj, path);
}
});
} catch (UndeclaredThrowableException e) {
throw new IOException(e.getCause());
} catch (InterruptedException ie) {
throw new IOException(ie);
}
if (resp == null ||
resp.getClientResponseStatus() != ClientResponse.Status.OK) {
String msg =
"Failed to get the response from the timeline server.";
LOG.error(msg);
if (LOG.isDebugEnabled() && resp != null) {
String output = resp.getEntity(String.class);
LOG.debug("HTTP error code: " + resp.getStatus()
+ " Server response : \n" + output);
}
throw new YarnException(msg);
}
return resp;
}
@SuppressWarnings("unchecked")
@Override
public Token<TimelineDelegationTokenIdentifier> getDelegationToken(
final String renewer) throws IOException, YarnException {
PrivilegedExceptionAction<Token<TimelineDelegationTokenIdentifier>> getDTAction =
new PrivilegedExceptionAction<Token<TimelineDelegationTokenIdentifier>>() {
@Override
public Token<TimelineDelegationTokenIdentifier> run()
throws Exception {
DelegationTokenAuthenticatedURL authUrl =
new DelegationTokenAuthenticatedURL(authenticator,
connConfigurator);
return (Token) authUrl.getDelegationToken(
resURI.toURL(), token, renewer, doAsUser);
}
};
return (Token<TimelineDelegationTokenIdentifier>) operateDelegationToken(getDTAction);
}
@SuppressWarnings("unchecked")
@Override
public long renewDelegationToken(
final Token<TimelineDelegationTokenIdentifier> timelineDT)
throws IOException, YarnException {
final boolean isTokenServiceAddrEmpty =
timelineDT.getService().toString().isEmpty();
final String scheme = isTokenServiceAddrEmpty ? null
: (YarnConfiguration.useHttps(this.getConfig()) ? "https" : "http");
final InetSocketAddress address = isTokenServiceAddrEmpty ? null
: SecurityUtil.getTokenServiceAddr(timelineDT);
PrivilegedExceptionAction<Long> renewDTAction =
new PrivilegedExceptionAction<Long>() {
@Override
public Long run() throws Exception {
            // If the timeline DT to renew is different from the cached one,
            // replace it. The token must be set on every retry, because
            // DelegationTokenAuthenticatedURL resets it to null whenever an
            // exception happens.
if (!timelineDT.equals(token.getDelegationToken())) {
token.setDelegationToken((Token) timelineDT);
}
DelegationTokenAuthenticatedURL authUrl =
new DelegationTokenAuthenticatedURL(authenticator,
connConfigurator);
// If the token service address is not available, fall back to use
// the configured service address.
final URI serviceURI = isTokenServiceAddrEmpty ? resURI
: new URI(scheme, null, address.getHostName(),
address.getPort(), RESOURCE_URI_STR, null, null);
return authUrl
.renewDelegationToken(serviceURI.toURL(), token, doAsUser);
}
};
return (Long) operateDelegationToken(renewDTAction);
}
@SuppressWarnings("unchecked")
@Override
public void cancelDelegationToken(
final Token<TimelineDelegationTokenIdentifier> timelineDT)
throws IOException, YarnException {
final boolean isTokenServiceAddrEmpty =
timelineDT.getService().toString().isEmpty();
final String scheme = isTokenServiceAddrEmpty ? null
: (YarnConfiguration.useHttps(this.getConfig()) ? "https" : "http");
final InetSocketAddress address = isTokenServiceAddrEmpty ? null
: SecurityUtil.getTokenServiceAddr(timelineDT);
PrivilegedExceptionAction<Void> cancelDTAction =
new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
            // If the timeline DT to cancel is different from the cached one,
            // replace it. The token must be set on every retry, because
            // DelegationTokenAuthenticatedURL resets it to null whenever an
            // exception happens.
if (!timelineDT.equals(token.getDelegationToken())) {
token.setDelegationToken((Token) timelineDT);
}
DelegationTokenAuthenticatedURL authUrl =
new DelegationTokenAuthenticatedURL(authenticator,
connConfigurator);
// If the token service address is not available, fall back to use
// the configured service address.
final URI serviceURI = isTokenServiceAddrEmpty ? resURI
: new URI(scheme, null, address.getHostName(),
address.getPort(), RESOURCE_URI_STR, null, null);
authUrl.cancelDelegationToken(serviceURI.toURL(), token, doAsUser);
return null;
}
};
operateDelegationToken(cancelDTAction);
}
private Object operateDelegationToken(
final PrivilegedExceptionAction<?> action)
throws IOException, YarnException {
// Set up the retry operation
TimelineClientRetryOp tokenRetryOp = new TimelineClientRetryOp() {
@Override
public Object run() throws IOException {
// Try pass the request, if fail, keep retrying
authUgi.checkTGTAndReloginFromKeytab();
try {
return authUgi.doAs(action);
} catch (UndeclaredThrowableException e) {
throw new IOException(e.getCause());
} catch (InterruptedException e) {
throw new IOException(e);
}
}
@Override
public boolean shouldRetryOn(Exception e) {
// Only retry on connection exceptions
return (e instanceof ConnectException);
}
};
return connectionRetry.retryOn(tokenRetryOp);
}
@Private
@VisibleForTesting
public ClientResponse doPostingObject(Object object, String path) {
WebResource webResource = client.resource(resURI);
if (path == null) {
return webResource.accept(MediaType.APPLICATION_JSON)
.type(MediaType.APPLICATION_JSON)
.post(ClientResponse.class, object);
} else if (path.equals("domain")) {
return webResource.path(path).accept(MediaType.APPLICATION_JSON)
.type(MediaType.APPLICATION_JSON)
.put(ClientResponse.class, object);
} else {
throw new YarnRuntimeException("Unknown resource type");
}
}
private class TimelineURLConnectionFactory
implements HttpURLConnectionFactory {
@Override
public HttpURLConnection getHttpURLConnection(final URL url) throws IOException {
authUgi.checkTGTAndReloginFromKeytab();
try {
return new DelegationTokenAuthenticatedURL(
authenticator, connConfigurator).openConnection(url, token,
doAsUser);
} catch (UndeclaredThrowableException e) {
throw new IOException(e.getCause());
} catch (AuthenticationException ae) {
throw new IOException(ae);
}
}
}
private static ConnectionConfigurator newConnConfigurator(Configuration conf) {
try {
return newSslConnConfigurator(DEFAULT_SOCKET_TIMEOUT, conf);
} catch (Exception e) {
LOG.debug("Cannot load customized ssl related configuration. " +
"Fallback to system-generic settings.", e);
return DEFAULT_TIMEOUT_CONN_CONFIGURATOR;
}
}
private static final ConnectionConfigurator DEFAULT_TIMEOUT_CONN_CONFIGURATOR =
new ConnectionConfigurator() {
@Override
public HttpURLConnection configure(HttpURLConnection conn)
throws IOException {
setTimeouts(conn, DEFAULT_SOCKET_TIMEOUT);
return conn;
}
};
private static ConnectionConfigurator newSslConnConfigurator(final int timeout,
Configuration conf) throws IOException, GeneralSecurityException {
final SSLFactory factory;
final SSLSocketFactory sf;
final HostnameVerifier hv;
factory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
factory.init();
sf = factory.createSSLSocketFactory();
hv = factory.getHostnameVerifier();
return new ConnectionConfigurator() {
@Override
public HttpURLConnection configure(HttpURLConnection conn)
throws IOException {
if (conn instanceof HttpsURLConnection) {
HttpsURLConnection c = (HttpsURLConnection) conn;
c.setSSLSocketFactory(sf);
c.setHostnameVerifier(hv);
}
setTimeouts(conn, timeout);
return conn;
}
};
}
private static void setTimeouts(URLConnection connection, int socketTimeout) {
connection.setConnectTimeout(socketTimeout);
connection.setReadTimeout(socketTimeout);
}
public static void main(String[] argv) throws Exception {
CommandLine cliParser = new GnuParser().parse(opts, argv);
if (cliParser.hasOption("put")) {
String path = cliParser.getOptionValue("put");
if (path != null && path.length() > 0) {
if (cliParser.hasOption(ENTITY_DATA_TYPE)) {
putTimelineDataInJSONFile(path, ENTITY_DATA_TYPE);
return;
} else if (cliParser.hasOption(DOMAIN_DATA_TYPE)) {
putTimelineDataInJSONFile(path, DOMAIN_DATA_TYPE);
return;
}
}
}
printUsage();
}
/**
* Put timeline data in a JSON file via command line.
*
* @param path
* path to the timeline data JSON file
* @param type
* the type of the timeline data in the JSON file
*/
private static void putTimelineDataInJSONFile(String path, String type) {
File jsonFile = new File(path);
if (!jsonFile.exists()) {
LOG.error("File [" + jsonFile.getAbsolutePath() + "] doesn't exist");
return;
}
ObjectMapper mapper = new ObjectMapper();
YarnJacksonJaxbJsonProvider.configObjectMapper(mapper);
TimelineEntities entities = null;
TimelineDomains domains = null;
try {
if (type.equals(ENTITY_DATA_TYPE)) {
entities = mapper.readValue(jsonFile, TimelineEntities.class);
} else if (type.equals(DOMAIN_DATA_TYPE)){
domains = mapper.readValue(jsonFile, TimelineDomains.class);
}
} catch (Exception e) {
LOG.error("Error when reading " + e.getMessage());
e.printStackTrace(System.err);
return;
}
Configuration conf = new YarnConfiguration();
TimelineClient client = TimelineClient.createTimelineClient();
client.init(conf);
client.start();
try {
if (UserGroupInformation.isSecurityEnabled()
&& conf.getBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, false)) {
Token<TimelineDelegationTokenIdentifier> token =
client.getDelegationToken(
UserGroupInformation.getCurrentUser().getUserName());
UserGroupInformation.getCurrentUser().addToken(token);
}
if (type.equals(ENTITY_DATA_TYPE)) {
TimelinePutResponse response = client.putEntities(
entities.getEntities().toArray(
new TimelineEntity[entities.getEntities().size()]));
if (response.getErrors().size() == 0) {
LOG.info("Timeline entities are successfully put");
} else {
for (TimelinePutResponse.TimelinePutError error : response.getErrors()) {
LOG.error("TimelineEntity [" + error.getEntityType() + ":" +
error.getEntityId() + "] is not successfully put. Error code: " +
error.getErrorCode());
}
}
} else if (type.equals(DOMAIN_DATA_TYPE)) {
boolean hasError = false;
for (TimelineDomain domain : domains.getDomains()) {
try {
client.putDomain(domain);
} catch (Exception e) {
LOG.error("Error when putting domain " + domain.getId(), e);
hasError = true;
}
}
if (!hasError) {
LOG.info("Timeline domains are successfully put");
}
}
    } catch (Exception e) {
      LOG.error("Error when putting the timeline data", e);
} finally {
client.stop();
}
}
/**
* Helper function to print out usage
*/
private static void printUsage() {
new HelpFormatter().printHelp("TimelineClient", opts);
}
@VisibleForTesting
@Private
public UserGroupInformation getUgi() {
return authUgi;
}
}
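// Example invocations of the command-line entry point above, assuming the
// class is launched with the YARN client classpath (e.g. via the `yarn`
// command) and the JSON files follow the TimelineEntities/TimelineDomains
// wire format:
//
//   yarn org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl \
//       -put /tmp/entities.json -entity
//   yarn org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl \
//       -put /tmp/domains.json -domain
//
// Omitting -put, or the -entity/-domain type flag, prints the usage text.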
| 25,266 | 37.110106 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.util;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.InputStreamReader;
import java.io.IOException;
import java.math.BigInteger;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.io.IOUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.CpuTimeTracker;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.SysInfoLinux;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
/**
* A Proc file-system based ProcessTree. Works only on Linux.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class ProcfsBasedProcessTree extends ResourceCalculatorProcessTree {
static final Log LOG = LogFactory
.getLog(ProcfsBasedProcessTree.class);
private static final String PROCFS = "/proc/";
  private static final Pattern PROCFS_STAT_FILE_FORMAT = Pattern.compile(
"^([0-9-]+)\\s([^\\s]+)\\s[^\\s]\\s([0-9-]+)\\s([0-9-]+)\\s([0-9-]+)\\s" +
"([0-9-]+\\s){7}([0-9]+)\\s([0-9]+)\\s([0-9-]+\\s){7}([0-9]+)\\s([0-9]+)" +
"(\\s[0-9-]+){15}");
public static final String PROCFS_STAT_FILE = "stat";
public static final String PROCFS_CMDLINE_FILE = "cmdline";
public static final long PAGE_SIZE = SysInfoLinux.PAGE_SIZE;
public static final long JIFFY_LENGTH_IN_MILLIS =
SysInfoLinux.JIFFY_LENGTH_IN_MILLIS; // in millisecond
private final CpuTimeTracker cpuTimeTracker;
private Clock clock;
enum MemInfo {
SIZE("Size"), RSS("Rss"), PSS("Pss"), SHARED_CLEAN("Shared_Clean"),
SHARED_DIRTY("Shared_Dirty"), PRIVATE_CLEAN("Private_Clean"),
PRIVATE_DIRTY("Private_Dirty"), REFERENCED("Referenced"), ANONYMOUS(
"Anonymous"), ANON_HUGE_PAGES("AnonHugePages"), SWAP("swap"),
KERNEL_PAGE_SIZE("kernelPageSize"), MMU_PAGE_SIZE("mmuPageSize"), INVALID(
"invalid");
private String name;
private MemInfo(String name) {
this.name = name;
}
public static MemInfo getMemInfoByName(String name) {
for (MemInfo info : MemInfo.values()) {
if (info.name.trim().equalsIgnoreCase(name.trim())) {
return info;
}
}
return INVALID;
}
}
public static final String SMAPS = "smaps";
public static final int KB_TO_BYTES = 1024;
private static final String KB = "kB";
private static final String READ_ONLY_WITH_SHARED_PERMISSION = "r--s";
private static final String READ_EXECUTE_WITH_SHARED_PERMISSION = "r-xs";
private static final Pattern ADDRESS_PATTERN = Pattern
.compile("([[a-f]|(0-9)]*)-([[a-f]|(0-9)]*)(\\s)*([rxwps\\-]*)");
private static final Pattern MEM_INFO_PATTERN = Pattern
.compile("(^[A-Z].*):[\\s ]*(.*)");
private boolean smapsEnabled;
protected Map<String, ProcessTreeSmapMemInfo> processSMAPTree =
new HashMap<String, ProcessTreeSmapMemInfo>();
// to enable testing, using this variable which can be configured
// to a test directory.
private String procfsDir;
static private String deadPid = "-1";
private String pid = deadPid;
static private Pattern numberPattern = Pattern.compile("[1-9][0-9]*");
private long cpuTime = UNAVAILABLE;
protected Map<String, ProcessInfo> processTree =
new HashMap<String, ProcessInfo>();
public ProcfsBasedProcessTree(String pid) {
this(pid, PROCFS, new SystemClock());
}
@Override
public void setConf(Configuration conf) {
super.setConf(conf);
if (conf != null) {
smapsEnabled =
conf.getBoolean(YarnConfiguration.PROCFS_USE_SMAPS_BASED_RSS_ENABLED,
YarnConfiguration.DEFAULT_PROCFS_USE_SMAPS_BASED_RSS_ENABLED);
}
}
public ProcfsBasedProcessTree(String pid, String procfsDir) {
this(pid, procfsDir, new SystemClock());
}
/**
* Build a new process tree rooted at the pid.
*
* This method is provided mainly for testing purposes, where
* the root of the proc file system can be adjusted.
*
* @param pid root of the process tree
* @param procfsDir the root of a proc file system - only used for testing.
* @param clock clock for controlling time for testing
*/
public ProcfsBasedProcessTree(String pid, String procfsDir, Clock clock) {
super(pid);
this.clock = clock;
this.pid = getValidPID(pid);
this.procfsDir = procfsDir;
this.cpuTimeTracker = new CpuTimeTracker(JIFFY_LENGTH_IN_MILLIS);
}
/**
* Checks if the ProcfsBasedProcessTree is available on this system.
*
* @return true if ProcfsBasedProcessTree is available. False otherwise.
*/
public static boolean isAvailable() {
try {
if (!Shell.LINUX) {
LOG.info("ProcfsBasedProcessTree currently is supported only on "
+ "Linux.");
return false;
}
} catch (SecurityException se) {
LOG.warn("Failed to get Operating System name. " + se);
return false;
}
return true;
}
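  // A minimal usage sketch (assumes a Linux host and a valid root pid):
  // refresh the tree from /proc, then read the aggregate memory numbers.
  // Age 0 includes every process in the tree; age 1 only counts processes
  // that were already present in an earlier updateProcessTree() snapshot.
  private static void demoUsage(String rootPid) {
    ProcfsBasedProcessTree tree = new ProcfsBasedProcessTree(rootPid);
    tree.setConf(new YarnConfiguration());
    tree.updateProcessTree();
    long vmemBytes = tree.getVirtualMemorySize(0);
    long rssBytes = tree.getRssMemorySize(1);
    LOG.info("vmem(bytes)=" + vmemBytes + ", rss(bytes)=" + rssBytes);
  }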
  /**
   * Update the process-tree with the latest state. If the root process is not
   * alive, the tree will be empty.
   */
@Override
public void updateProcessTree() {
if (!pid.equals(deadPid)) {
// Get the list of processes
List<String> processList = getProcessList();
Map<String, ProcessInfo> allProcessInfo = new HashMap<String, ProcessInfo>();
// cache the processTree to get the age for processes
Map<String, ProcessInfo> oldProcs =
new HashMap<String, ProcessInfo>(processTree);
processTree.clear();
ProcessInfo me = null;
for (String proc : processList) {
// Get information for each process
ProcessInfo pInfo = new ProcessInfo(proc);
if (constructProcessInfo(pInfo, procfsDir) != null) {
allProcessInfo.put(proc, pInfo);
if (proc.equals(this.pid)) {
me = pInfo; // cache 'me'
processTree.put(proc, pInfo);
}
}
}
if (me == null) {
return;
}
// Add each process to its parent.
for (Map.Entry<String, ProcessInfo> entry : allProcessInfo.entrySet()) {
String pID = entry.getKey();
if (!pID.equals("1")) {
ProcessInfo pInfo = entry.getValue();
ProcessInfo parentPInfo = allProcessInfo.get(pInfo.getPpid());
if (parentPInfo != null) {
parentPInfo.addChild(pInfo);
}
}
}
// now start constructing the process-tree
LinkedList<ProcessInfo> pInfoQueue = new LinkedList<ProcessInfo>();
pInfoQueue.addAll(me.getChildren());
while (!pInfoQueue.isEmpty()) {
ProcessInfo pInfo = pInfoQueue.remove();
if (!processTree.containsKey(pInfo.getPid())) {
processTree.put(pInfo.getPid(), pInfo);
}
pInfoQueue.addAll(pInfo.getChildren());
}
// update age values and compute the number of jiffies since last update
for (Map.Entry<String, ProcessInfo> procs : processTree.entrySet()) {
ProcessInfo oldInfo = oldProcs.get(procs.getKey());
if (procs.getValue() != null) {
procs.getValue().updateJiffy(oldInfo);
if (oldInfo != null) {
procs.getValue().updateAge(oldInfo);
}
}
}
if (LOG.isDebugEnabled()) {
// Log.debug the ProcfsBasedProcessTree
LOG.debug(this.toString());
}
if (smapsEnabled) {
//Update smaps info
processSMAPTree.clear();
for (ProcessInfo p : processTree.values()) {
if (p != null) {
// Get information for each process
ProcessTreeSmapMemInfo memInfo = new ProcessTreeSmapMemInfo(p.getPid());
constructProcessSMAPInfo(memInfo, procfsDir);
processSMAPTree.put(p.getPid(), memInfo);
}
}
}
}
}
  /** Verify that the given process id is the same as its process group id.
   * @return true if the process id matches its process group id, false
   *         otherwise.
   */
@Override
public boolean checkPidPgrpidForMatch() {
return checkPidPgrpidForMatch(pid, PROCFS);
}
public static boolean checkPidPgrpidForMatch(String _pid, String procfs) {
// Get information for this process
ProcessInfo pInfo = new ProcessInfo(_pid);
pInfo = constructProcessInfo(pInfo, procfs);
// null if process group leader finished execution; issue no warning
// make sure that pid and its pgrpId match
if (pInfo == null) return true;
String pgrpId = pInfo.getPgrpId().toString();
return pgrpId.equals(_pid);
}
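  // Format for one row of the dump produced by getProcessTreeDump() below;
  // columns are PID PPID PGRPID SESSID CMD_NAME USER_MODE_TIME SYSTEM_TIME
  // VMEM_USAGE RSSMEM_USAGE FULL_CMD_LINE. An illustrative (hypothetical)
  // row:
  //   |- 4325 4321 4321 4321 java 1200 300 1431699456 1793 /usr/bin/java App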
private static final String PROCESSTREE_DUMP_FORMAT =
"\t|- %s %s %d %d %s %d %d %d %d %s%n";
public List<String> getCurrentProcessIDs() {
List<String> currentPIDs = new ArrayList<String>();
currentPIDs.addAll(processTree.keySet());
return currentPIDs;
}
  /**
   * Get a dump of the process-tree.
   *
   * @return a string containing a dump of information for all the processes
   *         in the process-tree
   */
@Override
public String getProcessTreeDump() {
StringBuilder ret = new StringBuilder();
// The header.
ret.append(String.format("\t|- PID PPID PGRPID SESSID CMD_NAME "
+ "USER_MODE_TIME(MILLIS) SYSTEM_TIME(MILLIS) VMEM_USAGE(BYTES) "
+ "RSSMEM_USAGE(PAGES) FULL_CMD_LINE%n"));
for (ProcessInfo p : processTree.values()) {
if (p != null) {
ret.append(String.format(PROCESSTREE_DUMP_FORMAT, p.getPid(), p
.getPpid(), p.getPgrpId(), p.getSessionId(), p.getName(), p
.getUtime(), p.getStime(), p.getVmem(), p.getRssmemPage(), p
.getCmdLine(procfsDir)));
}
}
return ret.toString();
}
@Override
public long getVirtualMemorySize(int olderThanAge) {
long total = UNAVAILABLE;
for (ProcessInfo p : processTree.values()) {
if (p != null) {
if (total == UNAVAILABLE ) {
total = 0;
}
if (p.getAge() > olderThanAge) {
total += p.getVmem();
}
}
}
return total;
}
@Override
@SuppressWarnings("deprecation")
public long getCumulativeVmem(int olderThanAge) {
return getVirtualMemorySize(olderThanAge);
}
@Override
public long getRssMemorySize(int olderThanAge) {
if (PAGE_SIZE < 0) {
return UNAVAILABLE;
}
if (smapsEnabled) {
return getSmapBasedRssMemorySize(olderThanAge);
}
boolean isAvailable = false;
long totalPages = 0;
for (ProcessInfo p : processTree.values()) {
if ((p != null) ) {
if (p.getAge() > olderThanAge) {
totalPages += p.getRssmemPage();
}
isAvailable = true;
}
}
    return isAvailable ? totalPages * PAGE_SIZE : UNAVAILABLE; // convert # pages to bytes
}
@Override
@SuppressWarnings("deprecation")
public long getCumulativeRssmem(int olderThanAge) {
return getRssMemorySize(olderThanAge);
}
/**
* Get the resident set size (RSS) memory used by all the processes
* in the process-tree that are older than the passed in age. RSS is
* calculated based on SMAP information. Skip mappings with "r--s", "r-xs"
* permissions to get real RSS usage of the process.
*
* @param olderThanAge
* processes above this age are included in the memory addition
* @return rss memory used by the process-tree in bytes, for
* processes older than this age. return {@link #UNAVAILABLE} if it cannot
* be calculated.
*/
private long getSmapBasedRssMemorySize(int olderThanAge) {
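    // Per qualifying process and smaps mapping we charge, in kB,
    // min(shared_dirty, pss) + private_dirty + private_clean. As a worked
    // example with hypothetical numbers: a mapping with shared_dirty=8,
    // pss=6, private_dirty=4 and private_clean=2 contributes
    // min(8, 6) + 4 + 2 = 12 kB.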
long total = UNAVAILABLE;
for (ProcessInfo p : processTree.values()) {
if (p != null) {
// set resource to 0 instead of UNAVAILABLE
if (total == UNAVAILABLE){
total = 0;
}
if (p.getAge() > olderThanAge) {
ProcessTreeSmapMemInfo procMemInfo = processSMAPTree.get(p.getPid());
if (procMemInfo != null) {
for (ProcessSmapMemoryInfo info : procMemInfo.getMemoryInfoList()) {
// Do not account for r--s or r-xs mappings
if (info.getPermission().trim()
.equalsIgnoreCase(READ_ONLY_WITH_SHARED_PERMISSION)
|| info.getPermission().trim()
.equalsIgnoreCase(READ_EXECUTE_WITH_SHARED_PERMISSION)) {
continue;
}
total +=
Math.min(info.sharedDirty, info.pss) + info.privateDirty
+ info.privateClean;
if (LOG.isDebugEnabled()) {
LOG.debug(" total(" + olderThanAge + "): PID : " + p.getPid()
+ ", SharedDirty : " + info.sharedDirty + ", PSS : "
+ info.pss + ", Private_Dirty : " + info.privateDirty
+ ", Private_Clean : " + info.privateClean + ", total : "
+ (total * KB_TO_BYTES));
}
}
}
if (LOG.isDebugEnabled()) {
LOG.debug(procMemInfo.toString());
}
}
}
}
if (total > 0) {
total *= KB_TO_BYTES; // convert to bytes
}
LOG.info("SmapBasedCumulativeRssmem (bytes) : " + total);
return total; // size
}
@Override
public long getCumulativeCpuTime() {
if (JIFFY_LENGTH_IN_MILLIS < 0) {
return UNAVAILABLE;
}
long incJiffies = 0;
boolean isAvailable = false;
for (ProcessInfo p : processTree.values()) {
if (p != null) {
incJiffies += p.getDtime();
// data is available
isAvailable = true;
}
}
if (isAvailable) {
// reset cpuTime to 0 instead of UNAVAILABLE
if (cpuTime == UNAVAILABLE) {
cpuTime = 0L;
}
cpuTime += incJiffies * JIFFY_LENGTH_IN_MILLIS;
}
return cpuTime;
}
private BigInteger getTotalProcessJiffies() {
BigInteger totalStime = BigInteger.ZERO;
long totalUtime = 0;
for (ProcessInfo p : processTree.values()) {
if (p != null) {
totalUtime += p.getUtime();
totalStime = totalStime.add(p.getStime());
}
}
return totalStime.add(BigInteger.valueOf(totalUtime));
}
@Override
public float getCpuUsagePercent() {
BigInteger processTotalJiffies = getTotalProcessJiffies();
cpuTimeTracker.updateElapsedJiffies(processTotalJiffies,
clock.getTime());
return cpuTimeTracker.getCpuTrackerUsagePercent();
}
private static String getValidPID(String pid) {
if (pid == null) return deadPid;
Matcher m = numberPattern.matcher(pid);
if (m.matches()) return pid;
return deadPid;
}
/**
* Get the list of all processes in the system.
*/
private List<String> getProcessList() {
    String[] processDirs = (new File(procfsDir)).list();
    List<String> processList = new ArrayList<String>();
    if (processDirs == null) {
      // the procfs root is missing or unreadable; report no processes
      return processList;
    }
    for (String dir : processDirs) {
Matcher m = numberPattern.matcher(dir);
if (!m.matches()) continue;
try {
if ((new File(procfsDir, dir)).isDirectory()) {
processList.add(dir);
}
} catch (SecurityException s) {
// skip this process
}
}
return processList;
}
  /**
   * Construct the ProcessInfo using the process' PID and a procfs rooted at
   * the specified directory and return the same. The procfs root is
   * parameterized mainly to assist testing.
   *
   * Returns null on failing to read from procfs.
   *
   * @param pinfo ProcessInfo that needs to be updated
   * @param procfsDir root of the proc file system
   * @return updated ProcessInfo, null on errors.
   */
private static ProcessInfo constructProcessInfo(ProcessInfo pinfo,
String procfsDir) {
ProcessInfo ret = null;
// Read "procfsDir/<pid>/stat" file - typically /proc/<pid>/stat
BufferedReader in = null;
InputStreamReader fReader = null;
try {
File pidDir = new File(procfsDir, pinfo.getPid());
fReader = new InputStreamReader(
new FileInputStream(
new File(pidDir, PROCFS_STAT_FILE)), Charset.forName("UTF-8"));
in = new BufferedReader(fReader);
} catch (FileNotFoundException f) {
// The process vanished in the interim!
return ret;
}
ret = pinfo;
try {
String str = in.readLine(); // only one line
Matcher m = PROCFS_STAT_FILE_FORMAT.matcher(str);
boolean mat = m.find();
if (mat) {
// Set (name) (ppid) (pgrpId) (session) (utime) (stime) (vsize) (rss)
pinfo.updateProcessInfo(m.group(2), m.group(3),
Integer.parseInt(m.group(4)), Integer.parseInt(m.group(5)),
Long.parseLong(m.group(7)), new BigInteger(m.group(8)),
Long.parseLong(m.group(10)), Long.parseLong(m.group(11)));
} else {
LOG.warn("Unexpected: procfs stat file is not in the expected format"
+ " for process with pid " + pinfo.getPid());
ret = null;
}
} catch (IOException io) {
LOG.warn("Error reading the stream " + io);
ret = null;
} finally {
// Close the streams
try {
fReader.close();
try {
in.close();
} catch (IOException i) {
LOG.warn("Error closing the stream " + in);
}
} catch (IOException i) {
LOG.warn("Error closing the stream " + fReader);
}
}
return ret;
}
  /**
   * Returns a string printing PIDs of processes present in the
   * ProcfsBasedProcessTree. Output format : [pid pid ..]
   */
  @Override
  public String toString() {
    StringBuilder pTree = new StringBuilder("[ ");
    for (String p : processTree.keySet()) {
      pTree.append(p);
      pTree.append(" ");
    }
    return pTree.append("]").toString();
  }
  /**
   * Class containing information about a single process.
   */
private static class ProcessInfo {
private String pid; // process-id
private String name; // command name
private Integer pgrpId; // process group-id
private String ppid; // parent process-id
private Integer sessionId; // session-id
private Long vmem; // virtual memory usage
private Long rssmemPage; // rss memory usage in # of pages
private Long utime = 0L; // # of jiffies in user mode
    private static final BigInteger MAX_LONG = BigInteger.valueOf(Long.MAX_VALUE);
private BigInteger stime = new BigInteger("0"); // # of jiffies in kernel mode
// how many times has this process been seen alive
private int age;
// # of jiffies used since last update:
private Long dtime = 0L;
// dtime = (utime + stime) - (utimeOld + stimeOld)
// We need this to compute the cumulative CPU time
// because the subprocess may finish earlier than root process
private List<ProcessInfo> children = new ArrayList<ProcessInfo>(); // list of children
public ProcessInfo(String pid) {
this.pid = pid;
// seeing this the first time.
this.age = 1;
}
public String getPid() {
return pid;
}
public String getName() {
return name;
}
public Integer getPgrpId() {
return pgrpId;
}
public String getPpid() {
return ppid;
}
public Integer getSessionId() {
return sessionId;
}
public Long getVmem() {
return vmem;
}
public Long getUtime() {
return utime;
}
public BigInteger getStime() {
return stime;
}
public Long getDtime() {
return dtime;
}
public Long getRssmemPage() { // get rss # of pages
return rssmemPage;
}
public int getAge() {
return age;
}
public void updateProcessInfo(String name, String ppid, Integer pgrpId,
Integer sessionId, Long utime, BigInteger stime, Long vmem, Long rssmem) {
this.name = name;
this.ppid = ppid;
this.pgrpId = pgrpId;
this.sessionId = sessionId;
this.utime = utime;
this.stime = stime;
this.vmem = vmem;
this.rssmemPage = rssmem;
}
public void updateJiffy(ProcessInfo oldInfo) {
if (oldInfo == null) {
BigInteger sum = this.stime.add(BigInteger.valueOf(this.utime));
if (sum.compareTo(MAX_LONG) > 0) {
this.dtime = 0L;
LOG.warn("Sum of stime (" + this.stime + ") and utime (" + this.utime
+ ") is greater than " + Long.MAX_VALUE);
} else {
this.dtime = sum.longValue();
}
return;
}
this.dtime = (this.utime - oldInfo.utime +
this.stime.subtract(oldInfo.stime).longValue());
}
public void updateAge(ProcessInfo oldInfo) {
this.age = oldInfo.age + 1;
}
public boolean addChild(ProcessInfo p) {
return children.add(p);
}
public List<ProcessInfo> getChildren() {
return children;
}
public String getCmdLine(String procfsDir) {
String ret = "N/A";
if (pid == null) {
return ret;
}
BufferedReader in = null;
InputStreamReader fReader = null;
try {
fReader = new InputStreamReader(
new FileInputStream(
new File(new File(procfsDir, pid.toString()), PROCFS_CMDLINE_FILE)),
Charset.forName("UTF-8"));
} catch (FileNotFoundException f) {
// The process vanished in the interim!
return ret;
}
in = new BufferedReader(fReader);
try {
ret = in.readLine(); // only one line
if (ret == null) {
ret = "N/A";
} else {
ret = ret.replace('\0', ' '); // Replace each null char with a space
if (ret.equals("")) {
// The cmdline might be empty because the process is swapped out or
// is a zombie.
ret = "N/A";
}
}
} catch (IOException io) {
LOG.warn("Error reading the stream " + io);
ret = "N/A";
} finally {
// Close the streams
try {
fReader.close();
try {
in.close();
} catch (IOException i) {
LOG.warn("Error closing the stream " + in);
}
} catch (IOException i) {
LOG.warn("Error closing the stream " + fReader);
}
}
return ret;
}
}
  /**
   * Update memory-related information for the given process from its
   * smaps file.
   *
   * @param pInfo the process's SMAP memory info holder to populate
   * @param procfsDir root of the proc file system
   */
private static void constructProcessSMAPInfo(ProcessTreeSmapMemInfo pInfo,
String procfsDir) {
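    // /proc/<pid>/smaps alternates one header line per mapping with that
    // mapping's counters, e.g. (illustrative lines):
    //   00400000-00452000 r-xp 00000000 08:02 173521   /usr/bin/foo
    //   Rss:                 108 kB
    //   Pss:                  96 kB
    //   Private_Dirty:         0 kB
    // ADDRESS_PATTERN matches the header line (capturing the permission
    // string) and MEM_INFO_PATTERN matches the "<key>: <value> kB" lines.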
BufferedReader in = null;
InputStreamReader fReader = null;
try {
File pidDir = new File(procfsDir, pInfo.getPid());
File file = new File(pidDir, SMAPS);
if (!file.exists()) {
return;
}
fReader = new InputStreamReader(
new FileInputStream(file), Charset.forName("UTF-8"));
in = new BufferedReader(fReader);
ProcessSmapMemoryInfo memoryMappingInfo = null;
List<String> lines = IOUtils.readLines(in);
for (String line : lines) {
line = line.trim();
try {
Matcher address = ADDRESS_PATTERN.matcher(line);
if (address.find()) {
memoryMappingInfo = new ProcessSmapMemoryInfo(line);
memoryMappingInfo.setPermission(address.group(4));
pInfo.getMemoryInfoList().add(memoryMappingInfo);
continue;
}
Matcher memInfo = MEM_INFO_PATTERN.matcher(line);
if (memInfo.find()) {
String key = memInfo.group(1).trim();
String value = memInfo.group(2).replace(KB, "").trim();
if (LOG.isDebugEnabled()) {
LOG.debug("MemInfo : " + key + " : Value : " + value);
}
memoryMappingInfo.setMemInfo(key, value);
}
        } catch (Throwable t) {
          LOG.warn("Error parsing smaps line : " + line + "; "
              + t.getMessage());
        }
}
} catch (FileNotFoundException f) {
LOG.error(f.getMessage());
} catch (IOException e) {
LOG.error(e.getMessage());
} catch (Throwable t) {
LOG.error(t.getMessage());
} finally {
IOUtils.closeQuietly(in);
}
}
  /**
   * Container for a process's SMAPS information.
   */
static class ProcessTreeSmapMemInfo {
private String pid;
private List<ProcessSmapMemoryInfo> memoryInfoList;
public ProcessTreeSmapMemInfo(String pid) {
this.pid = pid;
this.memoryInfoList = new LinkedList<ProcessSmapMemoryInfo>();
}
public List<ProcessSmapMemoryInfo> getMemoryInfoList() {
return memoryInfoList;
}
public String getPid() {
return pid;
}
public String toString() {
StringBuilder sb = new StringBuilder();
for (ProcessSmapMemoryInfo info : memoryInfoList) {
sb.append("\n");
sb.append(info.toString());
}
return sb.toString();
}
}
/**
* <pre>
* Private Pages : Pages that were mapped only by the process
* Shared Pages : Pages that were shared with other processes
*
* Clean Pages : Pages that have not been modified since they were mapped
* Dirty Pages : Pages that have been modified since they were mapped
*
* Private RSS = Private Clean Pages + Private Dirty Pages
* Shared RSS = Shared Clean Pages + Shared Dirty Pages
* RSS = Private RSS + Shared RSS
* PSS = The count of all pages mapped uniquely by the process,
* plus a fraction of each shared page, said fraction to be
* proportional to the number of processes which have mapped the page.
*
* </pre>
*/
static class ProcessSmapMemoryInfo {
private int size;
private int rss;
private int pss;
private int sharedClean;
private int sharedDirty;
private int privateClean;
private int privateDirty;
private int referenced;
private String regionName;
private String permission;
public ProcessSmapMemoryInfo(String name) {
this.regionName = name;
}
public String getName() {
return regionName;
}
public void setPermission(String permission) {
this.permission = permission;
}
public String getPermission() {
return permission;
}
public int getSize() {
return size;
}
public int getRss() {
return rss;
}
public int getPss() {
return pss;
}
public int getSharedClean() {
return sharedClean;
}
public int getSharedDirty() {
return sharedDirty;
}
public int getPrivateClean() {
return privateClean;
}
public int getPrivateDirty() {
return privateDirty;
}
public int getReferenced() {
return referenced;
}
    public void setMemInfo(String key, String value) {
      MemInfo info = MemInfo.getMemInfoByName(key);
      if (info == null) {
        // unrecognized smaps key; nothing to record
        return;
      }
      int val = 0;
      try {
        val = Integer.parseInt(value.trim());
      } catch (NumberFormatException ne) {
        LOG.error("Error in parsing : " + info + " : value " + value.trim());
        return;
      }
if (LOG.isDebugEnabled()) {
LOG.debug("setMemInfo : memInfo : " + info);
}
switch (info) {
case SIZE:
size = val;
break;
case RSS:
rss = val;
break;
case PSS:
pss = val;
break;
case SHARED_CLEAN:
sharedClean = val;
break;
case SHARED_DIRTY:
sharedDirty = val;
break;
case PRIVATE_CLEAN:
privateClean = val;
break;
case PRIVATE_DIRTY:
privateDirty = val;
break;
case REFERENCED:
referenced = val;
break;
default:
break;
}
}
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("\t").append(this.getName()).append("\n");
sb.append("\t").append(MemInfo.SIZE.name + ":" + this.getSize())
.append(" kB\n");
sb.append("\t").append(MemInfo.PSS.name + ":" + this.getPss())
.append(" kB\n");
sb.append("\t").append(MemInfo.RSS.name + ":" + this.getRss())
.append(" kB\n");
sb.append("\t")
.append(MemInfo.SHARED_CLEAN.name + ":" + this.getSharedClean())
.append(" kB\n");
sb.append("\t")
.append(MemInfo.SHARED_DIRTY.name + ":" + this.getSharedDirty())
.append(" kB\n");
sb.append("\t")
.append(MemInfo.PRIVATE_CLEAN.name + ":" + this.getPrivateClean())
.append(" kB\n");
sb.append("\t")
.append(MemInfo.PRIVATE_DIRTY.name + ":" + this.getPrivateDirty())
.append(" kB\n");
sb.append("\t")
.append(MemInfo.REFERENCED.name + ":" + this.getReferenced())
.append(" kB\n");
sb.append("\t")
.append(MemInfo.PRIVATE_DIRTY.name + ":" + this.getPrivateDirty())
.append(" kB\n");
sb.append("\t")
.append(MemInfo.PRIVATE_DIRTY.name + ":" + this.getPrivateDirty())
.append(" kB\n");
return sb.toString();
}
}
  /**
   * Test the {@link ProcfsBasedProcessTree}.
   *
   * @param args the pid of the process whose tree is to be monitored
   */
public static void main(String[] args) {
if (args.length != 1) {
System.out.println("Provide <pid of process to monitor>");
return;
}
int numprocessors =
ResourceCalculatorPlugin.getResourceCalculatorPlugin(null, null)
.getNumProcessors();
System.out.println("Number of processors " + numprocessors);
System.out.println("Creating ProcfsBasedProcessTree for process " +
args[0]);
ProcfsBasedProcessTree procfsBasedProcessTree = new
ProcfsBasedProcessTree(args[0]);
procfsBasedProcessTree.updateProcessTree();
System.out.println(procfsBasedProcessTree.getProcessTreeDump());
System.out.println("Get cpu usage " + procfsBasedProcessTree
.getCpuUsagePercent());
try {
// Sleep so we can compute the CPU usage
Thread.sleep(500L);
} catch (InterruptedException e) {
// do nothing
}
procfsBasedProcessTree.updateProcessTree();
System.out.println(procfsBasedProcessTree.getProcessTreeDump());
System.out.println("Cpu usage " + procfsBasedProcessTree
.getCpuUsagePercent());
System.out.println("Vmem usage in bytes " + procfsBasedProcessTree
.getVirtualMemorySize());
System.out.println("Rss mem usage in bytes " + procfsBasedProcessTree
.getRssMemorySize());
}
}
| 31,563 | 29.614937 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/RMHAUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.util;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ha.HAServiceProtocol;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.ha.HAServiceTarget;
import org.apache.hadoop.yarn.client.RMHAServiceTarget;
import org.apache.hadoop.yarn.conf.HAUtil;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
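/**
 * Helpers for locating the active ResourceManager in an HA setup. A minimal
 * usage sketch, assuming the RM HA ids are configured:
 *
 * <pre>
 *   YarnConfiguration conf = new YarnConfiguration();
 *   String activeId = RMHAUtils.findActiveRMHAId(conf); // null if none active
 * </pre>
 */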
@Private
@Unstable
public class RMHAUtils {
public static String findActiveRMHAId(YarnConfiguration conf) {
YarnConfiguration yarnConf = new YarnConfiguration(conf);
Collection<String> rmIds =
yarnConf.getStringCollection(YarnConfiguration.RM_HA_IDS);
for (String currentId : rmIds) {
yarnConf.set(YarnConfiguration.RM_HA_ID, currentId);
try {
HAServiceState haState = getHAState(yarnConf);
if (haState.equals(HAServiceState.ACTIVE)) {
return currentId;
}
} catch (Exception e) {
// Couldn't check if this RM is active. Do nothing. Worst case,
// we wouldn't find an Active RM and return null.
}
}
return null; // Couldn't find an Active RM
}
private static HAServiceState getHAState(YarnConfiguration yarnConf)
throws Exception {
HAServiceTarget haServiceTarget;
int rpcTimeoutForChecks =
yarnConf.getInt(CommonConfigurationKeys.HA_FC_CLI_CHECK_TIMEOUT_KEY,
CommonConfigurationKeys.HA_FC_CLI_CHECK_TIMEOUT_DEFAULT);
yarnConf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
yarnConf.get(YarnConfiguration.RM_PRINCIPAL, ""));
haServiceTarget = new RMHAServiceTarget(yarnConf);
HAServiceProtocol proto =
haServiceTarget.getProxy(yarnConf, rpcTimeoutForChecks);
HAServiceState haState = proto.getServiceStatus().getState();
return haState;
}
public static List<String> getRMHAWebappAddresses(
final YarnConfiguration conf) {
String prefix;
String defaultPort;
if (YarnConfiguration.useHttps(conf)) {
prefix = YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS;
defaultPort = ":" + YarnConfiguration.DEFAULT_RM_WEBAPP_HTTPS_PORT;
} else {
      prefix = YarnConfiguration.RM_WEBAPP_ADDRESS;
defaultPort = ":" + YarnConfiguration.DEFAULT_RM_WEBAPP_PORT;
}
Collection<String> rmIds =
conf.getStringCollection(YarnConfiguration.RM_HA_IDS);
List<String> addrs = new ArrayList<String>();
for (String id : rmIds) {
String addr = conf.get(HAUtil.addSuffix(prefix, id));
if (addr == null) {
String hostname =
conf.get(HAUtil.addSuffix(YarnConfiguration.RM_HOSTNAME, id));
if (hostname != null) {
addr = hostname + defaultPort;
}
}
if (addr != null) {
addrs.add(addr);
}
}
return addrs;
}
}
| 3,854 | 36.427184 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Public
package org.apache.hadoop.yarn.util;
import org.apache.hadoop.classification.InterfaceAudience;
| 929 | 41.272727 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/RackResolver.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.util;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.net.CachedDNSToSwitchMapping;
import org.apache.hadoop.net.DNSToSwitchMapping;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.net.NodeBase;
import org.apache.hadoop.net.ScriptBasedMapping;
import org.apache.hadoop.util.ReflectionUtils;
import com.google.common.annotations.VisibleForTesting;
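/**
 * Resolves hostnames to racks using the configured
 * {@link DNSToSwitchMapping} implementation. A minimal usage sketch (the
 * hostname below is illustrative; conf is some {@link Configuration}):
 *
 * <pre>
 *   RackResolver.init(conf);
 *   Node node = RackResolver.resolve("host1.example.com");
 *   String rack = node.getNetworkLocation();
 * </pre>
 */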
@InterfaceAudience.LimitedPrivate({"YARN", "MAPREDUCE"})
public class RackResolver {
private static DNSToSwitchMapping dnsToSwitchMapping;
private static boolean initCalled = false;
private static final Log LOG = LogFactory.getLog(RackResolver.class);
public synchronized static void init(Configuration conf) {
if (initCalled) {
return;
} else {
initCalled = true;
}
Class<? extends DNSToSwitchMapping> dnsToSwitchMappingClass =
conf.getClass(
CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
ScriptBasedMapping.class,
DNSToSwitchMapping.class);
try {
DNSToSwitchMapping newInstance = ReflectionUtils.newInstance(
dnsToSwitchMappingClass, conf);
// Wrap around the configured class with the Cached implementation so as
// to save on repetitive lookups.
// Check if the impl is already caching, to avoid double caching.
dnsToSwitchMapping =
((newInstance instanceof CachedDNSToSwitchMapping) ? newInstance
: new CachedDNSToSwitchMapping(newInstance));
} catch (Exception e) {
throw new RuntimeException(e);
}
}
  /**
   * Utility method for getting a hostname resolved to a node in the
   * network topology. This method initializes the class with the
   * right resolver implementation.
   * @param conf configuration used to select the resolver implementation
   * @param hostName the hostname to resolve
   * @return node {@link Node} after resolving the hostname
   */
public static Node resolve(Configuration conf, String hostName) {
init(conf);
return coreResolve(hostName);
}
  /**
   * Utility method for getting a hostname resolved to a node in the
   * network topology. This method doesn't initialize the class.
   * Call {@link #init(Configuration)} explicitly.
   * @param hostName the hostname to resolve
   * @return node {@link Node} after resolving the hostname
   */
public static Node resolve(String hostName) {
if (!initCalled) {
throw new IllegalStateException("RackResolver class not yet initialized");
}
return coreResolve(hostName);
}
private static Node coreResolve(String hostName) {
    List<String> tmpList = new ArrayList<String>(1);
    tmpList.add(hostName);
    List<String> rNameList = dnsToSwitchMapping.resolve(tmpList);
String rName = null;
if (rNameList == null || rNameList.get(0) == null) {
rName = NetworkTopology.DEFAULT_RACK;
if (LOG.isDebugEnabled()) {
LOG.debug("Couldn't resolve " + hostName + ". Falling back to "
+ NetworkTopology.DEFAULT_RACK);
}
} else {
rName = rNameList.get(0);
if (LOG.isDebugEnabled()) {
LOG.debug("Resolved " + hostName + " to " + rName);
}
}
return new NodeBase(hostName, rName);
}
/**
* Only used by tests
*/
@Private
@VisibleForTesting
  static DNSToSwitchMapping getDnsToSwitchMapping() {
return dnsToSwitchMapping;
}
}
| 4,490 | 34.362205 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Log4jWarningErrorMetricsAppender.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.util;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.log4j.spi.LoggingEvent;
import java.util.*;
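/**
 * A log4j appender that keeps in-memory counts of recent WARN and ERROR
 * messages so they can be surfaced through the accessor methods below.
 * Messages are deduplicated by their rendered text and purged periodically
 * both by age and by unique-message count.
 */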
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class Log4jWarningErrorMetricsAppender extends AppenderSkeleton {
public static final String LOG_METRICS_APPENDER = "RM_LOG_METRICS_APPENDER";
static final int MAX_MESSAGE_SIZE = 2048;
static public class Element {
public Long count;
public Long timestampSeconds;
Element(Long count, Long timestampSeconds) {
this.count = count;
this.timestampSeconds = timestampSeconds;
}
}
static class PurgeElement implements Comparable<PurgeElement> {
String message;
Long timestamp;
PurgeElement(String message, Long timestamp) {
this.message = message;
this.timestamp = timestamp;
}
public int compareTo(PurgeElement e) {
if (e == null) {
throw new NullPointerException("Null element passed to compareTo");
}
int ret = this.timestamp.compareTo(e.timestamp);
if (ret != 0) {
return ret;
}
return this.message.compareTo(e.message);
}
@Override
public boolean equals(Object e) {
if (e == null || !(e instanceof PurgeElement)) {
return false;
}
if (e == this) {
return true;
}
PurgeElement el = (PurgeElement) e;
return (this.message.equals(el.message))
&& (this.timestamp.equals(el.timestamp));
}
@Override
public int hashCode() {
return this.timestamp.hashCode();
}
}
Map<String, SortedMap<Long, Integer>> errors;
Map<String, SortedMap<Long, Integer>> warnings;
SortedMap<Long, Integer> errorsTimestampCount;
SortedMap<Long, Integer> warningsTimestampCount;
SortedSet<PurgeElement> errorsPurgeInformation;
SortedSet<PurgeElement> warningsPurgeInformation;
Timer cleanupTimer;
long cleanupInterval;
long messageAgeLimitSeconds;
int maxUniqueMessages;
final Object lock = new Object();
  /**
   * Create an appender to keep track of the errors and warnings logged by
   * the system. It purges messages older than 1 day and stores up to the
   * last 250 unique errors and the last 250 unique warnings. The purge
   * thread runs every 5 minutes, unless 500 unique messages of a type
   * accumulate earlier.
   */
public Log4jWarningErrorMetricsAppender() {
this(5 * 60, 24 * 60 * 60, 250);
}
/**
* Create an appender to keep track of the errors and warnings logged by the
* system.
*
* @param cleanupIntervalSeconds
* the interval at which old messages are purged to prevent the
* message stores from growing unbounded
* @param messageAgeLimitSeconds
* the maximum age of a message in seconds before it is purged from
* the store
* @param maxUniqueMessages
* the maximum number of unique messages of each type we keep before
* we start purging
*/
public Log4jWarningErrorMetricsAppender(int cleanupIntervalSeconds,
long messageAgeLimitSeconds, int maxUniqueMessages) {
super();
errors = new HashMap<>();
warnings = new HashMap<>();
errorsTimestampCount = new TreeMap<>();
warningsTimestampCount = new TreeMap<>();
errorsPurgeInformation = new TreeSet<>();
warningsPurgeInformation = new TreeSet<>();
cleanupTimer = new Timer();
cleanupInterval = cleanupIntervalSeconds * 1000;
cleanupTimer.schedule(new ErrorAndWarningsCleanup(), cleanupInterval);
this.messageAgeLimitSeconds = messageAgeLimitSeconds;
this.maxUniqueMessages = maxUniqueMessages;
this.setName(LOG_METRICS_APPENDER);
this.setThreshold(Level.WARN);
}
/**
* {@inheritDoc}
*/
@Override
protected void append(LoggingEvent event) {
String message = event.getRenderedMessage();
String[] throwableStr = event.getThrowableStrRep();
if (throwableStr != null) {
message = message + "\n" + StringUtils.join("\n", throwableStr);
message =
org.apache.commons.lang.StringUtils.left(message, MAX_MESSAGE_SIZE);
}
int level = event.getLevel().toInt();
if (level == Level.WARN_INT || level == Level.ERROR_INT) {
// store second level information
Long eventTimeSeconds = event.getTimeStamp() / 1000;
Map<String, SortedMap<Long, Integer>> map;
SortedMap<Long, Integer> timestampsCount;
SortedSet<PurgeElement> purgeInformation;
if (level == Level.WARN_INT) {
map = warnings;
timestampsCount = warningsTimestampCount;
purgeInformation = warningsPurgeInformation;
} else {
map = errors;
timestampsCount = errorsTimestampCount;
purgeInformation = errorsPurgeInformation;
}
updateMessageDetails(message, eventTimeSeconds, map, timestampsCount,
purgeInformation);
}
}
private void updateMessageDetails(String message, Long eventTimeSeconds,
Map<String, SortedMap<Long, Integer>> map,
SortedMap<Long, Integer> timestampsCount,
SortedSet<PurgeElement> purgeInformation) {
synchronized (lock) {
if (map.containsKey(message)) {
SortedMap<Long, Integer> tmp = map.get(message);
Long lastMessageTime = tmp.lastKey();
int value = 1;
if (tmp.containsKey(eventTimeSeconds)) {
value = tmp.get(eventTimeSeconds) + 1;
}
tmp.put(eventTimeSeconds, value);
purgeInformation.remove(new PurgeElement(message, lastMessageTime));
} else {
SortedMap<Long, Integer> value = new TreeMap<>();
value.put(eventTimeSeconds, 1);
map.put(message, value);
if (map.size() > maxUniqueMessages * 2) {
cleanupTimer.cancel();
cleanupTimer = new Timer();
cleanupTimer.schedule(new ErrorAndWarningsCleanup(), 0);
}
}
purgeInformation.add(new PurgeElement(message, eventTimeSeconds));
int newValue = 1;
if (timestampsCount.containsKey(eventTimeSeconds)) {
newValue = timestampsCount.get(eventTimeSeconds) + 1;
}
timestampsCount.put(eventTimeSeconds, newValue);
}
}
/**
* {@inheritDoc}
*/
@Override
public void close() {
cleanupTimer.cancel();
}
/**
* {@inheritDoc}
*/
@Override
public boolean requiresLayout() {
return false;
}
/**
* Get the counts of errors in the time periods provided. Note that the counts
* provided by this function may differ from the ones provided by
* getErrorMessagesAndCounts since the message store is purged at regular
* intervals to prevent it from growing without bounds, while the store for
* the counts is purged less frequently.
*
* @param cutoffs
* list of timestamp cutoffs(in seconds) for which the counts are
* desired
* @return list of error counts in the time periods corresponding to cutoffs
*/
public List<Integer> getErrorCounts(List<Long> cutoffs) {
return this.getCounts(errorsTimestampCount, cutoffs);
}
/**
* Get the counts of warnings in the time periods provided. Note that the
* counts provided by this function may differ from the ones provided by
* getWarningMessagesAndCounts since the message store is purged at regular
* intervals to prevent it from growing without bounds, while the store for
* the counts is purged less frequently.
*
* @param cutoffs
* list of timestamp cutoffs(in seconds) for which the counts are
* desired
* @return list of warning counts in the time periods corresponding to cutoffs
*/
public List<Integer> getWarningCounts(List<Long> cutoffs) {
return this.getCounts(warningsTimestampCount, cutoffs);
}
private List<Integer> getCounts(SortedMap<Long, Integer> map,
List<Long> cutoffs) {
List<Integer> ret = new ArrayList<>();
    // the earliest cutoff covers every requested window
    Long earliestCutoff = Collections.min(cutoffs);
for (int i = 0; i < cutoffs.size(); ++i) {
ret.add(0);
}
synchronized (lock) {
      Map<Long, Integer> submap = map.tailMap(earliestCutoff);
for (Map.Entry<Long, Integer> entry : submap.entrySet()) {
for (int i = 0; i < cutoffs.size(); ++i) {
if (entry.getKey() >= cutoffs.get(i)) {
int tmp = ret.get(i);
ret.set(i, tmp + entry.getValue());
}
}
}
}
return ret;
}
/**
* Get the errors and the number of occurrences for each of the errors for the
* time cutoffs provided. Note that the counts provided by this function may
* differ from the ones provided by getErrorCounts since the message store is
* purged at regular intervals to prevent it from growing without bounds,
* while the store for the counts is purged less frequently.
*
* @param cutoffs
* list of timestamp cutoffs(in seconds) for which the counts are
* desired
* @return list of maps corresponding for each cutoff provided; each map
* contains the error and the number of times the error occurred in
* the time period
*/
public List<Map<String, Element>>
getErrorMessagesAndCounts(List<Long> cutoffs) {
return this.getElementsAndCounts(errors, cutoffs, errorsPurgeInformation);
}
/**
* Get the warning and the number of occurrences for each of the warnings for
* the time cutoffs provided. Note that the counts provided by this function
* may differ from the ones provided by getWarningCounts since the message
* store is purged at regular intervals to prevent it from growing without
* bounds, while the store for the counts is purged less frequently.
*
* @param cutoffs
* list of timestamp cutoffs(in seconds) for which the counts are
* desired
* @return list of maps corresponding for each cutoff provided; each map
* contains the warning and the number of times the error occurred in
* the time period
*/
public List<Map<String, Element>> getWarningMessagesAndCounts(
List<Long> cutoffs) {
return this.getElementsAndCounts(warnings, cutoffs, warningsPurgeInformation);
}
private List<Map<String, Element>> getElementsAndCounts(
Map<String, SortedMap<Long, Integer>> map, List<Long> cutoffs,
SortedSet<PurgeElement> purgeInformation) {
if (purgeInformation.size() > maxUniqueMessages) {
ErrorAndWarningsCleanup cleanup = new ErrorAndWarningsCleanup();
long cutoff = Time.now() - (messageAgeLimitSeconds * 1000);
cutoff = (cutoff / 1000);
cleanup.cleanupMessages(map, purgeInformation, cutoff, maxUniqueMessages);
}
List<Map<String, Element>> ret = new ArrayList<>(cutoffs.size());
for (int i = 0; i < cutoffs.size(); ++i) {
ret.add(new HashMap<String, Element>());
}
synchronized (lock) {
for (Map.Entry<String, SortedMap<Long, Integer>> element : map.entrySet()) {
for (int i = 0; i < cutoffs.size(); ++i) {
Map<String, Element> retMap = ret.get(i);
SortedMap<Long, Integer> qualifyingTimes =
element.getValue().tailMap(cutoffs.get(i));
long count = 0;
for (Map.Entry<Long, Integer> entry : qualifyingTimes.entrySet()) {
count += entry.getValue();
}
if (!qualifyingTimes.isEmpty()) {
retMap.put(element.getKey(),
new Element(count, qualifyingTimes.lastKey()));
}
}
}
}
return ret;
}
// getters and setters for log4j
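  // These setters let log4j configure the appender from properties. A
  // sketch of such a configuration (the appender name and values are
  // illustrative; note that cleanupInterval is in milliseconds):
  //   log4j.appender.X=org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender
  //   log4j.appender.X.cleanupInterval=300000
  //   log4j.appender.X.messageAgeLimitSeconds=86400
  //   log4j.appender.X.maxUniqueMessages=250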
public long getCleanupInterval() {
return cleanupInterval;
}
public void setCleanupInterval(long cleanupInterval) {
this.cleanupInterval = cleanupInterval;
}
public long getMessageAgeLimitSeconds() {
return messageAgeLimitSeconds;
}
public void setMessageAgeLimitSeconds(long messageAgeLimitSeconds) {
this.messageAgeLimitSeconds = messageAgeLimitSeconds;
}
public int getMaxUniqueMessages() {
return maxUniqueMessages;
}
public void setMaxUniqueMessages(int maxUniqueMessages) {
this.maxUniqueMessages = maxUniqueMessages;
}
class ErrorAndWarningsCleanup extends TimerTask {
@Override
public void run() {
long cutoff = Time.now() - (messageAgeLimitSeconds * 1000);
cutoff = (cutoff / 1000);
cleanupMessages(errors, errorsPurgeInformation, cutoff, maxUniqueMessages);
cleanupMessages(warnings, warningsPurgeInformation, cutoff,
maxUniqueMessages);
cleanupCounts(errorsTimestampCount, cutoff);
cleanupCounts(warningsTimestampCount, cutoff);
try {
cleanupTimer.schedule(new ErrorAndWarningsCleanup(), cleanupInterval);
} catch (IllegalStateException ie) {
// don't do anything since new timer is already scheduled
}
}
void cleanupMessages(Map<String, SortedMap<Long, Integer>> map,
SortedSet<PurgeElement> purgeInformation, long cutoff,
int mapTargetSize) {
PurgeElement el = new PurgeElement("", cutoff);
synchronized (lock) {
SortedSet<PurgeElement> removeSet = purgeInformation.headSet(el);
Iterator<PurgeElement> it = removeSet.iterator();
while (it.hasNext()) {
PurgeElement p = it.next();
map.remove(p.message);
it.remove();
}
// don't keep more mapTargetSize keys
if (purgeInformation.size() > mapTargetSize) {
Object[] array = purgeInformation.toArray();
int cutoffIndex = purgeInformation.size() - mapTargetSize;
for (int i = 0; i < cutoffIndex; ++i) {
PurgeElement p = (PurgeElement) array[i];
map.remove(p.message);
purgeInformation.remove(p);
}
}
}
}
void cleanupCounts(SortedMap<Long, Integer> map, long cutoff) {
synchronized (lock) {
Iterator<Map.Entry<Long, Integer>> it = map.entrySet().iterator();
while (it.hasNext()) {
Map.Entry<Long, Integer> element = it.next();
if (element.getKey() < cutoff) {
it.remove();
}
}
}
}
}
// helper function
public static Log4jWarningErrorMetricsAppender findAppender() {
Enumeration appenders = Logger.getRootLogger().getAllAppenders();
    while (appenders.hasMoreElements()) {
      Object obj = appenders.nextElement();
      if (obj instanceof Log4jWarningErrorMetricsAppender) {
return (Log4jWarningErrorMetricsAppender) obj;
}
}
return null;
}
}
| 15,624 | 33.877232 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/UTCClock.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.util;
import java.util.Calendar;
import java.util.TimeZone;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
/**
* Implementation of {@link Clock} that gives the current UTC time in
* milliseconds.
*/
@Public
@Evolving
public class UTCClock implements Clock {
private final TimeZone utcZone = TimeZone.getTimeZone("UTC");
  @Override
  public long getTime() {
return Calendar.getInstance(utcZone).getTimeInMillis();
}
}
| 1,347 | 33.564103 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Apps.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.util;
import static org.apache.hadoop.yarn.util.StringHelper._split;
import static org.apache.hadoop.yarn.util.StringHelper.join;
import static org.apache.hadoop.yarn.util.StringHelper.sjoin;
import java.io.File;
import java.util.Iterator;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.StringInterner;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
/**
* Yarn internal application-related utilities
*/
@Private
public class Apps {
public static final String APP = "application";
public static final String ID = "ID";
private static final Pattern VAR_SUBBER =
Pattern.compile(Shell.getEnvironmentVariableRegex());
private static final Pattern VARVAL_SPLITTER = Pattern.compile(
"(?<=^|,)" // preceded by ',' or line begin
+ '(' + Shell.ENV_NAME_REGEX + ')' // var group
+ '='
+ "([^,]*)" // val group
);
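  // An environment string is a comma-separated list of NAME=value pairs
  // whose values may reference other variables, e.g. (illustrative):
  //   "JAVA_HOME=/usr/java,CLASSPATH=$CLASSPATH:./lib"
  // VARVAL_SPLITTER extracts each NAME=value pair and VAR_SUBBER expands
  // the $VAR references inside a value; see setEnvFromInputString below.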
public static ApplicationId toAppID(String aid) {
Iterator<String> it = _split(aid).iterator();
return toAppID(APP, aid, it);
}
public static ApplicationId toAppID(String prefix, String s, Iterator<String> it) {
if (!it.hasNext() || !it.next().equals(prefix)) {
throwParseException(sjoin(prefix, ID), s);
}
shouldHaveNext(prefix, s, it);
ApplicationId appId = ApplicationId.newInstance(Long.parseLong(it.next()),
Integer.parseInt(it.next()));
return appId;
}
public static void shouldHaveNext(String prefix, String s, Iterator<String> it) {
if (!it.hasNext()) {
throwParseException(sjoin(prefix, ID), s);
}
}
public static void throwParseException(String name, String s) {
throw new YarnRuntimeException(join("Error parsing ", name, ": ", s));
}
public static void setEnvFromInputString(Map<String, String> env,
String envString, String classPathSeparator) {
if (envString != null && envString.length() > 0) {
Matcher varValMatcher = VARVAL_SPLITTER.matcher(envString);
while (varValMatcher.find()) {
String envVar = varValMatcher.group(1);
Matcher m = VAR_SUBBER.matcher(varValMatcher.group(2));
StringBuffer sb = new StringBuffer();
while (m.find()) {
String var = m.group(1);
          // replace $var with the value already constructed for the child
          String replace = env.get(var);
          // if this key is not configured for the child, fall back to this
          // process's environment
          if (replace == null)
            replace = System.getenv(var);
          // the env key is not present anywhere; set it to the empty string
          if (replace == null)
            replace = "";
m.appendReplacement(sb, Matcher.quoteReplacement(replace));
}
m.appendTail(sb);
addToEnvironment(env, envVar, sb.toString(), classPathSeparator);
}
}
}
/**
* This older version of this method is kept around for compatibility
* because downstream frameworks like Spark and Tez have been using it.
* Downstream frameworks are expected to move off of it.
*/
@Deprecated
public static void setEnvFromInputString(Map<String, String> env,
String envString) {
setEnvFromInputString(env, envString, File.pathSeparator);
}
@Public
@Unstable
public static void addToEnvironment(
Map<String, String> environment,
String variable, String value, String classPathSeparator) {
String val = environment.get(variable);
if (val == null) {
val = value;
} else {
val = val + classPathSeparator + value;
}
environment.put(StringInterner.weakIntern(variable),
StringInterner.weakIntern(val));
}
/**
* This older version of this method is kept around for compatibility
* because downstream frameworks like Spark and Tez have been using it.
* Downstream frameworks are expected to move off of it.
*/
@Deprecated
public static void addToEnvironment(
Map<String, String> environment,
String variable, String value) {
addToEnvironment(environment, variable, value, File.pathSeparator);
}
public static String crossPlatformify(String var) {
return ApplicationConstants.PARAMETER_EXPANSION_LEFT + var
+ ApplicationConstants.PARAMETER_EXPANSION_RIGHT;
}
}
| 5,539 | 35.688742 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AdHocLogDumper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.util;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.log4j.*;
import java.io.File;
import java.io.IOException;
import java.util.*;
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class AdHocLogDumper {
private static final Log LOG = LogFactory.getLog(AdHocLogDumper.class);
private String name;
private String targetFilename;
private Map<String, Priority> appenderLevels;
private Level currentLogLevel;
public static final String AD_HOC_DUMPER_APPENDER = "ad-hoc-dumper-appender";
private static boolean logFlag = false;
private static final Object lock = new Object();
public AdHocLogDumper(String name, String targetFilename) {
this.name = name;
this.targetFilename = targetFilename;
appenderLevels = new HashMap<>();
}
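  /**
   * Temporarily raise the verbosity of the logger named in the constructor,
   * dumping its output to the target file for the given period, after which
   * the original levels are restored. A minimal usage sketch (the logger
   * name and file name are illustrative):
   *
   * <pre>
   *   AdHocLogDumper dumper =
   *       new AdHocLogDumper("org.apache.hadoop.yarn", "debug.log");
   *   dumper.dumpLogs("DEBUG", 30 * 1000); // dump at DEBUG for 30 seconds
   * </pre>
   */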
public void dumpLogs(String level, int timePeriod)
throws YarnRuntimeException, IOException {
synchronized (lock) {
if (logFlag) {
LOG.info("Attempt to dump logs when appender is already running");
throw new YarnRuntimeException("Appender is already dumping logs");
}
Level targetLevel = Level.toLevel(level);
Log log = LogFactory.getLog(name);
appenderLevels.clear();
if (log instanceof Log4JLogger) {
Logger packageLogger = ((Log4JLogger) log).getLogger();
currentLogLevel = packageLogger.getLevel();
Level currentEffectiveLevel = packageLogger.getEffectiveLevel();
// make sure we can create the appender first
Layout layout = new PatternLayout("%d{ISO8601} %p %c: %m%n");
FileAppender fApp;
File file =
new File(System.getProperty("yarn.log.dir"), targetFilename);
try {
fApp = new FileAppender(layout, file.getAbsolutePath(), false);
} catch (IOException ie) {
          LOG.warn("Error creating file, can't dump logs to "
              + file.getAbsolutePath(), ie);
throw ie;
}
fApp.setName(AdHocLogDumper.AD_HOC_DUMPER_APPENDER);
fApp.setThreshold(targetLevel);
// get current threshold of all appenders and set it to the effective
// level
        for (Enumeration appenders = Logger.getRootLogger().getAllAppenders();
            appenders.hasMoreElements();) {
Object obj = appenders.nextElement();
if (obj instanceof AppenderSkeleton) {
AppenderSkeleton appender = (AppenderSkeleton) obj;
appenderLevels.put(appender.getName(), appender.getThreshold());
appender.setThreshold(currentEffectiveLevel);
}
}
packageLogger.addAppender(fApp);
LOG.info("Dumping adhoc logs for " + name + " to "
+ file.getAbsolutePath() + " for " + timePeriod + " milliseconds");
packageLogger.setLevel(targetLevel);
logFlag = true;
TimerTask restoreLogLevel = new RestoreLogLevel();
Timer restoreLogLevelTimer = new Timer();
restoreLogLevelTimer.schedule(restoreLogLevel, timePeriod);
}
}
}
class RestoreLogLevel extends TimerTask {
@Override
public void run() {
Log log = LogFactory.getLog(name);
if (log instanceof Log4JLogger) {
Logger logger = ((Log4JLogger) log).getLogger();
logger.removeAppender(AD_HOC_DUMPER_APPENDER);
logger.setLevel(currentLogLevel);
        for (Enumeration appenders = Logger.getRootLogger().getAllAppenders();
            appenders.hasMoreElements();) {
Object obj = appenders.nextElement();
if (obj instanceof AppenderSkeleton) {
AppenderSkeleton appender = (AppenderSkeleton) obj;
appender.setThreshold(appenderLevels.get(appender.getName()));
}
}
logFlag = false;
LOG.info("Done dumping adhoc logs for " + name);
}
}
}
}
| 4,982 | 36.75 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.util;
import static org.apache.hadoop.yarn.util.StringHelper._split;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Map.Entry;
import java.util.NoSuchElementException;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.URL;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
/**
 * This class contains a set of utilities that help convert data structures
 * between their serializable ('wire') format and their Hadoop/native Java
 * counterparts.
 */
@Private
public class ConverterUtils {
public static final String APPLICATION_PREFIX = "application";
public static final String CONTAINER_PREFIX = "container";
public static final String APPLICATION_ATTEMPT_PREFIX = "appattempt";
  /**
   * Return a Hadoop Path built from the given URL.
   *
   * @param url
   *          url to convert
   * @return path from {@link URL}
   * @throws URISyntaxException if the URL cannot be converted to a valid URI
   */
public static Path getPathFromYarnURL(URL url) throws URISyntaxException {
String scheme = url.getScheme() == null ? "" : url.getScheme();
String authority = "";
if (url.getHost() != null) {
authority = url.getHost();
if (url.getUserInfo() != null) {
authority = url.getUserInfo() + "@" + authority;
}
if (url.getPort() > 0) {
authority += ":" + url.getPort();
}
}
return new Path(
(new URI(scheme, authority, url.getFile(), null, null)).normalize());
}
  /**
   * Convert map keys and values from CharSequence to String.
   * @param env map to convert
   * @return string-to-string map
   */
public static Map<String, String> convertToString(
Map<CharSequence, CharSequence> env) {
Map<String, String> stringMap = new HashMap<String, String>();
for (Entry<CharSequence, CharSequence> entry: env.entrySet()) {
stringMap.put(entry.getKey().toString(), entry.getValue().toString());
}
return stringMap;
}
public static URL getYarnUrlFromPath(Path path) {
return getYarnUrlFromURI(path.toUri());
}
public static URL getYarnUrlFromURI(URI uri) {
URL url = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(URL.class);
if (uri.getHost() != null) {
url.setHost(uri.getHost());
}
if (uri.getUserInfo() != null) {
url.setUserInfo(uri.getUserInfo());
}
url.setPort(uri.getPort());
url.setScheme(uri.getScheme());
url.setFile(uri.getPath());
return url;
}
public static String toString(ApplicationId appId) {
return appId.toString();
}
public static ApplicationId toApplicationId(RecordFactory recordFactory,
String appIdStr) {
Iterator<String> it = _split(appIdStr).iterator();
it.next(); // prefix. TODO: Validate application prefix
return toApplicationId(recordFactory, it);
}
private static ApplicationId toApplicationId(RecordFactory recordFactory,
Iterator<String> it) {
ApplicationId appId = ApplicationId.newInstance(Long.parseLong(it.next()),
Integer.parseInt(it.next()));
return appId;
}
private static ApplicationAttemptId toApplicationAttemptId(
Iterator<String> it) throws NumberFormatException {
ApplicationId appId = ApplicationId.newInstance(Long.parseLong(it.next()),
Integer.parseInt(it.next()));
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, Integer.parseInt(it.next()));
return appAttemptId;
}
private static ApplicationId toApplicationId(
Iterator<String> it) throws NumberFormatException {
ApplicationId appId = ApplicationId.newInstance(Long.parseLong(it.next()),
Integer.parseInt(it.next()));
return appId;
}
public static String toString(ContainerId cId) {
return cId == null ? null : cId.toString();
}
public static NodeId toNodeIdWithDefaultPort(String nodeIdStr) {
if (nodeIdStr.indexOf(":") < 0) {
return toNodeId(nodeIdStr + ":0");
}
return toNodeId(nodeIdStr);
}
public static NodeId toNodeId(String nodeIdStr) {
String[] parts = nodeIdStr.split(":");
if (parts.length != 2) {
throw new IllegalArgumentException("Invalid NodeId [" + nodeIdStr
+ "]. Expected host:port");
}
try {
NodeId nodeId =
NodeId.newInstance(parts[0].trim(), Integer.parseInt(parts[1]));
return nodeId;
} catch (NumberFormatException e) {
throw new IllegalArgumentException("Invalid port: " + parts[1], e);
}
}
public static ContainerId toContainerId(String containerIdStr) {
return ContainerId.fromString(containerIdStr);
}
  public static ApplicationAttemptId toApplicationAttemptId(
      String applicationAttemptIdStr) {
    Iterator<String> it = _split(applicationAttemptIdStr).iterator();
    if (!it.next().equals(APPLICATION_ATTEMPT_PREFIX)) {
      throw new IllegalArgumentException("Invalid AppAttemptId prefix: "
          + applicationAttemptIdStr);
    }
    try {
      return toApplicationAttemptId(it);
    } catch (NumberFormatException n) {
      throw new IllegalArgumentException("Invalid AppAttemptId: "
          + applicationAttemptIdStr, n);
    } catch (NoSuchElementException e) {
      throw new IllegalArgumentException("Invalid AppAttemptId: "
          + applicationAttemptIdStr, e);
    }
  }
public static ApplicationId toApplicationId(
String appIdStr) {
Iterator<String> it = _split(appIdStr).iterator();
if (!it.next().equals(APPLICATION_PREFIX)) {
throw new IllegalArgumentException("Invalid ApplicationId prefix: "
+ appIdStr + ". The valid ApplicationId should start with prefix "
+ APPLICATION_PREFIX);
}
try {
return toApplicationId(it);
} catch (NumberFormatException n) {
throw new IllegalArgumentException("Invalid ApplicationId: "
+ appIdStr, n);
} catch (NoSuchElementException e) {
throw new IllegalArgumentException("Invalid ApplicationId: "
+ appIdStr, e);
}
}
/**
* Convert a protobuf token into a rpc token and set its service. Supposed
* to be used for tokens other than RMDelegationToken. For
* RMDelegationToken, use
* {@link #convertFromYarn(org.apache.hadoop.yarn.api.records.Token,
* org.apache.hadoop.io.Text)} instead.
*
* @param protoToken the yarn token
* @param serviceAddr the connect address for the service
* @return rpc token
*/
public static <T extends TokenIdentifier> Token<T> convertFromYarn(
org.apache.hadoop.yarn.api.records.Token protoToken,
InetSocketAddress serviceAddr) {
Token<T> token = new Token<T>(protoToken.getIdentifier().array(),
protoToken.getPassword().array(),
new Text(protoToken.getKind()),
new Text(protoToken.getService()));
if (serviceAddr != null) {
SecurityUtil.setTokenService(token, serviceAddr);
}
return token;
}
/**
* Convert a protobuf token into a rpc token and set its service.
*
* @param protoToken the yarn token
* @param service the service for the token
*/
public static <T extends TokenIdentifier> Token<T> convertFromYarn(
org.apache.hadoop.yarn.api.records.Token protoToken,
Text service) {
Token<T> token = new Token<T>(protoToken.getIdentifier().array(),
protoToken.getPassword().array(),
new Text(protoToken.getKind()),
new Text(protoToken.getService()));
if (service != null) {
token.setService(service);
}
return token;
}
}
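// --- Illustrative usage sketch; not part of the original file. ---
// Demonstrates the Path <-> YARN URL round trip and ApplicationId parsing.
// The HDFS path and the application-id string below are made-up examples.
class ConverterUtilsExample {
  public static void main(String[] args) throws Exception {
    Path p = new Path("hdfs://nn.example.com:8020/user/test/job.jar");
    URL yarnUrl = ConverterUtils.getYarnUrlFromPath(p);
    // getPathFromYarnURL reverses getYarnUrlFromPath
    Path back = ConverterUtils.getPathFromYarnURL(yarnUrl);
    System.out.println(p.equals(back)); // true

    // the wire format is "application_<clusterTimestamp>_<sequenceNumber>"
    ApplicationId appId =
        ConverterUtils.toApplicationId("application_1408064979620_0001");
    System.out.println(appId.getId()); // 1
  }
}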
| 9,068 | 33.48289 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/LinuxResourceCalculatorPlugin.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.util;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.util.SysInfoLinux;
/**
* Plugin to calculate resource information on Linux systems.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class LinuxResourceCalculatorPlugin extends ResourceCalculatorPlugin {
public LinuxResourceCalculatorPlugin() {
super(new SysInfoLinux());
}
}
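// Minimal usage sketch; not part of the original file. Reads a couple of
// system metrics through the plugin; only meaningful on a Linux host.
class LinuxResourceCalculatorPluginExample {
  public static void main(String[] args) {
    LinuxResourceCalculatorPlugin plugin = new LinuxResourceCalculatorPlugin();
    System.out.println("physical memory: " + plugin.getPhysicalMemorySize());
    System.out.println("num processors:  " + plugin.getNumProcessors());
  }
}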
| 1,295 | 35 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/YarnVersionInfo.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.util;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.VersionInfo;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* This class finds the package info for Yarn.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class YarnVersionInfo extends VersionInfo {
private static final Log LOG = LogFactory.getLog(YarnVersionInfo.class);
private static YarnVersionInfo YARN_VERSION_INFO = new YarnVersionInfo();
protected YarnVersionInfo() {
super("yarn");
}
/**
* Get the Yarn version.
   * @return the Yarn version string, e.g. "0.6.3-dev"
*/
public static String getVersion() {
return YARN_VERSION_INFO._getVersion();
}
/**
   * Get the subversion revision number for the root directory.
   * @return the revision number, e.g. "451451"
*/
public static String getRevision() {
return YARN_VERSION_INFO._getRevision();
}
/**
* Get the branch on which this originated.
* @return The branch name, e.g. "trunk" or "branches/branch-0.20"
*/
public static String getBranch() {
return YARN_VERSION_INFO._getBranch();
}
/**
* The date that Yarn was compiled.
* @return the compilation date in unix date format
*/
public static String getDate() {
return YARN_VERSION_INFO._getDate();
}
/**
* The user that compiled Yarn.
* @return the username of the user
*/
public static String getUser() {
return YARN_VERSION_INFO._getUser();
}
/**
* Get the subversion URL for the root Yarn directory.
*/
public static String getUrl() {
return YARN_VERSION_INFO._getUrl();
}
/**
* Get the checksum of the source files from which Yarn was
* built.
**/
public static String getSrcChecksum() {
return YARN_VERSION_INFO._getSrcChecksum();
}
/**
* Returns the buildVersion which includes version,
* revision, user and date.
*/
public static String getBuildVersion(){
return YARN_VERSION_INFO._getBuildVersion();
}
public static void main(String[] args) {
    LOG.debug("version: " + getVersion());
System.out.println("Yarn " + getVersion());
System.out.println("Subversion " + getUrl() + " -r " + getRevision());
System.out.println("Compiled by " + getUser() + " on " + getDate());
System.out.println("From source with checksum " + getSrcChecksum());
}
}
| 3,308 | 28.810811 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/SystemClock.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.util;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Stable;
/**
* Implementation of {@link Clock} that gives the current time from the system
* clock in milliseconds.
*/
@Public
@Stable
public class SystemClock implements Clock {
public long getTime() {
return System.currentTimeMillis();
}
}
| 1,207 | 34.529412 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/TrackingUriPlugin.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.util;
import java.net.URI;
import java.net.URISyntaxException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.yarn.api.records.ApplicationId;
/**
* Plugin to derive a tracking URL from a Yarn Application ID
*
*/
@InterfaceAudience.LimitedPrivate({"MapReduce"})
@InterfaceStability.Unstable
public abstract class TrackingUriPlugin extends Configured {
/**
* Given an application ID, return a tracking URI.
* @param id the ID for which a URI is returned
* @return the tracking URI
* @throws URISyntaxException
*/
public abstract URI getTrackingUri(ApplicationId id)
throws URISyntaxException;
}
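// Illustrative subclass sketch; not part of the original file. A hypothetical
// plugin that maps every application to a fixed proxy host; the host, port,
// and path layout are assumptions for the example only.
class FixedHostTrackingUriPlugin extends TrackingUriPlugin {
  @Override
  public URI getTrackingUri(ApplicationId id) throws URISyntaxException {
    // e.g. http://proxy.example.com:8088/proxy/application_..._0001
    return new URI("http", null, "proxy.example.com", 8088,
        "/proxy/" + id.toString(), null, null);
  }
}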
| 1,604 | 33.148936 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AuxiliaryServiceHelper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.util;
import java.nio.ByteBuffer;
import java.util.Map;
import org.apache.commons.codec.binary.Base64;
public class AuxiliaryServiceHelper {
public final static String NM_AUX_SERVICE = "NM_AUX_SERVICE_";
public static ByteBuffer getServiceDataFromEnv(String serviceName,
Map<String, String> env) {
String meta = env.get(getPrefixServiceName(serviceName));
if (null == meta) {
return null;
}
byte[] metaData = Base64.decodeBase64(meta);
return ByteBuffer.wrap(metaData);
}
public static void setServiceDataIntoEnv(String serviceName,
ByteBuffer metaData, Map<String, String> env) {
byte[] byteData = metaData.array();
env.put(getPrefixServiceName(serviceName),
Base64.encodeBase64String(byteData));
}
private static String getPrefixServiceName(String serviceName) {
return NM_AUX_SERVICE + serviceName;
}
}
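// Round-trip sketch; not part of the original file. Stores service metadata
// in an environment map and reads it back; "mapreduce_shuffle" and the
// payload are example values only.
class AuxiliaryServiceHelperExample {
  public static void main(String[] args) {
    Map<String, String> env = new java.util.HashMap<String, String>();
    ByteBuffer data = ByteBuffer.wrap("payload".getBytes());
    AuxiliaryServiceHelper.setServiceDataIntoEnv("mapreduce_shuffle", data, env);
    // the value is stored Base64-encoded under NM_AUX_SERVICE_mapreduce_shuffle
    ByteBuffer back =
        AuxiliaryServiceHelper.getServiceDataFromEnv("mapreduce_shuffle", env);
    System.out.println(new String(back.array())); // payload
  }
}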
| 1,709 | 31.884615 | 74 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsResourceCalculatorPlugin.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.util;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.util.SysInfoWindows;
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class WindowsResourceCalculatorPlugin extends ResourceCalculatorPlugin {
public WindowsResourceCalculatorPlugin() {
super(new SysInfoWindows());
}
}
| 1,233 | 36.393939 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/FSDownload.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.util;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URISyntaxException;
import java.security.PrivilegedExceptionAction;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.regex.Pattern;
import org.apache.commons.io.FileUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.RunJar;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.util.concurrent.Futures;
/**
* Download a single URL to the local disk.
*
*/
@LimitedPrivate({"YARN", "MapReduce"})
public class FSDownload implements Callable<Path> {
private static final Log LOG = LogFactory.getLog(FSDownload.class);
private FileContext files;
private final UserGroupInformation userUgi;
private Configuration conf;
private LocalResource resource;
private final LoadingCache<Path,Future<FileStatus>> statCache;
  /** The local FS dir path under which this resource is to be localized. */
private Path destDirPath;
private static final FsPermission cachePerms = new FsPermission(
(short) 0755);
static final FsPermission PUBLIC_FILE_PERMS = new FsPermission((short) 0555);
static final FsPermission PRIVATE_FILE_PERMS = new FsPermission(
(short) 0500);
static final FsPermission PUBLIC_DIR_PERMS = new FsPermission((short) 0755);
static final FsPermission PRIVATE_DIR_PERMS = new FsPermission((short) 0700);
public FSDownload(FileContext files, UserGroupInformation ugi, Configuration conf,
Path destDirPath, LocalResource resource) {
this(files, ugi, conf, destDirPath, resource, null);
}
public FSDownload(FileContext files, UserGroupInformation ugi, Configuration conf,
Path destDirPath, LocalResource resource,
LoadingCache<Path,Future<FileStatus>> statCache) {
this.conf = conf;
this.destDirPath = destDirPath;
this.files = files;
this.userUgi = ugi;
this.resource = resource;
this.statCache = statCache;
}
LocalResource getResource() {
return resource;
}
private void createDir(Path path, FsPermission perm) throws IOException {
files.mkdir(path, perm, false);
if (!perm.equals(files.getUMask().applyUMask(perm))) {
files.setPermission(path, perm);
}
}
/**
* Creates the cache loader for the status loading cache. This should be used
* to create an instance of the status cache that is passed into the
* FSDownload constructor.
*/
public static CacheLoader<Path,Future<FileStatus>>
createStatusCacheLoader(final Configuration conf) {
return new CacheLoader<Path,Future<FileStatus>>() {
public Future<FileStatus> load(Path path) {
try {
FileSystem fs = path.getFileSystem(conf);
return Futures.immediateFuture(fs.getFileStatus(path));
} catch (Throwable th) {
// report failures so it can be memoized
return Futures.immediateFailedFuture(th);
}
}
};
}
  /**
   * Returns a boolean to denote whether a cache file is visible to all
   * (public) or not.
   *
   * @return true if the given path is visible to all, false
   *         otherwise
   */
@Private
public static boolean isPublic(FileSystem fs, Path current, FileStatus sStat,
LoadingCache<Path,Future<FileStatus>> statCache) throws IOException {
current = fs.makeQualified(current);
//the leaf level file should be readable by others
if (!checkPublicPermsForAll(fs, sStat, FsAction.READ_EXECUTE, FsAction.READ)) {
return false;
}
if (Shell.WINDOWS && fs instanceof LocalFileSystem) {
// Relax the requirement for public cache on LFS on Windows since default
// permissions are "700" all the way up to the drive letter. In this
// model, the only requirement for a user is to give EVERYONE group
// permission on the file and the file will be considered public.
// This code path is only hit when fs.default.name is file:/// (mainly
// in tests).
return true;
}
return ancestorsHaveExecutePermissions(fs, current.getParent(), statCache);
}
private static boolean checkPublicPermsForAll(FileSystem fs,
FileStatus status, FsAction dir, FsAction file)
throws IOException {
FsPermission perms = status.getPermission();
FsAction otherAction = perms.getOtherAction();
if (status.isDirectory()) {
if (!otherAction.implies(dir)) {
return false;
}
for (FileStatus child : fs.listStatus(status.getPath())) {
if(!checkPublicPermsForAll(fs, child, dir, file)) {
return false;
}
}
return true;
}
return (otherAction.implies(file));
}
/**
* Returns true if all ancestors of the specified path have the 'execute'
* permission set for all users (i.e. that other users can traverse
* the directory hierarchy to the given path)
*/
@VisibleForTesting
static boolean ancestorsHaveExecutePermissions(FileSystem fs,
Path path, LoadingCache<Path,Future<FileStatus>> statCache)
throws IOException {
Path current = path;
while (current != null) {
//the subdirs in the path should have execute permissions for others
if (!checkPermissionOfOther(fs, current, FsAction.EXECUTE, statCache)) {
return false;
}
current = current.getParent();
}
return true;
}
  /**
   * Checks, for a given path, whether the "other" permissions on it
   * imply the permission in the passed FsAction.
   * @param fs
   * @param path
   * @param action
   * @return true if the "other" permission bits on the path imply the given
   *         action, false otherwise
   * @throws IOException
   */
private static boolean checkPermissionOfOther(FileSystem fs, Path path,
FsAction action, LoadingCache<Path,Future<FileStatus>> statCache)
throws IOException {
FileStatus status = getFileStatus(fs, path, statCache);
FsPermission perms = status.getPermission();
FsAction otherAction = perms.getOtherAction();
return otherAction.implies(action);
}
/**
* Obtains the file status, first by checking the stat cache if it is
* available, and then by getting it explicitly from the filesystem. If we got
* the file status from the filesystem, it is added to the stat cache.
*
* The stat cache is expected to be managed by callers who provided it to
* FSDownload.
*/
private static FileStatus getFileStatus(final FileSystem fs, final Path path,
LoadingCache<Path,Future<FileStatus>> statCache) throws IOException {
// if the stat cache does not exist, simply query the filesystem
if (statCache == null) {
return fs.getFileStatus(path);
}
try {
// get or load it from the cache
return statCache.get(path).get();
} catch (ExecutionException e) {
Throwable cause = e.getCause();
// the underlying exception should normally be IOException
if (cause instanceof IOException) {
throw (IOException)cause;
} else {
throw new IOException(cause);
}
} catch (InterruptedException e) { // should not happen
Thread.currentThread().interrupt();
throw new IOException(e);
}
}
private Path copy(Path sCopy, Path dstdir) throws IOException {
FileSystem sourceFs = sCopy.getFileSystem(conf);
Path dCopy = new Path(dstdir, "tmp_"+sCopy.getName());
FileStatus sStat = sourceFs.getFileStatus(sCopy);
if (sStat.getModificationTime() != resource.getTimestamp()) {
      throw new IOException("Resource " + sCopy +
          " changed on src filesystem (expected " + resource.getTimestamp() +
          ", was " + sStat.getModificationTime() + ")");
}
if (resource.getVisibility() == LocalResourceVisibility.PUBLIC) {
if (!isPublic(sourceFs, sCopy, sStat, statCache)) {
        throw new IOException("Resource " + sCopy +
            " is not publicly accessible and as such cannot be part of the" +
            " public cache.");
}
}
FileUtil.copy(sourceFs, sStat, FileSystem.getLocal(conf), dCopy, false,
true, conf);
return dCopy;
}
private long unpack(File localrsrc, File dst) throws IOException {
switch (resource.getType()) {
case ARCHIVE: {
String lowerDst = StringUtils.toLowerCase(dst.getName());
if (lowerDst.endsWith(".jar")) {
RunJar.unJar(localrsrc, dst);
} else if (lowerDst.endsWith(".zip")) {
FileUtil.unZip(localrsrc, dst);
} else if (lowerDst.endsWith(".tar.gz") ||
lowerDst.endsWith(".tgz") ||
lowerDst.endsWith(".tar")) {
FileUtil.unTar(localrsrc, dst);
} else {
LOG.warn("Cannot unpack " + localrsrc);
if (!localrsrc.renameTo(dst)) {
throw new IOException("Unable to rename file: [" + localrsrc
+ "] to [" + dst + "]");
}
}
}
break;
case PATTERN: {
String lowerDst = StringUtils.toLowerCase(dst.getName());
if (lowerDst.endsWith(".jar")) {
String p = resource.getPattern();
RunJar.unJar(localrsrc, dst,
p == null ? RunJar.MATCH_ANY : Pattern.compile(p));
File newDst = new File(dst, dst.getName());
if (!dst.exists() && !dst.mkdir()) {
throw new IOException("Unable to create directory: [" + dst + "]");
}
if (!localrsrc.renameTo(newDst)) {
throw new IOException("Unable to rename file: [" + localrsrc
+ "] to [" + newDst + "]");
}
} else if (lowerDst.endsWith(".zip")) {
LOG.warn("Treating [" + localrsrc + "] as an archive even though it " +
"was specified as PATTERN");
FileUtil.unZip(localrsrc, dst);
} else if (lowerDst.endsWith(".tar.gz") ||
lowerDst.endsWith(".tgz") ||
lowerDst.endsWith(".tar")) {
LOG.warn("Treating [" + localrsrc + "] as an archive even though it " +
"was specified as PATTERN");
FileUtil.unTar(localrsrc, dst);
} else {
LOG.warn("Cannot unpack " + localrsrc);
if (!localrsrc.renameTo(dst)) {
throw new IOException("Unable to rename file: [" + localrsrc
+ "] to [" + dst + "]");
}
}
}
break;
case FILE:
default:
if (!localrsrc.renameTo(dst)) {
throw new IOException("Unable to rename file: [" + localrsrc
+ "] to [" + dst + "]");
}
break;
}
if(localrsrc.isFile()){
try {
files.delete(new Path(localrsrc.toString()), false);
} catch (IOException ignore) {
}
}
return 0;
// TODO Should calculate here before returning
//return FileUtil.getDU(destDir);
}
@Override
public Path call() throws Exception {
final Path sCopy;
try {
sCopy = ConverterUtils.getPathFromYarnURL(resource.getResource());
} catch (URISyntaxException e) {
throw new IOException("Invalid resource", e);
}
createDir(destDirPath, cachePerms);
final Path dst_work = new Path(destDirPath + "_tmp");
createDir(dst_work, cachePerms);
Path dFinal = files.makeQualified(new Path(dst_work, sCopy.getName()));
try {
Path dTmp = null == userUgi ? files.makeQualified(copy(sCopy, dst_work))
: userUgi.doAs(new PrivilegedExceptionAction<Path>() {
public Path run() throws Exception {
return files.makeQualified(copy(sCopy, dst_work));
};
});
unpack(new File(dTmp.toUri()), new File(dFinal.toUri()));
changePermissions(dFinal.getFileSystem(conf), dFinal);
files.rename(dst_work, destDirPath, Rename.OVERWRITE);
} catch (Exception e) {
try {
files.delete(destDirPath, true);
} catch (IOException ignore) {
}
throw e;
} finally {
try {
files.delete(dst_work, true);
} catch (FileNotFoundException ignore) {
}
conf = null;
resource = null;
}
return files.makeQualified(new Path(destDirPath, sCopy.getName()));
}
/**
* Recursively change permissions of all files/dirs on path based
* on resource visibility.
* Change to 755 or 700 for dirs, 555 or 500 for files.
* @param fs FileSystem
* @param path Path to modify perms for
* @throws IOException
* @throws InterruptedException
*/
private void changePermissions(FileSystem fs, final Path path)
throws IOException, InterruptedException {
File f = new File(path.toUri());
if (FileUtils.isSymlink(f)) {
// avoid following symlinks when changing permissions
return;
}
boolean isDir = f.isDirectory();
FsPermission perm = cachePerms;
// set public perms as 755 or 555 based on dir or file
if (resource.getVisibility() == LocalResourceVisibility.PUBLIC) {
perm = isDir ? PUBLIC_DIR_PERMS : PUBLIC_FILE_PERMS;
}
// set private perms as 700 or 500
else {
// PRIVATE:
// APPLICATION:
perm = isDir ? PRIVATE_DIR_PERMS : PRIVATE_FILE_PERMS;
}
LOG.debug("Changing permissions for path " + path
+ " to perm " + perm);
final FsPermission fPerm = perm;
if (null == userUgi) {
files.setPermission(path, perm);
}
else {
userUgi.doAs(new PrivilegedExceptionAction<Void>() {
public Void run() throws Exception {
files.setPermission(path, fPerm);
return null;
}
});
}
if (isDir) {
FileStatus[] statuses = fs.listStatus(path);
for (FileStatus status : statuses) {
changePermissions(fs, status.getPath());
}
}
}
}
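// Usage sketch; not part of the original file. Submits a single download to
// an executor, which is how callers typically drive FSDownload. The
// destination dir and LocalResource are supplied by the caller; error
// handling is elided.
class FSDownloadExample {
  static Path download(Configuration conf, Path destDir, LocalResource rsrc)
      throws Exception {
    FileContext files = FileContext.getLocalFSFileContext();
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    java.util.concurrent.ExecutorService exec =
        java.util.concurrent.Executors.newSingleThreadExecutor();
    try {
      Future<Path> localized =
          exec.submit(new FSDownload(files, ugi, conf, destDir, rsrc));
      return localized.get(); // blocks until the resource is local
    } finally {
      exec.shutdown();
    }
  }
}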
| 15,478 | 34.583908 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AbstractLivelinessMonitor.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.util;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.service.AbstractService;
/**
* A simple liveliness monitor with which clients can register, trust the
* component to monitor liveliness, get a call-back on expiry and then finally
* unregister.
*/
@Public
@Evolving
public abstract class AbstractLivelinessMonitor<O> extends AbstractService {
private static final Log LOG = LogFactory.getLog(AbstractLivelinessMonitor.class);
  //thread which runs periodically to check how long it has been since the
  //last heartbeat was received.
private Thread checkerThread;
private volatile boolean stopped;
public static final int DEFAULT_EXPIRE = 5*60*1000;//5 mins
private int expireInterval = DEFAULT_EXPIRE;
private int monitorInterval = expireInterval/3;
private final Clock clock;
private Map<O, Long> running = new HashMap<O, Long>();
public AbstractLivelinessMonitor(String name, Clock clock) {
super(name);
this.clock = clock;
}
@Override
protected void serviceStart() throws Exception {
assert !stopped : "starting when already stopped";
resetTimer();
checkerThread = new Thread(new PingChecker());
checkerThread.setName("Ping Checker");
checkerThread.start();
super.serviceStart();
}
@Override
protected void serviceStop() throws Exception {
stopped = true;
if (checkerThread != null) {
checkerThread.interrupt();
}
super.serviceStop();
}
protected abstract void expire(O ob);
protected void setExpireInterval(int expireInterval) {
this.expireInterval = expireInterval;
}
protected void setMonitorInterval(int monitorInterval) {
this.monitorInterval = monitorInterval;
}
public synchronized void receivedPing(O ob) {
//only put for the registered objects
if (running.containsKey(ob)) {
running.put(ob, clock.getTime());
}
}
public synchronized void register(O ob) {
running.put(ob, clock.getTime());
}
public synchronized void unregister(O ob) {
running.remove(ob);
}
public synchronized void resetTimer() {
long time = clock.getTime();
for (O ob : running.keySet()) {
running.put(ob, time);
}
}
private class PingChecker implements Runnable {
@Override
public void run() {
while (!stopped && !Thread.currentThread().isInterrupted()) {
synchronized (AbstractLivelinessMonitor.this) {
Iterator<Map.Entry<O, Long>> iterator =
running.entrySet().iterator();
          //avoid calculating the current time every time in the loop
long currentTime = clock.getTime();
while (iterator.hasNext()) {
Map.Entry<O, Long> entry = iterator.next();
if (currentTime > entry.getValue() + expireInterval) {
iterator.remove();
expire(entry.getKey());
LOG.info("Expired:" + entry.getKey().toString() +
" Timed out after " + expireInterval/1000 + " secs");
}
}
}
try {
Thread.sleep(monitorInterval);
} catch (InterruptedException e) {
LOG.info(getName() + " thread interrupted");
break;
}
}
}
}
}
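// Concrete-subclass sketch; not part of the original file. Expires string
// keys that have not pinged within three seconds; the intervals are tiny
// purely for illustration.
class StringLivelinessMonitor extends AbstractLivelinessMonitor<String> {
  StringLivelinessMonitor() {
    super("StringLivelinessMonitor", new SystemClock());
    setExpireInterval(3000);   // expire after 3s without a ping
    setMonitorInterval(1000);  // scan once a second
  }
  @Override
  protected void expire(String key) {
    System.out.println("expired: " + key);
  }
  // typical lifecycle: init(conf); start(); register(key); receivedPing(key)...
}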
| 4,292 | 29.020979 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Times.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.util;
import java.text.SimpleDateFormat;
import java.util.Date;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
@Private
public class Times {
private static final Log LOG = LogFactory.getLog(Times.class);
// This format should match the one used in yarn.dt.plugins.js
static final ThreadLocal<SimpleDateFormat> dateFormat =
new ThreadLocal<SimpleDateFormat>() {
@Override protected SimpleDateFormat initialValue() {
return new SimpleDateFormat("EEE MMM dd HH:mm:ss Z yyyy");
}
};
public static long elapsed(long started, long finished) {
return Times.elapsed(started, finished, true);
}
  // A valid elapsed time is supposed to be non-negative. If the started time
  // is ahead of the finished/current time, return -1 to indicate an invalid
  // elapsed time, and record a warning log.
public static long elapsed(long started, long finished, boolean isRunning) {
if (finished > 0 && started > 0) {
long elapsed = finished - started;
if (elapsed >= 0) {
return elapsed;
} else {
LOG.warn("Finished time " + finished
+ " is ahead of started time " + started);
return -1;
}
}
if (isRunning) {
long current = System.currentTimeMillis();
long elapsed = started > 0 ? current - started : 0;
if (elapsed >= 0) {
return elapsed;
} else {
LOG.warn("Current time " + current
+ " is ahead of started time " + started);
return -1;
}
} else {
return -1;
}
}
public static String format(long ts) {
return ts > 0 ? String.valueOf(dateFormat.get().format(new Date(ts)))
: "N/A";
}
}
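// Behavior sketch; not part of the original file. Shows the elapsed()
// contract: a negative span yields -1 (plus a warning) rather than a bogus
// duration, and format() maps non-positive timestamps to "N/A".
class TimesExample {
  public static void main(String[] args) {
    System.out.println(Times.elapsed(1000L, 4000L));     // 3000
    System.out.println(Times.elapsed(4000L, 1000L));     // -1 (invalid span)
    System.out.println(Times.elapsed(1000L, 0L, false)); // -1 (not running)
    System.out.println(Times.format(0L));                // N/A
  }
}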
| 2,624 | 32.653846 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsBasedProcessTree.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.util;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
import org.apache.hadoop.util.StringUtils;
@Private
public class WindowsBasedProcessTree extends ResourceCalculatorProcessTree {
static final Log LOG = LogFactory
.getLog(WindowsBasedProcessTree.class);
static class ProcessInfo {
String pid; // process pid
long vmem; // virtual memory
long workingSet; // working set, RAM used
long cpuTimeMs; // total cpuTime in millisec
long cpuTimeMsDelta; // delta of cpuTime since last update
int age = 1;
}
private String taskProcessId = null;
private long cpuTimeMs = UNAVAILABLE;
private Map<String, ProcessInfo> processTree =
new HashMap<String, ProcessInfo>();
public static boolean isAvailable() {
if (Shell.WINDOWS) {
ShellCommandExecutor shellExecutor = new ShellCommandExecutor(
new String[] { Shell.WINUTILS, "help" });
try {
shellExecutor.execute();
} catch (IOException e) {
LOG.error(StringUtils.stringifyException(e));
} finally {
String output = shellExecutor.getOutput();
if (output != null &&
output.contains("Prints to stdout a list of processes in the task")) {
return true;
}
}
}
return false;
}
public WindowsBasedProcessTree(String pid) {
super(pid);
taskProcessId = pid;
}
// helper method to override while testing
String getAllProcessInfoFromShell() {
ShellCommandExecutor shellExecutor = new ShellCommandExecutor(
new String[] { Shell.WINUTILS, "task", "processList", taskProcessId });
try {
shellExecutor.execute();
return shellExecutor.getOutput();
} catch (IOException e) {
LOG.error(StringUtils.stringifyException(e));
}
return null;
}
  /**
   * Parses a string of process info lines into ProcessInfo objects.
   * @param processesInfoStr one "pid,vmem,workingSet,cpuTimeMs" record per
   *        line, as emitted by winutils
   * @return Map of pid string to ProcessInfo objects
   */
Map<String, ProcessInfo> createProcessInfo(String processesInfoStr) {
String[] processesStr = processesInfoStr.split("\r\n");
Map<String, ProcessInfo> allProcs = new HashMap<String, ProcessInfo>();
final int procInfoSplitCount = 4;
for (String processStr : processesStr) {
if (processStr != null) {
String[] procInfo = processStr.split(",");
if (procInfo.length == procInfoSplitCount) {
try {
ProcessInfo pInfo = new ProcessInfo();
pInfo.pid = procInfo[0];
pInfo.vmem = Long.parseLong(procInfo[1]);
pInfo.workingSet = Long.parseLong(procInfo[2]);
pInfo.cpuTimeMs = Long.parseLong(procInfo[3]);
allProcs.put(pInfo.pid, pInfo);
} catch (NumberFormatException nfe) {
LOG.debug("Error parsing procInfo." + nfe);
}
} else {
LOG.debug("Expected split length of proc info to be "
+ procInfoSplitCount + ". Got " + procInfo.length);
}
}
}
return allProcs;
}
@Override
public void updateProcessTree() {
if(taskProcessId != null) {
// taskProcessId can be null in some tests
String processesInfoStr = getAllProcessInfoFromShell();
if (processesInfoStr != null && processesInfoStr.length() > 0) {
Map<String, ProcessInfo> allProcessInfo = createProcessInfo(processesInfoStr);
for (Map.Entry<String, ProcessInfo> entry : allProcessInfo.entrySet()) {
String pid = entry.getKey();
ProcessInfo pInfo = entry.getValue();
ProcessInfo oldInfo = processTree.get(pid);
if (oldInfo != null) {
// existing process, update age and replace value
pInfo.age += oldInfo.age;
// calculate the delta since the last refresh. totals are being kept
// in the WindowsBasedProcessTree object
pInfo.cpuTimeMsDelta = pInfo.cpuTimeMs - oldInfo.cpuTimeMs;
} else {
// new process. delta cpu == total cpu
pInfo.cpuTimeMsDelta = pInfo.cpuTimeMs;
}
}
processTree.clear();
processTree = allProcessInfo;
} else {
// clearing process tree to mimic semantics of existing Procfs impl
processTree.clear();
}
}
}
@Override
public boolean checkPidPgrpidForMatch() {
// This is always true on Windows, because the pid doubles as a job object
// name for task management.
return true;
}
@Override
public String getProcessTreeDump() {
StringBuilder ret = new StringBuilder();
// The header.
ret.append(String.format("\t|- PID " + "CPU_TIME(MILLIS) "
+ "VMEM(BYTES) WORKING_SET(BYTES)%n"));
for (ProcessInfo p : processTree.values()) {
if (p != null) {
ret.append(String.format("\t|- %s %d %d %d%n", p.pid,
p.cpuTimeMs, p.vmem, p.workingSet));
}
}
return ret.toString();
}
@Override
public long getVirtualMemorySize(int olderThanAge) {
long total = UNAVAILABLE;
for (ProcessInfo p : processTree.values()) {
if (p != null) {
if (total == UNAVAILABLE) {
total = 0;
}
if (p.age > olderThanAge) {
total += p.vmem;
}
}
}
return total;
}
@Override
@SuppressWarnings("deprecation")
public long getCumulativeVmem(int olderThanAge) {
return getVirtualMemorySize(olderThanAge);
}
@Override
public long getRssMemorySize(int olderThanAge) {
long total = UNAVAILABLE;
for (ProcessInfo p : processTree.values()) {
if (p != null) {
if (total == UNAVAILABLE) {
total = 0;
}
if (p.age > olderThanAge) {
total += p.workingSet;
}
}
}
return total;
}
@Override
@SuppressWarnings("deprecation")
public long getCumulativeRssmem(int olderThanAge) {
return getRssMemorySize(olderThanAge);
}
@Override
public long getCumulativeCpuTime() {
for (ProcessInfo p : processTree.values()) {
if (cpuTimeMs == UNAVAILABLE) {
cpuTimeMs = 0;
}
cpuTimeMs += p.cpuTimeMsDelta;
}
return cpuTimeMs;
}
@Override
public float getCpuUsagePercent() {
return UNAVAILABLE;
}
}
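// Parsing sketch; not part of the original file. Feeds createProcessInfo the
// "pid,vmem,workingSet,cpuTimeMs" lines that winutils emits, with made-up
// numbers, the same way the unit tests stub out the shell call.
class WindowsBasedProcessTreeExample {
  public static void main(String[] args) {
    WindowsBasedProcessTree tree = new WindowsBasedProcessTree("1234") {
      @Override
      String getAllProcessInfoFromShell() {
        return "1234,1048576,524288,100\r\n5678,2097152,1048576,200\r\n";
      }
    };
    tree.updateProcessTree();
    System.out.println(tree.getVirtualMemorySize()); // 3145728
    System.out.println(tree.getCumulativeCpuTime()); // 300
  }
}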
| 7,367 | 30.220339 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ApplicationClassLoader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.util;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
/**
* This type has been deprecated in favor of
* {@link org.apache.hadoop.util.ApplicationClassLoader}. All new uses of
* ApplicationClassLoader should use that type instead.
*/
@Public
@Unstable
@Deprecated
public class ApplicationClassLoader extends
org.apache.hadoop.util.ApplicationClassLoader {
public ApplicationClassLoader(URL[] urls, ClassLoader parent,
List<String> systemClasses) {
super(urls, parent, systemClasses);
}
public ApplicationClassLoader(String classpath, ClassLoader parent,
List<String> systemClasses) throws MalformedURLException {
super(classpath, parent, systemClasses);
}
}
| 1,706 | 34.5625 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/StringHelper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.util;
import com.google.common.base.Joiner;
import com.google.common.base.Splitter;
import java.util.regex.Pattern;
import org.apache.hadoop.classification.InterfaceAudience.Private;
/**
* Common string manipulation helpers
*/
@Private
public final class StringHelper {
// Common joiners to avoid per join creation of joiners
public static final Joiner SSV_JOINER = Joiner.on(' ');
public static final Joiner CSV_JOINER = Joiner.on(',');
public static final Joiner JOINER = Joiner.on("");
public static final Joiner _JOINER = Joiner.on('_');
public static final Joiner PATH_JOINER = Joiner.on('/');
public static final Joiner PATH_ARG_JOINER = Joiner.on("/:");
public static final Joiner DOT_JOINER = Joiner.on('.');
public static final Splitter SSV_SPLITTER =
Splitter.on(' ').omitEmptyStrings().trimResults();
public static final Splitter _SPLITTER = Splitter.on('_').trimResults();
  private static final Pattern ABS_URL_RE = Pattern.compile("^(?:\\w+:)?//");
/**
* Join on space.
* @param args to join
* @return args joined by space
*/
public static String sjoin(Object... args) {
return SSV_JOINER.join(args);
}
/**
* Join on comma.
* @param args to join
* @return args joined by comma
*/
public static String cjoin(Object... args) {
return CSV_JOINER.join(args);
}
/**
* Join on dot
* @param args to join
* @return args joined by dot
*/
public static String djoin(Object... args) {
return DOT_JOINER.join(args);
}
  /**
   * Join on underscore.
   * @param args to join
   * @return args joined by underscore
   */
public static String _join(Object... args) {
return _JOINER.join(args);
}
/**
* Join on slash
* @param args to join
* @return args joined with slash
*/
public static String pjoin(Object... args) {
return PATH_JOINER.join(args);
}
/**
* Join on slash and colon (e.g., path args in routing spec)
* @param args to join
* @return args joined with /:
*/
public static String pajoin(Object... args) {
return PATH_ARG_JOINER.join(args);
}
  /**
   * Join without separator.
   * @param args to join
   * @return joined args with no separator
   */
public static String join(Object... args) {
return JOINER.join(args);
}
/**
* Join with a separator
* @param sep the separator
* @param args to join
* @return args joined with a separator
*/
public static String joins(String sep, Object...args) {
return Joiner.on(sep).join(args);
}
/**
* Split on space and trim results.
* @param s the string to split
* @return an iterable of strings
*/
public static Iterable<String> split(CharSequence s) {
return SSV_SPLITTER.split(s);
}
/**
* Split on _ and trim results
* @param s the string to split
* @return an iterable of strings
*/
public static Iterable<String> _split(CharSequence s) {
return _SPLITTER.split(s);
}
  /**
   * Check whether a url is absolute or not.
   * @param url to check
   * @return true if url starts with scheme:// or //
   */
public static boolean isAbsUrl(CharSequence url) {
return ABS_URL_RE.matcher(url).find();
}
/**
* Join url components
* @param pathPrefix for relative urls
* @param args url components to join
* @return an url string
*/
public static String ujoin(String pathPrefix, String... args) {
StringBuilder sb = new StringBuilder();
boolean first = true;
for (String part : args) {
if (first) {
first = false;
if (part.startsWith("#") || isAbsUrl(part)) {
sb.append(part);
} else {
uappend(sb, pathPrefix);
uappend(sb, part);
}
} else {
uappend(sb, part);
}
}
return sb.toString();
}
private static void uappend(StringBuilder sb, String part) {
if((sb.length() <= 0 || sb.charAt(sb.length() - 1) != '/')
&& !part.startsWith("/")) {
sb.append('/');
}
sb.append(part);
}
}
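// Behavior sketch; not part of the original file. Sample outputs for the
// joiners above are shown in the trailing comments.
class StringHelperExample {
  public static void main(String[] args) {
    System.out.println(StringHelper.pjoin("a", "b", "c"));   // a/b/c
    System.out.println(StringHelper.pajoin("app", "appid")); // app/:appid
    System.out.println(StringHelper.ujoin("/ws/v1", "cluster", "apps"));
    // -> /ws/v1/cluster/apps
    System.out.println(StringHelper.isAbsUrl("http://h:80/x")); // true
    System.out.println(StringHelper.isAbsUrl("/relative"));     // false
  }
}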
| 4,839 | 26.191011 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorProcessTree.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.util;
import java.lang.reflect.Constructor;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
/**
 * Interface class to obtain process resource usage.
* NOTE: This class should not be used by external users, but only by external
* developers to extend and include their own process-tree implementation,
* especially for platforms other than Linux and Windows.
*/
@Public
@Evolving
public abstract class ResourceCalculatorProcessTree extends Configured {
static final Log LOG = LogFactory
.getLog(ResourceCalculatorProcessTree.class);
public static final int UNAVAILABLE = -1;
/**
* Create process-tree instance with specified root process.
*
* Subclass must override this.
* @param root process-tree root-process
*/
public ResourceCalculatorProcessTree(String root) {
}
/**
   * Update the process-tree with the latest state.
   *
   * Each call to this function should increment the age of the running
   * processes that already exist in the process tree. Age is used by other
   * APIs of the interface.
*
*/
public abstract void updateProcessTree();
/**
* Get a dump of the process-tree.
*
* @return a string concatenating the dump of information of all the processes
* in the process-tree
*/
public abstract String getProcessTreeDump();
/**
* Get the virtual memory used by all the processes in the
* process-tree.
*
* @return virtual memory used by the process-tree in bytes,
* {@link #UNAVAILABLE} if it cannot be calculated.
*/
public long getVirtualMemorySize() {
return getVirtualMemorySize(0);
}
/**
* Get the virtual memory used by all the processes in the
* process-tree.
*
* @return virtual memory used by the process-tree in bytes,
* {@link #UNAVAILABLE} if it cannot be calculated.
*/
@Deprecated
public long getCumulativeVmem() {
return getCumulativeVmem(0);
}
/**
* Get the resident set size (rss) memory used by all the processes
* in the process-tree.
*
* @return rss memory used by the process-tree in bytes,
* {@link #UNAVAILABLE} if it cannot be calculated.
*/
public long getRssMemorySize() {
return getRssMemorySize(0);
}
/**
* Get the resident set size (rss) memory used by all the processes
* in the process-tree.
*
* @return rss memory used by the process-tree in bytes,
* {@link #UNAVAILABLE} if it cannot be calculated.
*/
@Deprecated
public long getCumulativeRssmem() {
return getCumulativeRssmem(0);
}
/**
* Get the virtual memory used by all the processes in the
* process-tree that are older than the passed in age.
*
* @param olderThanAge processes above this age are included in the
* memory addition
* @return virtual memory used by the process-tree in bytes for
* processes older than the specified age, {@link #UNAVAILABLE} if it
* cannot be calculated.
*/
public long getVirtualMemorySize(int olderThanAge) {
return UNAVAILABLE;
}
/**
* Get the virtual memory used by all the processes in the
* process-tree that are older than the passed in age.
*
* @param olderThanAge processes above this age are included in the
* memory addition
* @return virtual memory used by the process-tree in bytes for
* processes older than the specified age, {@link #UNAVAILABLE} if it
* cannot be calculated.
*/
@Deprecated
public long getCumulativeVmem(int olderThanAge) {
return UNAVAILABLE;
}
/**
* Get the resident set size (rss) memory used by all the processes
* in the process-tree that are older than the passed in age.
*
* @param olderThanAge processes above this age are included in the
* memory addition
* @return rss memory used by the process-tree in bytes for
* processes older than specified age, {@link #UNAVAILABLE} if it cannot be
* calculated.
*/
public long getRssMemorySize(int olderThanAge) {
return UNAVAILABLE;
}
/**
* Get the resident set size (rss) memory used by all the processes
* in the process-tree that are older than the passed in age.
*
* @param olderThanAge processes above this age are included in the
* memory addition
* @return rss memory used by the process-tree in bytes for
* processes older than specified age, {@link #UNAVAILABLE} if it cannot be
* calculated.
*/
@Deprecated
public long getCumulativeRssmem(int olderThanAge) {
return UNAVAILABLE;
}
/**
* Get the CPU time in millisecond used by all the processes in the
* process-tree since the process-tree was created
*
* @return cumulative CPU time in millisecond since the process-tree
* created, {@link #UNAVAILABLE} if it cannot be calculated.
*/
public long getCumulativeCpuTime() {
return UNAVAILABLE;
}
/**
* Get the CPU usage by all the processes in the process-tree based on
* average between samples as a ratio of overall CPU cycles similar to top.
* Thus, if 2 out of 4 cores are used this should return 200.0.
*
* @return percentage CPU usage since the process-tree was created,
* {@link #UNAVAILABLE} if it cannot be calculated.
*/
public float getCpuUsagePercent() {
return UNAVAILABLE;
}
/** Verify that the tree process id is same as its process group id.
* @return true if the process id matches else return false.
*/
public abstract boolean checkPidPgrpidForMatch();
/**
* Create the ResourceCalculatorProcessTree rooted to specified process
* from the class name and configure it. If class name is null, this method
* will try and return a process tree plugin available for this system.
*
* @param pid process pid of the root of the process tree
* @param clazz class-name
* @param conf configure the plugin with this.
*
   * @return ResourceCalculatorProcessTree or null if a
   *         ResourceCalculatorProcessTree is not available for this system.
*/
public static ResourceCalculatorProcessTree getResourceCalculatorProcessTree(
String pid, Class<? extends ResourceCalculatorProcessTree> clazz, Configuration conf) {
if (clazz != null) {
try {
Constructor <? extends ResourceCalculatorProcessTree> c = clazz.getConstructor(String.class);
ResourceCalculatorProcessTree rctree = c.newInstance(pid);
rctree.setConf(conf);
return rctree;
} catch(Exception e) {
throw new RuntimeException(e);
}
}
    // No class given, try an OS-specific class
if (ProcfsBasedProcessTree.isAvailable()) {
return new ProcfsBasedProcessTree(pid);
}
if (WindowsBasedProcessTree.isAvailable()) {
return new WindowsBasedProcessTree(pid);
}
// Not supported on this system.
return null;
}
}
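// Factory-usage sketch; not part of the original file. Picks the platform
// process tree for a root pid and samples it; "4321" is a placeholder pid.
class ResourceCalculatorProcessTreeExample {
  public static void main(String[] args) {
    ResourceCalculatorProcessTree tree =
        ResourceCalculatorProcessTree.getResourceCalculatorProcessTree(
            "4321", null, new Configuration());
    if (tree == null) {
      System.out.println("no process-tree implementation for this platform");
      return;
    }
    tree.updateProcessTree();
    System.out.println("rss bytes: " + tree.getRssMemorySize());
    System.out.println("cpu ms:    " + tree.getCumulativeCpuTime());
  }
}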
| 8,016 | 32.26556 | 101 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorPlugin.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.util;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.SysInfo;
/**
* Plugin to calculate resource information on the system.
*/
@InterfaceAudience.LimitedPrivate({"YARN", "MAPREDUCE"})
@InterfaceStability.Unstable
public class ResourceCalculatorPlugin extends Configured {
private static final Log LOG =
LogFactory.getLog(ResourceCalculatorPlugin.class);
private final SysInfo sys;
protected ResourceCalculatorPlugin() {
this(SysInfo.newInstance());
}
public ResourceCalculatorPlugin(SysInfo sys) {
this.sys = sys;
}
/**
* Obtain the total size of the virtual memory present in the system.
*
* @return virtual memory size in bytes.
*/
public long getVirtualMemorySize() {
return sys.getVirtualMemorySize();
}
/**
* Obtain the total size of the physical memory present in the system.
*
* @return physical memory size bytes.
*/
public long getPhysicalMemorySize() {
return sys.getPhysicalMemorySize();
}
/**
* Obtain the total size of the available virtual memory present
* in the system.
*
* @return available virtual memory size in bytes.
*/
public long getAvailableVirtualMemorySize() {
return sys.getAvailableVirtualMemorySize();
}
/**
* Obtain the total size of the available physical memory present
* in the system.
*
   * @return available physical memory size in bytes.
*/
public long getAvailablePhysicalMemorySize() {
return sys.getAvailablePhysicalMemorySize();
}
/**
* Obtain the total number of logical processors present on the system.
*
* @return number of logical processors
*/
public int getNumProcessors() {
return sys.getNumProcessors();
}
/**
* Obtain total number of physical cores present on the system.
*
* @return number of physical cores
*/
public int getNumCores() {
return sys.getNumCores();
}
/**
   * Obtain the CPU frequency of the system.
*
* @return CPU frequency in kHz
*/
public long getCpuFrequency() {
return sys.getCpuFrequency();
}
/**
   * Obtain the cumulative CPU time since the system was booted.
*
* @return cumulative CPU time in milliseconds
*/
public long getCumulativeCpuTime() {
return sys.getCumulativeCpuTime();
}
/**
   * Obtain the CPU usage % of the machine. Returns -1 if it is unavailable.
*
* @return CPU usage in %
*/
public float getCpuUsage() {
return sys.getCpuUsage();
}
/**
* Obtain the aggregated number of bytes read over the network.
* @return total number of bytes read.
*/
public long getNetworkBytesRead() {
return sys.getNetworkBytesRead();
}
/**
* Obtain the aggregated number of bytes written to the network.
* @return total number of bytes written.
*/
public long getNetworkBytesWritten() {
return sys.getNetworkBytesWritten();
}
/**
* Obtain the aggregated number of bytes read from disks.
*
* @return total number of bytes read.
*/
public long getStorageBytesRead() {
return sys.getStorageBytesRead();
}
/**
* Obtain the aggregated number of bytes written to disks.
*
* @return total number of bytes written.
*/
public long getStorageBytesWritten() {
return sys.getStorageBytesWritten();
}
/**
   * Create the ResourceCalculatorPlugin from the class name and configure it.
   * If the class name is null, this method will try to return a resource
   * calculator plugin available for this system.
*
* @param clazz ResourceCalculator plugin class-name
* @param conf configure the plugin with this.
* @return ResourceCalculatorPlugin or null if ResourceCalculatorPlugin is not
* available for current system
*/
public static ResourceCalculatorPlugin getResourceCalculatorPlugin(
Class<? extends ResourceCalculatorPlugin> clazz, Configuration conf) {
if (clazz != null) {
return ReflectionUtils.newInstance(clazz, conf);
}
try {
return new ResourceCalculatorPlugin();
} catch (Throwable t) {
LOG.warn(t + ": Failed to instantiate default resource calculator.", t);
}
return null;
}
}
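// Illustrative usage sketch, not part of the original file: obtain the
// default SysInfo-backed plugin (clazz == null) and sample a few metrics.
class ResourceCalculatorPluginUsageSketch {
  static String summarize(Configuration conf) {
    ResourceCalculatorPlugin plugin =
        ResourceCalculatorPlugin.getResourceCalculatorPlugin(null, conf);
    if (plugin == null) {
      return "no resource calculator available";
    }
    return plugin.getNumProcessors() + " processors, "
        + plugin.getPhysicalMemorySize() + " bytes physical memory, "
        + plugin.getCpuUsage() + "% CPU";
  }
}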
| 5,298 | 26.889474 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Clock.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.util;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Stable;
/**
 * A simple clock interface that returns the current time.
*/
@Public
@Stable
public interface Clock {
long getTime();
}
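// Minimal implementation sketch, not part of the original file: a wall-clock
// Clock backed by System.currentTimeMillis(). YARN ships similar concrete
// clocks; this standalone version is illustrative and shows how tests could
// substitute a fixed or controllable time source behind the same interface.
class WallClockSketch implements Clock {
  @Override
  public long getTime() {
    return System.currentTimeMillis(); // milliseconds since the epoch
  }
}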
| 1,082 | 32.84375 | 74 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/timeline/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Public
package org.apache.hadoop.yarn.util.timeline;
import org.apache.hadoop.classification.InterfaceAudience;
| 938 | 41.681818 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/timeline/TimelineUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.util.timeline;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.util.VersionInfo;
import org.apache.hadoop.yarn.api.records.timeline.TimelineAbout;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.util.YarnVersionInfo;
import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider;
import org.codehaus.jackson.JsonGenerationException;
import org.codehaus.jackson.map.JsonMappingException;
import org.codehaus.jackson.map.ObjectMapper;
/**
* The helper class for the timeline module.
*
*/
@Public
@Evolving
public class TimelineUtils {
private static ObjectMapper mapper;
static {
mapper = new ObjectMapper();
YarnJacksonJaxbJsonProvider.configObjectMapper(mapper);
}
/**
 * Serialize a POJO object into a JSON string in a compact (non-pretty) format
*
* @param o
* an object to serialize
* @return a JSON string
* @throws IOException
* @throws JsonMappingException
* @throws JsonGenerationException
*/
public static String dumpTimelineRecordtoJSON(Object o)
throws JsonGenerationException, JsonMappingException, IOException {
return dumpTimelineRecordtoJSON(o, false);
}
/**
* Serialize a POJO object into a JSON string
*
* @param o
* an object to serialize
* @param pretty
 *          whether to use a pretty format or not
* @return a JSON string
* @throws IOException
* @throws JsonMappingException
* @throws JsonGenerationException
*/
public static String dumpTimelineRecordtoJSON(Object o, boolean pretty)
throws JsonGenerationException, JsonMappingException, IOException {
if (pretty) {
return mapper.writerWithDefaultPrettyPrinter().writeValueAsString(o);
} else {
return mapper.writeValueAsString(o);
}
}
public static TimelineAbout createTimelineAbout(String about) {
TimelineAbout tsInfo = new TimelineAbout(about);
tsInfo.setHadoopBuildVersion(VersionInfo.getBuildVersion());
tsInfo.setHadoopVersion(VersionInfo.getVersion());
tsInfo.setHadoopVersionBuiltOn(VersionInfo.getDate());
tsInfo.setTimelineServiceBuildVersion(YarnVersionInfo.getBuildVersion());
tsInfo.setTimelineServiceVersion(YarnVersionInfo.getVersion());
tsInfo.setTimelineServiceVersionBuiltOn(YarnVersionInfo.getDate());
return tsInfo;
}
public static InetSocketAddress getTimelineTokenServiceAddress(
Configuration conf) {
InetSocketAddress timelineServiceAddr = null;
if (YarnConfiguration.useHttps(conf)) {
timelineServiceAddr = conf.getSocketAddr(
YarnConfiguration.TIMELINE_SERVICE_WEBAPP_HTTPS_ADDRESS,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_WEBAPP_HTTPS_ADDRESS,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_WEBAPP_HTTPS_PORT);
} else {
timelineServiceAddr = conf.getSocketAddr(
YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_WEBAPP_ADDRESS,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_WEBAPP_PORT);
}
return timelineServiceAddr;
}
public static Text buildTimelineTokenService(Configuration conf) {
InetSocketAddress timelineServiceAddr =
getTimelineTokenServiceAddress(conf);
return SecurityUtil.buildTokenService(timelineServiceAddr);
}
}
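// Usage sketch, not part of the original file: serialize a timeline record
// to JSON with the helpers above. TimelineAbout is used only as a handy
// example of a Jackson-serializable record; both Jackson exceptions declared
// by dumpTimelineRecordtoJSON are IOException subclasses.
class TimelineUtilsUsageSketch {
  static String aboutAsPrettyJson() throws IOException {
    TimelineAbout about = TimelineUtils.createTimelineAbout("demo instance");
    return TimelineUtils.dumpTimelineRecordtoJSON(about, true);
  }
}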
| 4,453 | 35.211382 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.util.resource;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.Records;
@InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"})
@Unstable
public class Resources {
// Java doesn't have const :(
private static final Resource NONE = new Resource() {
@Override
public int getMemory() {
return 0;
}
@Override
public void setMemory(int memory) {
throw new RuntimeException("NONE cannot be modified!");
}
@Override
public int getVirtualCores() {
return 0;
}
@Override
public void setVirtualCores(int cores) {
throw new RuntimeException("NONE cannot be modified!");
}
@Override
public int compareTo(Resource o) {
int diff = 0 - o.getMemory();
if (diff == 0) {
diff = 0 - o.getVirtualCores();
}
return diff;
}
};
private static final Resource UNBOUNDED = new Resource() {
@Override
public int getMemory() {
return Integer.MAX_VALUE;
}
@Override
public void setMemory(int memory) {
throw new RuntimeException("UNBOUNDED cannot be modified!");
}
@Override
public int getVirtualCores() {
return Integer.MAX_VALUE;
}
@Override
public void setVirtualCores(int cores) {
throw new RuntimeException("UNBOUNDED cannot be modified!");
}
@Override
public int compareTo(Resource o) {
int diff = Integer.MAX_VALUE - o.getMemory();
if (diff == 0) {
diff = Integer.MAX_VALUE - o.getVirtualCores();
}
return diff;
}
};
public static Resource createResource(int memory) {
return createResource(memory, (memory > 0) ? 1 : 0);
}
public static Resource createResource(int memory, int cores) {
Resource resource = Records.newRecord(Resource.class);
resource.setMemory(memory);
resource.setVirtualCores(cores);
return resource;
}
public static Resource none() {
return NONE;
}
public static Resource unbounded() {
return UNBOUNDED;
}
public static Resource clone(Resource res) {
return createResource(res.getMemory(), res.getVirtualCores());
}
public static Resource addTo(Resource lhs, Resource rhs) {
lhs.setMemory(lhs.getMemory() + rhs.getMemory());
lhs.setVirtualCores(lhs.getVirtualCores() + rhs.getVirtualCores());
return lhs;
}
public static Resource add(Resource lhs, Resource rhs) {
return addTo(clone(lhs), rhs);
}
public static Resource subtractFrom(Resource lhs, Resource rhs) {
lhs.setMemory(lhs.getMemory() - rhs.getMemory());
lhs.setVirtualCores(lhs.getVirtualCores() - rhs.getVirtualCores());
return lhs;
}
public static Resource subtract(Resource lhs, Resource rhs) {
return subtractFrom(clone(lhs), rhs);
}
public static Resource negate(Resource resource) {
return subtract(NONE, resource);
}
public static Resource multiplyTo(Resource lhs, double by) {
lhs.setMemory((int)(lhs.getMemory() * by));
lhs.setVirtualCores((int)(lhs.getVirtualCores() * by));
return lhs;
}
public static Resource multiply(Resource lhs, double by) {
return multiplyTo(clone(lhs), by);
}
  /**
   * Multiply {@code rhs} by {@code by} and add the result to {@code lhs},
   * without creating any new {@link Resource} object.
   */
public static Resource multiplyAndAddTo(
Resource lhs, Resource rhs, double by) {
lhs.setMemory(lhs.getMemory() + (int)(rhs.getMemory() * by));
lhs.setVirtualCores(lhs.getVirtualCores()
+ (int)(rhs.getVirtualCores() * by));
return lhs;
}
public static Resource multiplyAndNormalizeUp(
      ResourceCalculator calculator, Resource lhs, double by, Resource factor) {
return calculator.multiplyAndNormalizeUp(lhs, by, factor);
}
public static Resource multiplyAndNormalizeDown(
      ResourceCalculator calculator, Resource lhs, double by, Resource factor) {
return calculator.multiplyAndNormalizeDown(lhs, by, factor);
}
public static Resource multiplyAndRoundDown(Resource lhs, double by) {
Resource out = clone(lhs);
out.setMemory((int)(lhs.getMemory() * by));
out.setVirtualCores((int)(lhs.getVirtualCores() * by));
return out;
}
public static Resource normalize(
ResourceCalculator calculator, Resource lhs, Resource min,
Resource max, Resource increment) {
return calculator.normalize(lhs, min, max, increment);
}
public static Resource roundUp(
ResourceCalculator calculator, Resource lhs, Resource factor) {
return calculator.roundUp(lhs, factor);
}
public static Resource roundDown(
ResourceCalculator calculator, Resource lhs, Resource factor) {
return calculator.roundDown(lhs, factor);
}
public static boolean isInvalidDivisor(
ResourceCalculator resourceCalculator, Resource divisor) {
return resourceCalculator.isInvalidDivisor(divisor);
}
public static float ratio(
ResourceCalculator resourceCalculator, Resource lhs, Resource rhs) {
return resourceCalculator.ratio(lhs, rhs);
}
public static float divide(
ResourceCalculator resourceCalculator,
Resource clusterResource, Resource lhs, Resource rhs) {
return resourceCalculator.divide(clusterResource, lhs, rhs);
}
public static Resource divideAndCeil(
ResourceCalculator resourceCalculator, Resource lhs, int rhs) {
return resourceCalculator.divideAndCeil(lhs, rhs);
}
public static boolean equals(Resource lhs, Resource rhs) {
return lhs.equals(rhs);
}
public static boolean lessThan(
ResourceCalculator resourceCalculator,
Resource clusterResource,
Resource lhs, Resource rhs) {
return (resourceCalculator.compare(clusterResource, lhs, rhs) < 0);
}
public static boolean lessThanOrEqual(
ResourceCalculator resourceCalculator,
Resource clusterResource,
Resource lhs, Resource rhs) {
return (resourceCalculator.compare(clusterResource, lhs, rhs) <= 0);
}
public static boolean greaterThan(
ResourceCalculator resourceCalculator,
Resource clusterResource,
Resource lhs, Resource rhs) {
return resourceCalculator.compare(clusterResource, lhs, rhs) > 0;
}
public static boolean greaterThanOrEqual(
ResourceCalculator resourceCalculator,
Resource clusterResource,
Resource lhs, Resource rhs) {
return resourceCalculator.compare(clusterResource, lhs, rhs) >= 0;
}
public static Resource min(
ResourceCalculator resourceCalculator,
Resource clusterResource,
Resource lhs, Resource rhs) {
return resourceCalculator.compare(clusterResource, lhs, rhs) <= 0 ? lhs : rhs;
}
public static Resource max(
ResourceCalculator resourceCalculator,
Resource clusterResource,
Resource lhs, Resource rhs) {
return resourceCalculator.compare(clusterResource, lhs, rhs) >= 0 ? lhs : rhs;
}
public static boolean fitsIn(Resource smaller, Resource bigger) {
return smaller.getMemory() <= bigger.getMemory() &&
smaller.getVirtualCores() <= bigger.getVirtualCores();
}
public static Resource componentwiseMin(Resource lhs, Resource rhs) {
return createResource(Math.min(lhs.getMemory(), rhs.getMemory()),
Math.min(lhs.getVirtualCores(), rhs.getVirtualCores()));
}
public static Resource componentwiseMax(Resource lhs, Resource rhs) {
return createResource(Math.max(lhs.getMemory(), rhs.getMemory()),
Math.max(lhs.getVirtualCores(), rhs.getVirtualCores()));
}
}
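// Usage sketch, not part of the original file: basic Resource arithmetic.
// Note the naming convention above: addTo/subtractFrom/multiplyTo mutate
// their first argument, while add/subtract/multiply operate on a clone.
class ResourcesUsageSketch {
  static Resource demo() {
    Resource total = Resources.createResource(8192, 8); // 8192 MB, 8 vcores
    Resource used = Resources.createResource(2048, 2);
    Resource free = Resources.subtract(total, used);    // total is untouched
    Resources.addTo(free, Resources.createResource(0, 1)); // mutates free
    return free; // 6144 MB, 7 vcores
  }
}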
| 8,534 | 29.373665 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.util.resource;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.Resource;
/**
* A set of {@link Resource} comparison and manipulation interfaces.
*/
@Private
@Unstable
public abstract class ResourceCalculator {
public abstract int
compare(Resource clusterResource, Resource lhs, Resource rhs);
public static int divideAndCeil(int a, int b) {
if (b == 0) {
return 0;
}
return (a + (b - 1)) / b;
}
public static int roundUp(int a, int b) {
return divideAndCeil(a, b) * b;
}
public static int roundDown(int a, int b) {
return (a / b) * b;
}
/**
* Compute the number of containers which can be allocated given
* <code>available</code> and <code>required</code> resources.
*
* @param available available resources
* @param required required resources
* @return number of containers which can be allocated
*/
public abstract int computeAvailableContainers(
Resource available, Resource required);
/**
* Multiply resource <code>r</code> by factor <code>by</code>
* and normalize up using step-factor <code>stepFactor</code>.
*
* @param r resource to be multiplied
* @param by multiplier
* @param stepFactor factor by which to normalize up
* @return resulting normalized resource
*/
public abstract Resource multiplyAndNormalizeUp(
Resource r, double by, Resource stepFactor);
/**
* Multiply resource <code>r</code> by factor <code>by</code>
* and normalize down using step-factor <code>stepFactor</code>.
*
* @param r resource to be multiplied
* @param by multiplier
* @param stepFactor factor by which to normalize down
* @return resulting normalized resource
*/
public abstract Resource multiplyAndNormalizeDown(
Resource r, double by, Resource stepFactor);
/**
* Normalize resource <code>r</code> given the base
* <code>minimumResource</code> and verify against max allowed
* <code>maximumResource</code>
*
* @param r resource
* @param minimumResource step-factor
* @param maximumResource the upper bound of the resource to be allocated
* @return normalized resource
*/
public Resource normalize(Resource r, Resource minimumResource,
Resource maximumResource) {
return normalize(r, minimumResource, maximumResource, minimumResource);
}
/**
* Normalize resource <code>r</code> given the base
* <code>minimumResource</code> and verify against max allowed
   * <code>maximumResource</code> using a step factor for the normalization.
*
* @param r resource
* @param minimumResource minimum value
* @param maximumResource the upper bound of the resource to be allocated
* @param stepFactor the increment for resources to be allocated
* @return normalized resource
*/
public abstract Resource normalize(Resource r, Resource minimumResource,
Resource maximumResource,
Resource stepFactor);
/**
* Round-up resource <code>r</code> given factor <code>stepFactor</code>.
*
* @param r resource
* @param stepFactor step-factor
* @return rounded resource
*/
public abstract Resource roundUp(Resource r, Resource stepFactor);
/**
* Round-down resource <code>r</code> given factor <code>stepFactor</code>.
*
* @param r resource
* @param stepFactor step-factor
* @return rounded resource
*/
public abstract Resource roundDown(Resource r, Resource stepFactor);
/**
* Divide resource <code>numerator</code> by resource <code>denominator</code>
* using specified policy (domination, average, fairness etc.); hence overall
* <code>clusterResource</code> is provided for context.
*
* @param clusterResource cluster resources
* @param numerator numerator
* @param denominator denominator
* @return <code>numerator</code>/<code>denominator</code>
* using specific policy
*/
public abstract float divide(
Resource clusterResource, Resource numerator, Resource denominator);
/**
* Determine if a resource is not suitable for use as a divisor
* (will result in divide by 0, etc)
*
* @param r resource
   * @return true if divisor is invalid (should not be used), false otherwise
*/
public abstract boolean isInvalidDivisor(Resource r);
/**
* Ratio of resource <code>a</code> to resource <code>b</code>.
*
* @param a resource
* @param b resource
* @return ratio of resource <code>a</code> to resource <code>b</code>
*/
public abstract float ratio(Resource a, Resource b);
/**
* Divide-and-ceil <code>numerator</code> by <code>denominator</code>.
*
* @param numerator numerator resource
* @param denominator denominator
* @return resultant resource
*/
public abstract Resource divideAndCeil(Resource numerator, int denominator);
}
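// Worked example, not part of the original file: the static rounding helpers
// above with a 512 MB step factor.
class ResourceCalculatorRoundingSketch {
  static void demo() {
    int ceil = ResourceCalculator.divideAndCeil(1000, 512); // (1000+511)/512 = 2
    int up = ResourceCalculator.roundUp(1000, 512);         // 2 * 512 = 1024
    int down = ResourceCalculator.roundDown(1000, 512);     // (1000/512) * 512 = 512
    System.out.println(ceil + " " + up + " " + down);
  }
}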
| 5,842 | 32.388571 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.util.resource;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.Resource;
/**
* A {@link ResourceCalculator} which uses the concept of
* <em>dominant resource</em> to compare multi-dimensional resources.
*
 * Essentially the idea is that in a multi-resource environment,
* the resource allocation should be determined by the dominant share
* of an entity (user or queue), which is the maximum share that the
* entity has been allocated of any resource.
*
* In a nutshell, it seeks to maximize the minimum dominant share across
* all entities.
*
* For example, if user A runs CPU-heavy tasks and user B runs
 * memory-heavy tasks, it attempts to equalize the CPU share of user A
 * with the memory share of user B.
*
* In the single resource case, it reduces to max-min fairness for that resource.
*
* See the Dominant Resource Fairness paper for more details:
* www.cs.berkeley.edu/~matei/papers/2011/nsdi_drf.pdf
*/
@Private
@Unstable
public class DominantResourceCalculator extends ResourceCalculator {
@Override
public int compare(Resource clusterResource, Resource lhs, Resource rhs) {
if (lhs.equals(rhs)) {
return 0;
}
if (isInvalidDivisor(clusterResource)) {
if ((lhs.getMemory() < rhs.getMemory() && lhs.getVirtualCores() > rhs
.getVirtualCores())
|| (lhs.getMemory() > rhs.getMemory() && lhs.getVirtualCores() < rhs
.getVirtualCores())) {
return 0;
} else if (lhs.getMemory() > rhs.getMemory()
|| lhs.getVirtualCores() > rhs.getVirtualCores()) {
return 1;
} else if (lhs.getMemory() < rhs.getMemory()
|| lhs.getVirtualCores() < rhs.getVirtualCores()) {
return -1;
}
}
float l = getResourceAsValue(clusterResource, lhs, true);
float r = getResourceAsValue(clusterResource, rhs, true);
if (l < r) {
return -1;
} else if (l > r) {
return 1;
} else {
l = getResourceAsValue(clusterResource, lhs, false);
r = getResourceAsValue(clusterResource, rhs, false);
if (l < r) {
return -1;
} else if (l > r) {
return 1;
}
}
return 0;
}
/**
* Use 'dominant' for now since we only have 2 resources - gives us a slight
* performance boost.
*
* Once we add more resources, we'll need a more complicated (and slightly
   * less performant) algorithm.
*/
protected float getResourceAsValue(
Resource clusterResource, Resource resource, boolean dominant) {
// Just use 'dominant' resource
return (dominant) ?
Math.max(
(float)resource.getMemory() / clusterResource.getMemory(),
(float)resource.getVirtualCores() / clusterResource.getVirtualCores()
)
:
Math.min(
(float)resource.getMemory() / clusterResource.getMemory(),
(float)resource.getVirtualCores() / clusterResource.getVirtualCores()
);
}
@Override
public int computeAvailableContainers(Resource available, Resource required) {
return Math.min(
available.getMemory() / required.getMemory(),
available.getVirtualCores() / required.getVirtualCores());
}
@Override
public float divide(Resource clusterResource,
Resource numerator, Resource denominator) {
return
getResourceAsValue(clusterResource, numerator, true) /
getResourceAsValue(clusterResource, denominator, true);
}
@Override
public boolean isInvalidDivisor(Resource r) {
    return r.getMemory() == 0 || r.getVirtualCores() == 0;
}
@Override
public float ratio(Resource a, Resource b) {
return Math.max(
(float)a.getMemory()/b.getMemory(),
(float)a.getVirtualCores()/b.getVirtualCores()
);
}
@Override
public Resource divideAndCeil(Resource numerator, int denominator) {
return Resources.createResource(
divideAndCeil(numerator.getMemory(), denominator),
divideAndCeil(numerator.getVirtualCores(), denominator)
);
}
@Override
public Resource normalize(Resource r, Resource minimumResource,
Resource maximumResource, Resource stepFactor) {
int normalizedMemory = Math.min(
roundUp(
Math.max(r.getMemory(), minimumResource.getMemory()),
stepFactor.getMemory()),
maximumResource.getMemory());
int normalizedCores = Math.min(
roundUp(
Math.max(r.getVirtualCores(), minimumResource.getVirtualCores()),
stepFactor.getVirtualCores()),
maximumResource.getVirtualCores());
return Resources.createResource(normalizedMemory,
normalizedCores);
}
@Override
public Resource roundUp(Resource r, Resource stepFactor) {
return Resources.createResource(
roundUp(r.getMemory(), stepFactor.getMemory()),
roundUp(r.getVirtualCores(), stepFactor.getVirtualCores())
);
}
@Override
public Resource roundDown(Resource r, Resource stepFactor) {
return Resources.createResource(
roundDown(r.getMemory(), stepFactor.getMemory()),
roundDown(r.getVirtualCores(), stepFactor.getVirtualCores())
);
}
@Override
public Resource multiplyAndNormalizeUp(Resource r, double by,
Resource stepFactor) {
return Resources.createResource(
roundUp(
(int)Math.ceil(r.getMemory() * by), stepFactor.getMemory()),
roundUp(
(int)Math.ceil(r.getVirtualCores() * by),
stepFactor.getVirtualCores())
);
}
@Override
public Resource multiplyAndNormalizeDown(Resource r, double by,
Resource stepFactor) {
return Resources.createResource(
roundDown(
(int)(r.getMemory() * by),
stepFactor.getMemory()
),
roundDown(
(int)(r.getVirtualCores() * by),
stepFactor.getVirtualCores()
)
);
}
}
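// Worked example, not part of the original file. With a cluster of
// (8192 MB, 8 vcores), a request of (2048 MB, 1 vcore) has dominant share
// max(2048/8192, 1/8) = 0.25, while (1024 MB, 4 vcores) has dominant share
// max(1024/8192, 4/8) = 0.5, so compare() ranks the first request lower.
class DominantResourceCalculatorSketch {
  static int demo() {
    DominantResourceCalculator drc = new DominantResourceCalculator();
    Resource cluster = Resources.createResource(8192, 8);
    Resource memHeavy = Resources.createResource(2048, 1);
    Resource cpuHeavy = Resources.createResource(1024, 4);
    return drc.compare(cluster, memHeavy, cpuHeavy); // -1
  }
}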
| 6,964 | 31.699531 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.util.resource;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.Resource;
@Private
@Unstable
public class DefaultResourceCalculator extends ResourceCalculator {
@Override
public int compare(Resource unused, Resource lhs, Resource rhs) {
// Only consider memory
return lhs.getMemory() - rhs.getMemory();
}
@Override
public int computeAvailableContainers(Resource available, Resource required) {
// Only consider memory
return available.getMemory() / required.getMemory();
}
@Override
public float divide(Resource unused,
Resource numerator, Resource denominator) {
return ratio(numerator, denominator);
}
  @Override
  public boolean isInvalidDivisor(Resource r) {
    return r.getMemory() == 0;
  }
@Override
public float ratio(Resource a, Resource b) {
return (float)a.getMemory() / b.getMemory();
}
@Override
public Resource divideAndCeil(Resource numerator, int denominator) {
return Resources.createResource(
divideAndCeil(numerator.getMemory(), denominator));
}
@Override
public Resource normalize(Resource r, Resource minimumResource,
Resource maximumResource, Resource stepFactor) {
int normalizedMemory = Math.min(
roundUp(
Math.max(r.getMemory(), minimumResource.getMemory()),
stepFactor.getMemory()),
maximumResource.getMemory());
return Resources.createResource(normalizedMemory);
}
@Override
public Resource normalize(Resource r, Resource minimumResource,
Resource maximumResource) {
return normalize(r, minimumResource, maximumResource, minimumResource);
}
@Override
public Resource roundUp(Resource r, Resource stepFactor) {
return Resources.createResource(
roundUp(r.getMemory(), stepFactor.getMemory())
);
}
@Override
public Resource roundDown(Resource r, Resource stepFactor) {
return Resources.createResource(
roundDown(r.getMemory(), stepFactor.getMemory()));
}
@Override
public Resource multiplyAndNormalizeUp(Resource r, double by,
Resource stepFactor) {
return Resources.createResource(
roundUp((int)(r.getMemory() * by + 0.5), stepFactor.getMemory())
);
}
@Override
public Resource multiplyAndNormalizeDown(Resource r, double by,
Resource stepFactor) {
return Resources.createResource(
roundDown(
(int)(r.getMemory() * by),
stepFactor.getMemory()
)
);
}
}
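// Worked example, not part of the original file: memory-only normalization
// with a 1024 MB minimum, 8192 MB maximum, and the minimum as step factor.
// A 1500 MB request is rounded up to the next multiple of 1024, i.e. 2048.
class DefaultResourceCalculatorSketch {
  static Resource demo() {
    DefaultResourceCalculator calc = new DefaultResourceCalculator();
    Resource min = Resources.createResource(1024);
    Resource max = Resources.createResource(8192);
    return calc.normalize(Resources.createResource(1500), min, max); // 2048 MB
  }
}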
| 3,502 | 29.72807 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.nodelabels;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
import java.util.regex.Pattern;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.nodelabels.event.NodeLabelsStoreEvent;
import org.apache.hadoop.yarn.nodelabels.event.NodeLabelsStoreEventType;
import org.apache.hadoop.yarn.nodelabels.event.RemoveClusterNodeLabels;
import org.apache.hadoop.yarn.nodelabels.event.StoreNewClusterNodeLabels;
import org.apache.hadoop.yarn.nodelabels.event.UpdateNodeToLabelsMappingsEvent;
import org.apache.hadoop.yarn.util.resource.Resources;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableSet;
@Private
public class CommonNodeLabelsManager extends AbstractService {
protected static final Log LOG = LogFactory.getLog(CommonNodeLabelsManager.class);
private static final int MAX_LABEL_LENGTH = 255;
public static final Set<String> EMPTY_STRING_SET = Collections
.unmodifiableSet(new HashSet<String>(0));
public static final Set<NodeLabel> EMPTY_NODELABEL_SET = Collections
.unmodifiableSet(new HashSet<NodeLabel>(0));
public static final String ANY = "*";
public static final Set<String> ACCESS_ANY_LABEL_SET = ImmutableSet.of(ANY);
private static final Pattern LABEL_PATTERN = Pattern
.compile("^[0-9a-zA-Z][0-9a-zA-Z-_]*");
public static final int WILDCARD_PORT = 0;
/**
* Error messages
*/
@VisibleForTesting
public static final String NODE_LABELS_NOT_ENABLED_ERR =
"Node-label-based scheduling is disabled. Please check "
+ YarnConfiguration.NODE_LABELS_ENABLED;
/**
 * If a user doesn't specify the label of a queue or node, it belongs to
 * DEFAULT_LABEL
*/
public static final String NO_LABEL = "";
protected Dispatcher dispatcher;
protected ConcurrentMap<String, RMNodeLabel> labelCollections =
new ConcurrentHashMap<String, RMNodeLabel>();
protected ConcurrentMap<String, Host> nodeCollections =
new ConcurrentHashMap<String, Host>();
protected final ReadLock readLock;
protected final WriteLock writeLock;
protected NodeLabelsStore store;
private boolean nodeLabelsEnabled = false;
private boolean isDistributedNodeLabelConfiguration = false;
/**
* A <code>Host</code> can have multiple <code>Node</code>s
*/
protected static class Host {
public Set<String> labels;
public Map<NodeId, Node> nms;
protected Host() {
labels =
Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());
nms = new ConcurrentHashMap<NodeId, Node>();
}
public Host copy() {
Host c = new Host();
c.labels = new HashSet<String>(labels);
for (Entry<NodeId, Node> entry : nms.entrySet()) {
c.nms.put(entry.getKey(), entry.getValue().copy());
}
return c;
}
}
protected static class Node {
public Set<String> labels;
public Resource resource;
public boolean running;
public NodeId nodeId;
protected Node(NodeId nodeid) {
labels = null;
resource = Resource.newInstance(0, 0);
running = false;
nodeId = nodeid;
}
public Node copy() {
Node c = new Node(nodeId);
if (labels != null) {
c.labels =
Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());
c.labels.addAll(labels);
} else {
c.labels = null;
}
c.resource = Resources.clone(resource);
c.running = running;
return c;
}
}
private enum NodeLabelUpdateOperation {
ADD,
REMOVE,
REPLACE
}
private final class ForwardingEventHandler implements
EventHandler<NodeLabelsStoreEvent> {
@Override
public void handle(NodeLabelsStoreEvent event) {
handleStoreEvent(event);
}
}
// Dispatcher related code
protected void handleStoreEvent(NodeLabelsStoreEvent event) {
try {
switch (event.getType()) {
case ADD_LABELS:
StoreNewClusterNodeLabels storeNewClusterNodeLabelsEvent =
(StoreNewClusterNodeLabels) event;
store.storeNewClusterNodeLabels(storeNewClusterNodeLabelsEvent
.getLabels());
break;
case REMOVE_LABELS:
RemoveClusterNodeLabels removeClusterNodeLabelsEvent =
(RemoveClusterNodeLabels) event;
store.removeClusterNodeLabels(removeClusterNodeLabelsEvent.getLabels());
break;
case STORE_NODE_TO_LABELS:
UpdateNodeToLabelsMappingsEvent updateNodeToLabelsMappingsEvent =
(UpdateNodeToLabelsMappingsEvent) event;
store.updateNodeToLabelsMappings(updateNodeToLabelsMappingsEvent
.getNodeToLabels());
break;
}
} catch (IOException e) {
LOG.error("Failed to store label modification to storage");
throw new YarnRuntimeException(e);
}
}
public CommonNodeLabelsManager() {
super(CommonNodeLabelsManager.class.getName());
ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
readLock = lock.readLock();
writeLock = lock.writeLock();
}
// for UT purpose
protected void initDispatcher(Configuration conf) {
// create async handler
dispatcher = new AsyncDispatcher();
AsyncDispatcher asyncDispatcher = (AsyncDispatcher) dispatcher;
asyncDispatcher.init(conf);
asyncDispatcher.setDrainEventsOnStop();
}
@Override
protected void serviceInit(Configuration conf) throws Exception {
// set if node labels enabled
nodeLabelsEnabled =
conf.getBoolean(YarnConfiguration.NODE_LABELS_ENABLED,
YarnConfiguration.DEFAULT_NODE_LABELS_ENABLED);
isDistributedNodeLabelConfiguration =
YarnConfiguration.isDistributedNodeLabelConfiguration(conf);
labelCollections.put(NO_LABEL, new RMNodeLabel(NO_LABEL));
}
protected void initNodeLabelStore(Configuration conf) throws Exception {
this.store = new FileSystemNodeLabelsStore(this);
this.store.init(conf);
this.store.recover(isDistributedNodeLabelConfiguration);
}
// for UT purpose
protected void startDispatcher() {
// start dispatcher
AsyncDispatcher asyncDispatcher = (AsyncDispatcher) dispatcher;
asyncDispatcher.start();
}
@Override
protected void serviceStart() throws Exception {
if (nodeLabelsEnabled) {
initNodeLabelStore(getConfig());
}
    // init the dispatcher only when the service starts, because recovery
    // happens in service init and we don't want to trigger any event
    // handling at that time.
initDispatcher(getConfig());
if (null != dispatcher) {
dispatcher.register(NodeLabelsStoreEventType.class,
new ForwardingEventHandler());
}
startDispatcher();
}
// for UT purpose
protected void stopDispatcher() {
AsyncDispatcher asyncDispatcher = (AsyncDispatcher) dispatcher;
if (null != asyncDispatcher) {
asyncDispatcher.stop();
}
}
@Override
protected void serviceStop() throws Exception {
// finalize store
stopDispatcher();
    // only close the store when persistence was enabled
if (null != store) {
store.close();
}
}
@SuppressWarnings("unchecked")
public void addToCluserNodeLabels(Collection<NodeLabel> labels)
throws IOException {
if (!nodeLabelsEnabled) {
LOG.error(NODE_LABELS_NOT_ENABLED_ERR);
throw new IOException(NODE_LABELS_NOT_ENABLED_ERR);
}
if (null == labels || labels.isEmpty()) {
return;
}
List<NodeLabel> newLabels = new ArrayList<NodeLabel>();
normalizeNodeLabels(labels);
    // check for any exclusivity mismatch with existing labels; throws on
    // mismatch, labels without a mismatch are skipped
checkExclusivityMatch(labels);
    // validate the labels before actually adding them; will throw an
    // exception if any of them doesn't meet the label name requirements
for (NodeLabel label : labels) {
checkAndThrowLabelName(label.getName());
}
for (NodeLabel label : labels) {
// shouldn't overwrite it to avoid changing the Label.resource
if (this.labelCollections.get(label.getName()) == null) {
this.labelCollections.put(label.getName(), new RMNodeLabel(label));
newLabels.add(label);
}
}
if (null != dispatcher && !newLabels.isEmpty()) {
dispatcher.getEventHandler().handle(
new StoreNewClusterNodeLabels(newLabels));
}
LOG.info("Add labels: [" + StringUtils.join(labels.iterator(), ",") + "]");
}
/**
* Add multiple node labels to repository
*
* @param labels
* new node labels added
*/
@VisibleForTesting
public void addToCluserNodeLabelsWithDefaultExclusivity(Set<String> labels)
throws IOException {
Set<NodeLabel> nodeLabels = new HashSet<NodeLabel>();
for (String label : labels) {
nodeLabels.add(NodeLabel.newInstance(label));
}
addToCluserNodeLabels(nodeLabels);
}
protected void checkAddLabelsToNode(
Map<NodeId, Set<String>> addedLabelsToNode) throws IOException {
if (null == addedLabelsToNode || addedLabelsToNode.isEmpty()) {
return;
}
    // check that all labels being added exist
Set<String> knownLabels = labelCollections.keySet();
for (Entry<NodeId, Set<String>> entry : addedLabelsToNode.entrySet()) {
NodeId nodeId = entry.getKey();
Set<String> labels = entry.getValue();
if (!knownLabels.containsAll(labels)) {
String msg =
"Not all labels being added contained by known "
+ "label collections, please check" + ", added labels=["
+ StringUtils.join(labels, ",") + "]";
LOG.error(msg);
throw new IOException(msg);
}
      // In YARN-2694, we temporarily disallow adding more than one label on
      // the same host
if (!labels.isEmpty()) {
Set<String> newLabels = new HashSet<String>(getLabelsByNode(nodeId));
newLabels.addAll(labels);
        // we don't allow the number of labels on a node to exceed 1 after
        // adding labels
if (newLabels.size() > 1) {
String msg =
String.format(
"%d labels specified on host=%s after add labels to node"
+ ", please note that we do not support specifying multiple"
+ " labels on a single host for now.",
newLabels.size(), nodeId.getHost());
LOG.error(msg);
throw new IOException(msg);
}
}
}
}
/**
* add more labels to nodes
*
* @param addedLabelsToNode node {@literal ->} labels map
*/
public void addLabelsToNode(Map<NodeId, Set<String>> addedLabelsToNode)
throws IOException {
if (!nodeLabelsEnabled) {
LOG.error(NODE_LABELS_NOT_ENABLED_ERR);
throw new IOException(NODE_LABELS_NOT_ENABLED_ERR);
}
addedLabelsToNode = normalizeNodeIdToLabels(addedLabelsToNode);
checkAddLabelsToNode(addedLabelsToNode);
internalUpdateLabelsOnNodes(addedLabelsToNode, NodeLabelUpdateOperation.ADD);
}
protected void checkRemoveFromClusterNodeLabels(
Collection<String> labelsToRemove) throws IOException {
if (null == labelsToRemove || labelsToRemove.isEmpty()) {
return;
}
    // Check that each label to remove exists and is not null/empty; will
    // throw an exception if any label to remove doesn't meet the requirement
for (String label : labelsToRemove) {
label = normalizeLabel(label);
if (label == null || label.isEmpty()) {
throw new IOException("Label to be removed is null or empty");
}
if (!labelCollections.containsKey(label)) {
throw new IOException("Node label=" + label
+ " to be removed doesn't existed in cluster "
+ "node labels collection.");
}
}
}
@SuppressWarnings("unchecked")
protected void internalRemoveFromClusterNodeLabels(Collection<String> labelsToRemove) {
// remove labels from nodes
for (Map.Entry<String,Host> nodeEntry : nodeCollections.entrySet()) {
Host host = nodeEntry.getValue();
if (null != host) {
host.labels.removeAll(labelsToRemove);
for (Node nm : host.nms.values()) {
if (nm.labels != null) {
nm.labels.removeAll(labelsToRemove);
}
}
}
}
// remove labels from node labels collection
for (String label : labelsToRemove) {
labelCollections.remove(label);
}
// create event to remove labels
if (null != dispatcher) {
dispatcher.getEventHandler().handle(
new RemoveClusterNodeLabels(labelsToRemove));
}
LOG.info("Remove labels: ["
+ StringUtils.join(labelsToRemove.iterator(), ",") + "]");
}
/**
* Remove multiple node labels from repository
*
* @param labelsToRemove
* node labels to remove
* @throws IOException
*/
public void removeFromClusterNodeLabels(Collection<String> labelsToRemove)
throws IOException {
if (!nodeLabelsEnabled) {
LOG.error(NODE_LABELS_NOT_ENABLED_ERR);
throw new IOException(NODE_LABELS_NOT_ENABLED_ERR);
}
labelsToRemove = normalizeLabels(labelsToRemove);
checkRemoveFromClusterNodeLabels(labelsToRemove);
internalRemoveFromClusterNodeLabels(labelsToRemove);
}
protected void checkRemoveLabelsFromNode(
Map<NodeId, Set<String>> removeLabelsFromNode) throws IOException {
    // check that all labels being removed exist
Set<String> knownLabels = labelCollections.keySet();
for (Entry<NodeId, Set<String>> entry : removeLabelsFromNode.entrySet()) {
NodeId nodeId = entry.getKey();
Set<String> labels = entry.getValue();
if (!knownLabels.containsAll(labels)) {
String msg =
"Not all labels being removed contained by known "
+ "label collections, please check" + ", removed labels=["
+ StringUtils.join(labels, ",") + "]";
LOG.error(msg);
throw new IOException(msg);
}
Set<String> originalLabels = null;
boolean nodeExisted = false;
if (WILDCARD_PORT != nodeId.getPort()) {
Node nm = getNMInNodeSet(nodeId);
if (nm != null) {
originalLabels = nm.labels;
nodeExisted = true;
}
} else {
Host host = nodeCollections.get(nodeId.getHost());
if (null != host) {
originalLabels = host.labels;
nodeExisted = true;
}
}
if (!nodeExisted) {
String msg =
"Try to remove labels from NM=" + nodeId
+ ", but the NM doesn't existed";
LOG.error(msg);
throw new IOException(msg);
}
// the labels will never be null
if (labels.isEmpty()) {
continue;
}
// originalLabels may be null,
// because when a Node is created, Node.labels can be null.
if (originalLabels == null || !originalLabels.containsAll(labels)) {
String msg =
"Try to remove labels = [" + StringUtils.join(labels, ",")
+ "], but not all labels contained by NM=" + nodeId;
LOG.error(msg);
throw new IOException(msg);
}
}
}
private void addNodeToLabels(NodeId node, Set<String> labels) {
for(String l : labels) {
labelCollections.get(l).addNodeId(node);
}
}
protected void removeNodeFromLabels(NodeId node, Set<String> labels) {
for(String l : labels) {
labelCollections.get(l).removeNodeId(node);
}
}
private void replaceNodeForLabels(NodeId node, Set<String> oldLabels,
Set<String> newLabels) {
if(oldLabels != null) {
removeNodeFromLabels(node, oldLabels);
}
addNodeToLabels(node, newLabels);
}
@SuppressWarnings("unchecked")
protected void internalUpdateLabelsOnNodes(
Map<NodeId, Set<String>> nodeToLabels, NodeLabelUpdateOperation op)
throws IOException {
// do update labels from nodes
Map<NodeId, Set<String>> newNMToLabels =
new HashMap<NodeId, Set<String>>();
Set<String> oldLabels;
for (Entry<NodeId, Set<String>> entry : nodeToLabels.entrySet()) {
NodeId nodeId = entry.getKey();
Set<String> labels = entry.getValue();
createHostIfNonExisted(nodeId.getHost());
if (nodeId.getPort() == WILDCARD_PORT) {
Host host = nodeCollections.get(nodeId.getHost());
switch (op) {
case REMOVE:
removeNodeFromLabels(nodeId, labels);
host.labels.removeAll(labels);
for (Node node : host.nms.values()) {
if (node.labels != null) {
node.labels.removeAll(labels);
}
removeNodeFromLabels(node.nodeId, labels);
}
break;
case ADD:
addNodeToLabels(nodeId, labels);
host.labels.addAll(labels);
for (Node node : host.nms.values()) {
if (node.labels != null) {
node.labels.addAll(labels);
}
addNodeToLabels(node.nodeId, labels);
}
break;
case REPLACE:
replaceNodeForLabels(nodeId, host.labels, labels);
host.labels.clear();
host.labels.addAll(labels);
for (Node node : host.nms.values()) {
replaceNodeForLabels(node.nodeId, node.labels, labels);
node.labels = null;
}
break;
default:
break;
}
newNMToLabels.put(nodeId, host.labels);
} else {
if (EnumSet.of(NodeLabelUpdateOperation.ADD,
NodeLabelUpdateOperation.REPLACE).contains(op)) {
// Add and replace
createNodeIfNonExisted(nodeId);
Node nm = getNMInNodeSet(nodeId);
switch (op) {
case ADD:
addNodeToLabels(nodeId, labels);
if (nm.labels == null) {
nm.labels = new HashSet<String>();
}
nm.labels.addAll(labels);
break;
case REPLACE:
oldLabels = getLabelsByNode(nodeId);
replaceNodeForLabels(nodeId, oldLabels, labels);
if (nm.labels == null) {
nm.labels = new HashSet<String>();
}
nm.labels.clear();
nm.labels.addAll(labels);
break;
default:
break;
}
newNMToLabels.put(nodeId, nm.labels);
} else {
// remove
removeNodeFromLabels(nodeId, labels);
Node nm = getNMInNodeSet(nodeId);
if (nm.labels != null) {
nm.labels.removeAll(labels);
newNMToLabels.put(nodeId, nm.labels);
}
}
}
}
if (null != dispatcher && !isDistributedNodeLabelConfiguration) {
      // In case of DistributedNodeLabelConfiguration, no need to save the
// NodeLabels Mapping to the back-end store, as on RM restart/failover
// NodeLabels are collected from NM through Register/Heartbeat again
dispatcher.getEventHandler().handle(
new UpdateNodeToLabelsMappingsEvent(newNMToLabels));
}
// shows node->labels we added
LOG.info(op.name() + " labels on nodes:");
for (Entry<NodeId, Set<String>> entry : newNMToLabels.entrySet()) {
LOG.info(" NM=" + entry.getKey() + ", labels=["
+ StringUtils.join(entry.getValue().iterator(), ",") + "]");
}
}
/**
   * Remove labels from nodes; the labels being removed must be contained by
   * these nodes
*
* @param removeLabelsFromNode node {@literal ->} labels map
*/
public void
removeLabelsFromNode(Map<NodeId, Set<String>> removeLabelsFromNode)
throws IOException {
if (!nodeLabelsEnabled) {
LOG.error(NODE_LABELS_NOT_ENABLED_ERR);
throw new IOException(NODE_LABELS_NOT_ENABLED_ERR);
}
removeLabelsFromNode = normalizeNodeIdToLabels(removeLabelsFromNode);
checkRemoveLabelsFromNode(removeLabelsFromNode);
internalUpdateLabelsOnNodes(removeLabelsFromNode,
NodeLabelUpdateOperation.REMOVE);
}
protected void checkReplaceLabelsOnNode(
Map<NodeId, Set<String>> replaceLabelsToNode) throws IOException {
if (null == replaceLabelsToNode || replaceLabelsToNode.isEmpty()) {
return;
}
    // check that all replacement labels exist
Set<String> knownLabels = labelCollections.keySet();
for (Entry<NodeId, Set<String>> entry : replaceLabelsToNode.entrySet()) {
NodeId nodeId = entry.getKey();
Set<String> labels = entry.getValue();
      // As in YARN-2694, we disallow adding more than one label on the same
      // host
if (labels.size() > 1) {
String msg = String.format("%d labels specified on host=%s"
+ ", please note that we do not support specifying multiple"
+ " labels on a single host for now.", labels.size(),
nodeId.getHost());
LOG.error(msg);
throw new IOException(msg);
}
if (!knownLabels.containsAll(labels)) {
String msg =
"Not all labels being replaced contained by known "
+ "label collections, please check" + ", new labels=["
+ StringUtils.join(labels, ",") + "]";
LOG.error(msg);
throw new IOException(msg);
}
}
}
/**
* replace labels to nodes
*
* @param replaceLabelsToNode node {@literal ->} labels map
*/
public void replaceLabelsOnNode(Map<NodeId, Set<String>> replaceLabelsToNode)
throws IOException {
if (!nodeLabelsEnabled) {
LOG.error(NODE_LABELS_NOT_ENABLED_ERR);
throw new IOException(NODE_LABELS_NOT_ENABLED_ERR);
}
replaceLabelsToNode = normalizeNodeIdToLabels(replaceLabelsToNode);
checkReplaceLabelsOnNode(replaceLabelsToNode);
internalUpdateLabelsOnNodes(replaceLabelsToNode,
NodeLabelUpdateOperation.REPLACE);
}
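  // Usage sketch, not part of the original class: define a label and replace
  // a node's labels with it. The host/port values are hypothetical, node
  // labels must be enabled in the configuration, and this method exists only
  // for illustration.
  private void replaceLabelsSketch() throws IOException {
    addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("gpu"));
    Map<NodeId, Set<String>> mapping = new HashMap<NodeId, Set<String>>();
    mapping.put(NodeId.newInstance("host1", 8041), ImmutableSet.of("gpu"));
    replaceLabelsOnNode(mapping);
  }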
/**
* Get mapping of nodes to labels
*
* @return nodes to labels map
*/
public Map<NodeId, Set<String>> getNodeLabels() {
Map<NodeId, Set<String>> nodeToLabels =
generateNodeLabelsInfoPerNode(String.class);
return nodeToLabels;
}
/**
* Get mapping of nodes to label info
*
* @return nodes to labels map
*/
public Map<NodeId, Set<NodeLabel>> getNodeLabelsInfo() {
Map<NodeId, Set<NodeLabel>> nodeToLabels =
generateNodeLabelsInfoPerNode(NodeLabel.class);
return nodeToLabels;
}
@SuppressWarnings("unchecked")
private <T> Map<NodeId, Set<T>> generateNodeLabelsInfoPerNode(Class<T> type) {
try {
readLock.lock();
Map<NodeId, Set<T>> nodeToLabels = new HashMap<>();
for (Entry<String, Host> entry : nodeCollections.entrySet()) {
String hostName = entry.getKey();
Host host = entry.getValue();
for (NodeId nodeId : host.nms.keySet()) {
if (type.isAssignableFrom(String.class)) {
Set<String> nodeLabels = getLabelsByNode(nodeId);
if (nodeLabels == null || nodeLabels.isEmpty()) {
continue;
}
nodeToLabels.put(nodeId, (Set<T>) nodeLabels);
} else {
Set<NodeLabel> nodeLabels = getLabelsInfoByNode(nodeId);
if (nodeLabels == null || nodeLabels.isEmpty()) {
continue;
}
nodeToLabels.put(nodeId, (Set<T>) nodeLabels);
}
}
if (!host.labels.isEmpty()) {
if (type.isAssignableFrom(String.class)) {
nodeToLabels.put(NodeId.newInstance(hostName, WILDCARD_PORT),
(Set<T>) host.labels);
} else {
nodeToLabels.put(NodeId.newInstance(hostName, WILDCARD_PORT),
(Set<T>) createNodeLabelFromLabelNames(host.labels));
}
}
}
return Collections.unmodifiableMap(nodeToLabels);
} finally {
readLock.unlock();
}
}
/**
* Get mapping of labels to nodes for all the labels.
*
* @return labels to nodes map
*/
public Map<String, Set<NodeId>> getLabelsToNodes() {
try {
readLock.lock();
return getLabelsToNodes(labelCollections.keySet());
} finally {
readLock.unlock();
}
}
/**
* Get mapping of labels to nodes for specified set of labels.
*
* @param labels set of labels for which labels to nodes mapping will be
* returned.
* @return labels to nodes map
*/
public Map<String, Set<NodeId>> getLabelsToNodes(Set<String> labels) {
try {
readLock.lock();
Map<String, Set<NodeId>> labelsToNodes = getLabelsToNodesMapping(labels,
String.class);
return Collections.unmodifiableMap(labelsToNodes);
} finally {
readLock.unlock();
}
}
/**
* Get mapping of labels to nodes for all the labels.
*
* @return labels to nodes map
*/
public Map<NodeLabel, Set<NodeId>> getLabelsInfoToNodes() {
try {
readLock.lock();
return getLabelsInfoToNodes(labelCollections.keySet());
} finally {
readLock.unlock();
}
}
/**
* Get mapping of labels info to nodes for specified set of labels.
*
   * @param labels
   *          set of node labels for which the labels to nodes mapping will
   *          be returned.
* @return labels to nodes map
*/
public Map<NodeLabel, Set<NodeId>> getLabelsInfoToNodes(Set<String> labels) {
try {
readLock.lock();
Map<NodeLabel, Set<NodeId>> labelsToNodes = getLabelsToNodesMapping(
labels, NodeLabel.class);
return Collections.unmodifiableMap(labelsToNodes);
} finally {
readLock.unlock();
}
}
private <T> Map<T, Set<NodeId>> getLabelsToNodesMapping(Set<String> labels,
Class<T> type) {
Map<T, Set<NodeId>> labelsToNodes = new HashMap<T, Set<NodeId>>();
for (String label : labels) {
if (label.equals(NO_LABEL)) {
continue;
}
RMNodeLabel nodeLabelInfo = labelCollections.get(label);
if (nodeLabelInfo != null) {
Set<NodeId> nodeIds = nodeLabelInfo.getAssociatedNodeIds();
if (!nodeIds.isEmpty()) {
if (type.isAssignableFrom(String.class)) {
labelsToNodes.put(type.cast(label), nodeIds);
} else {
labelsToNodes.put(type.cast(nodeLabelInfo.getNodeLabel()), nodeIds);
}
}
} else {
LOG.warn("getLabelsToNodes : Label [" + label + "] cannot be found");
}
}
return labelsToNodes;
}
/**
* Get existing valid labels in repository
*
* @return existing valid labels in repository
*/
public Set<String> getClusterNodeLabelNames() {
try {
readLock.lock();
Set<String> labels = new HashSet<String>(labelCollections.keySet());
labels.remove(NO_LABEL);
return Collections.unmodifiableSet(labels);
} finally {
readLock.unlock();
}
}
public List<NodeLabel> getClusterNodeLabels() {
try {
readLock.lock();
List<NodeLabel> nodeLabels = new ArrayList<>();
for (RMNodeLabel label : labelCollections.values()) {
if (!label.getLabelName().equals(NO_LABEL)) {
nodeLabels.add(NodeLabel.newInstance(label.getLabelName(),
label.getIsExclusive()));
}
}
return nodeLabels;
} finally {
readLock.unlock();
}
}
public boolean isExclusiveNodeLabel(String nodeLabel) throws IOException {
try {
readLock.lock();
RMNodeLabel label = labelCollections.get(nodeLabel);
if (label == null) {
        String message =
            "Getting is-exclusive-node-label: node-label = " + nodeLabel
                + " does not exist.";
LOG.error(message);
throw new IOException(message);
}
return label.getIsExclusive();
} finally {
readLock.unlock();
}
}
private void checkAndThrowLabelName(String label) throws IOException {
if (label == null || label.isEmpty() || label.length() > MAX_LABEL_LENGTH) {
throw new IOException("label added is empty or exceeds "
+ MAX_LABEL_LENGTH + " character(s)");
}
label = label.trim();
boolean match = LABEL_PATTERN.matcher(label).matches();
if (!match) {
throw new IOException("label name should only contains "
+ "{0-9, a-z, A-Z, -, _} and should not started with {-,_}"
+ ", now it is=" + label);
}
}
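  /*
   * Illustrative sketch (an assumption, not the actual LABEL_PATTERN used
   * above, which is defined elsewhere in this class): a regex consistent
   * with the rules enforced by checkAndThrowLabelName.
   */
  private static final java.util.regex.Pattern SKETCH_LABEL_PATTERN =
      java.util.regex.Pattern.compile("^[0-9a-zA-Z][0-9a-zA-Z_-]*$");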
private void checkExclusivityMatch(Collection<NodeLabel> labels)
throws IOException {
ArrayList<NodeLabel> mismatchlabels = new ArrayList<NodeLabel>();
for (NodeLabel label : labels) {
RMNodeLabel rmNodeLabel = this.labelCollections.get(label.getName());
if (rmNodeLabel != null
&& rmNodeLabel.getIsExclusive() != label.isExclusive()) {
mismatchlabels.add(label);
}
}
if (mismatchlabels.size() > 0) {
      throw new IOException(
          "Exclusivity cannot be modified for existing label(s): "
              + StringUtils.join(mismatchlabels.iterator(), ","));
}
}
protected String normalizeLabel(String label) {
if (label != null) {
return label.trim();
}
return NO_LABEL;
}
private Set<String> normalizeLabels(Collection<String> labels) {
Set<String> newLabels = new HashSet<String>();
for (String label : labels) {
newLabels.add(normalizeLabel(label));
}
return newLabels;
}
private void normalizeNodeLabels(Collection<NodeLabel> labels) {
for (NodeLabel label : labels) {
label.setName(normalizeLabel(label.getName()));
}
}
protected Node getNMInNodeSet(NodeId nodeId) {
return getNMInNodeSet(nodeId, nodeCollections);
}
protected Node getNMInNodeSet(NodeId nodeId, Map<String, Host> map) {
return getNMInNodeSet(nodeId, map, false);
}
protected Node getNMInNodeSet(NodeId nodeId, Map<String, Host> map,
boolean checkRunning) {
Host host = map.get(nodeId.getHost());
if (null == host) {
return null;
}
Node nm = host.nms.get(nodeId);
if (null == nm) {
return null;
}
if (checkRunning) {
return nm.running ? nm : null;
}
return nm;
}
protected Set<String> getLabelsByNode(NodeId nodeId) {
return getLabelsByNode(nodeId, nodeCollections);
}
protected Set<String> getLabelsByNode(NodeId nodeId, Map<String, Host> map) {
Host host = map.get(nodeId.getHost());
if (null == host) {
return EMPTY_STRING_SET;
}
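    // A node's own labels, when set, take precedence; otherwise the node
    // inherits the host-level labels.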
Node nm = host.nms.get(nodeId);
if (null != nm && null != nm.labels) {
return nm.labels;
} else {
return host.labels;
}
}
public Set<NodeLabel> getLabelsInfoByNode(NodeId nodeId) {
try {
readLock.lock();
Set<String> labels = getLabelsByNode(nodeId, nodeCollections);
if (labels.isEmpty()) {
return EMPTY_NODELABEL_SET;
}
Set<NodeLabel> nodeLabels = createNodeLabelFromLabelNames(labels);
return nodeLabels;
} finally {
readLock.unlock();
}
}
private Set<NodeLabel> createNodeLabelFromLabelNames(Set<String> labels) {
Set<NodeLabel> nodeLabels = new HashSet<NodeLabel>();
for (String label : labels) {
if (label.equals(NO_LABEL)) {
continue;
}
RMNodeLabel rmLabel = labelCollections.get(label);
if (rmLabel == null) {
continue;
}
nodeLabels.add(rmLabel.getNodeLabel());
}
return nodeLabels;
}
protected void createNodeIfNonExisted(NodeId nodeId) throws IOException {
Host host = nodeCollections.get(nodeId.getHost());
if (null == host) {
throw new IOException("Should create host before creating node.");
}
Node nm = host.nms.get(nodeId);
if (null == nm) {
host.nms.put(nodeId, new Node(nodeId));
}
}
protected void createHostIfNonExisted(String hostName) {
Host host = nodeCollections.get(hostName);
if (null == host) {
host = new Host();
nodeCollections.put(hostName, host);
}
}
protected Map<NodeId, Set<String>> normalizeNodeIdToLabels(
Map<NodeId, Set<String>> nodeIdToLabels) {
Map<NodeId, Set<String>> newMap = new HashMap<NodeId, Set<String>>();
for (Entry<NodeId, Set<String>> entry : nodeIdToLabels.entrySet()) {
NodeId id = entry.getKey();
Set<String> labels = entry.getValue();
newMap.put(id, normalizeLabels(labels));
}
return newMap;
}
}
| 34,446 | 31.103448 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/RMNodeLabel.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.nodelabels;
import java.util.HashSet;
import java.util.Set;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.resource.Resources;
public class RMNodeLabel implements Comparable<RMNodeLabel> {
private Resource resource;
private int numActiveNMs;
private String labelName;
private Set<NodeId> nodeIds;
private boolean exclusive;
private NodeLabel nodeLabel;
public RMNodeLabel(NodeLabel nodeLabel) {
this(nodeLabel.getName(), Resource.newInstance(0, 0), 0,
nodeLabel.isExclusive());
}
public RMNodeLabel(String labelName) {
this(labelName, Resource.newInstance(0, 0), 0,
NodeLabel.DEFAULT_NODE_LABEL_EXCLUSIVITY);
}
protected RMNodeLabel(String labelName, Resource res, int activeNMs,
boolean exclusive) {
this.labelName = labelName;
this.resource = res;
this.numActiveNMs = activeNMs;
this.nodeIds = new HashSet<NodeId>();
this.exclusive = exclusive;
this.nodeLabel = NodeLabel.newInstance(labelName, exclusive);
}
public void addNodeId(NodeId node) {
nodeIds.add(node);
}
public void removeNodeId(NodeId node) {
nodeIds.remove(node);
}
public Set<NodeId> getAssociatedNodeIds() {
return new HashSet<NodeId>(nodeIds);
}
public void addNode(Resource nodeRes) {
Resources.addTo(resource, nodeRes);
numActiveNMs++;
}
public void removeNode(Resource nodeRes) {
Resources.subtractFrom(resource, nodeRes);
numActiveNMs--;
}
public Resource getResource() {
return this.resource;
}
public int getNumActiveNMs() {
return numActiveNMs;
}
public String getLabelName() {
return labelName;
}
public void setIsExclusive(boolean exclusive) {
this.exclusive = exclusive;
}
public boolean getIsExclusive() {
return this.exclusive;
}
public RMNodeLabel getCopy() {
return new RMNodeLabel(labelName, resource, numActiveNMs, exclusive);
}
public NodeLabel getNodeLabel() {
return this.nodeLabel;
}
@Override
public int compareTo(RMNodeLabel o) {
// We should always put empty label entry first after sorting
if (labelName.isEmpty() != o.getLabelName().isEmpty()) {
if (labelName.isEmpty()) {
return -1;
}
return 1;
}
return labelName.compareTo(o.getLabelName());
}
@Override
public boolean equals(Object obj) {
if (obj instanceof RMNodeLabel) {
RMNodeLabel other = (RMNodeLabel) obj;
return Resources.equals(resource, other.getResource())
&& StringUtils.equals(labelName, other.getLabelName())
&& (other.getNumActiveNMs() == numActiveNMs);
}
return false;
}
@Override
public int hashCode() {
final int prime = 502357;
return (int) ((((long) labelName.hashCode() << 8)
+ (resource.hashCode() << 4) + numActiveNMs) % prime);
}
}
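/*
 * Usage sketch (an illustration, not part of the Hadoop source): tracking
 * the aggregate resource and active-NM count for one label as nodes join
 * and leave; the "gpu" label name is hypothetical.
 */
class RMNodeLabelUsageSketch {
  public static void main(String[] args) {
    RMNodeLabel gpu = new RMNodeLabel("gpu");
    gpu.addNode(Resource.newInstance(4096, 8));
    gpu.addNode(Resource.newInstance(2048, 4));
    gpu.removeNode(Resource.newInstance(2048, 4));
    // One active NM with <memory:4096, vCores:8> remains under "gpu".
    System.out.println(gpu.getNumActiveNMs() + " NM(s), " + gpu.getResource());
  }
}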
| 3,877 | 27.101449 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/FileSystemNodeLabelsStore.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.nodelabels;
import java.io.EOFException;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.AddToClusterNodeLabelsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RemoveFromClusterNodeLabelsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ReplaceLabelsOnNodeRequestProto;
import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLabelsRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RemoveFromClusterNodeLabelsRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.ReplaceLabelsOnNodeRequestPBImpl;
import com.google.common.collect.Sets;
public class FileSystemNodeLabelsStore extends NodeLabelsStore {
public FileSystemNodeLabelsStore(CommonNodeLabelsManager mgr) {
super(mgr);
}
protected static final Log LOG = LogFactory.getLog(FileSystemNodeLabelsStore.class);
protected static final String DEFAULT_DIR_NAME = "node-labels";
protected static final String MIRROR_FILENAME = "nodelabel.mirror";
protected static final String EDITLOG_FILENAME = "nodelabel.editlog";
protected enum SerializedLogType {
ADD_LABELS, NODE_TO_LABELS, REMOVE_LABELS
}
Path fsWorkingPath;
FileSystem fs;
FSDataOutputStream editlogOs;
Path editLogPath;
private String getDefaultFSNodeLabelsRootDir() throws IOException {
// default is in local: /tmp/hadoop-yarn-${user}/node-labels/
return "file:///tmp/hadoop-yarn-"
+ UserGroupInformation.getCurrentUser().getShortUserName() + "/"
+ DEFAULT_DIR_NAME;
}
@Override
public void init(Configuration conf) throws Exception {
fsWorkingPath =
new Path(conf.get(YarnConfiguration.FS_NODE_LABELS_STORE_ROOT_DIR,
getDefaultFSNodeLabelsRootDir()));
setFileSystem(conf);
// mkdir of root dir path
fs.mkdirs(fsWorkingPath);
}
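  /*
   * Configuration sketch (illustrative, not part of the original source):
   * pointing the store at an explicit HDFS directory instead of the local
   * /tmp default; the namenode URI below is hypothetical.
   */
  private static Configuration sketchStoreConf(Configuration base) {
    Configuration conf = new Configuration(base);
    conf.set(YarnConfiguration.FS_NODE_LABELS_STORE_ROOT_DIR,
        "hdfs://namenode:8020/yarn/node-labels");
    return conf;
  }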
@Override
public void close() throws IOException {
IOUtils.cleanup(LOG, fs, editlogOs);
}
private void setFileSystem(Configuration conf) throws IOException {
Configuration confCopy = new Configuration(conf);
confCopy.setBoolean("dfs.client.retry.policy.enabled", true);
String retryPolicy =
confCopy.get(YarnConfiguration.FS_NODE_LABELS_STORE_RETRY_POLICY_SPEC,
YarnConfiguration.DEFAULT_FS_NODE_LABELS_STORE_RETRY_POLICY_SPEC);
confCopy.set("dfs.client.retry.policy.spec", retryPolicy);
fs = fsWorkingPath.getFileSystem(confCopy);
    // If it's a local file system, use RawLocalFileSystem instead of
    // LocalFileSystem, since the latter doesn't support append.
if (fs.getScheme().equals("file")) {
fs = ((LocalFileSystem)fs).getRaw();
}
}
private void ensureAppendEditlogFile() throws IOException {
editlogOs = fs.append(editLogPath);
}
private void ensureCloseEditlogFile() throws IOException {
editlogOs.close();
}
@Override
public void updateNodeToLabelsMappings(
Map<NodeId, Set<String>> nodeToLabels) throws IOException {
try {
ensureAppendEditlogFile();
editlogOs.writeInt(SerializedLogType.NODE_TO_LABELS.ordinal());
((ReplaceLabelsOnNodeRequestPBImpl) ReplaceLabelsOnNodeRequest
.newInstance(nodeToLabels)).getProto().writeDelimitedTo(editlogOs);
} finally {
ensureCloseEditlogFile();
}
}
@Override
public void storeNewClusterNodeLabels(List<NodeLabel> labels)
throws IOException {
try {
ensureAppendEditlogFile();
editlogOs.writeInt(SerializedLogType.ADD_LABELS.ordinal());
((AddToClusterNodeLabelsRequestPBImpl) AddToClusterNodeLabelsRequest
.newInstance(labels)).getProto().writeDelimitedTo(editlogOs);
} finally {
ensureCloseEditlogFile();
}
}
@Override
public void removeClusterNodeLabels(Collection<String> labels)
throws IOException {
try {
ensureAppendEditlogFile();
editlogOs.writeInt(SerializedLogType.REMOVE_LABELS.ordinal());
((RemoveFromClusterNodeLabelsRequestPBImpl) RemoveFromClusterNodeLabelsRequest.newInstance(Sets
.newHashSet(labels.iterator()))).getProto().writeDelimitedTo(editlogOs);
} finally {
ensureCloseEditlogFile();
}
}
/* (non-Javadoc)
* @see org.apache.hadoop.yarn.nodelabels.NodeLabelsStore#recover(boolean)
*/
@Override
public void recover(boolean ignoreNodeToLabelsMappings) throws YarnException,
IOException {
/*
* Steps of recover
* 1) Read from last mirror (from mirror or mirror.old)
* 2) Read from last edit log, and apply such edit log
* 3) Write new mirror to mirror.writing
* 4) Rename mirror to mirror.old
* 5) Move mirror.writing to mirror
* 6) Remove mirror.old
* 7) Remove edit log and create a new empty edit log
*/
// Open mirror from serialized file
Path mirrorPath = new Path(fsWorkingPath, MIRROR_FILENAME);
Path oldMirrorPath = new Path(fsWorkingPath, MIRROR_FILENAME + ".old");
FSDataInputStream is = null;
if (fs.exists(mirrorPath)) {
is = fs.open(mirrorPath);
} else if (fs.exists(oldMirrorPath)) {
is = fs.open(oldMirrorPath);
}
if (null != is) {
List<NodeLabel> labels =
new AddToClusterNodeLabelsRequestPBImpl(
AddToClusterNodeLabelsRequestProto.parseDelimitedFrom(is)).getNodeLabels();
Map<NodeId, Set<String>> nodeToLabels =
new ReplaceLabelsOnNodeRequestPBImpl(
ReplaceLabelsOnNodeRequestProto.parseDelimitedFrom(is))
.getNodeToLabels();
mgr.addToCluserNodeLabels(labels);
mgr.replaceLabelsOnNode(nodeToLabels);
is.close();
}
// Open and process editlog
editLogPath = new Path(fsWorkingPath, EDITLOG_FILENAME);
if (fs.exists(editLogPath)) {
is = fs.open(editLogPath);
while (true) {
try {
// read edit log one by one
SerializedLogType type = SerializedLogType.values()[is.readInt()];
switch (type) {
case ADD_LABELS: {
List<NodeLabel> labels =
new AddToClusterNodeLabelsRequestPBImpl(
AddToClusterNodeLabelsRequestProto.parseDelimitedFrom(is))
.getNodeLabels();
mgr.addToCluserNodeLabels(labels);
break;
}
case REMOVE_LABELS: {
Collection<String> labels =
RemoveFromClusterNodeLabelsRequestProto.parseDelimitedFrom(is)
.getNodeLabelsList();
mgr.removeFromClusterNodeLabels(labels);
break;
}
case NODE_TO_LABELS: {
Map<NodeId, Set<String>> map =
new ReplaceLabelsOnNodeRequestPBImpl(
ReplaceLabelsOnNodeRequestProto.parseDelimitedFrom(is))
.getNodeToLabels();
if (!ignoreNodeToLabelsMappings) {
/*
* In case of Distributed NodeLabels setup,
* ignoreNodeToLabelsMappings will be set to true and recover will
* be invoked. As RM will collect the node labels from NM through
* registration/HB
*/
mgr.replaceLabelsOnNode(map);
}
break;
}
}
} catch (EOFException e) {
// EOF hit, break
break;
}
}
}
// Serialize current mirror to mirror.writing
Path writingMirrorPath = new Path(fsWorkingPath, MIRROR_FILENAME + ".writing");
FSDataOutputStream os = fs.create(writingMirrorPath, true);
((AddToClusterNodeLabelsRequestPBImpl) AddToClusterNodeLabelsRequestPBImpl
.newInstance(mgr.getClusterNodeLabels())).getProto().writeDelimitedTo(os);
((ReplaceLabelsOnNodeRequestPBImpl) ReplaceLabelsOnNodeRequest
.newInstance(mgr.getNodeLabels())).getProto().writeDelimitedTo(os);
os.close();
// Move mirror to mirror.old
if (fs.exists(mirrorPath)) {
fs.delete(oldMirrorPath, false);
fs.rename(mirrorPath, oldMirrorPath);
}
// move mirror.writing to mirror
fs.rename(writingMirrorPath, mirrorPath);
fs.delete(writingMirrorPath, false);
// remove mirror.old
fs.delete(oldMirrorPath, false);
// create a new editlog file
editlogOs = fs.create(editLogPath, true);
editlogOs.close();
LOG.info("Finished write mirror at:" + mirrorPath.toString());
LOG.info("Finished create editlog file at:" + editLogPath.toString());
}
}
| 10,475 | 36.017668 | 115 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NodeLabelsStore.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.nodelabels;
import java.io.Closeable;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.exceptions.YarnException;
public abstract class NodeLabelsStore implements Closeable {
protected final CommonNodeLabelsManager mgr;
public NodeLabelsStore(CommonNodeLabelsManager mgr) {
this.mgr = mgr;
}
/**
* Store node {@literal ->} label
*/
public abstract void updateNodeToLabelsMappings(
Map<NodeId, Set<String>> nodeToLabels) throws IOException;
/**
* Store new labels
*/
public abstract void storeNewClusterNodeLabels(List<NodeLabel> label)
throws IOException;
/**
* Remove labels
*/
public abstract void removeClusterNodeLabels(Collection<String> labels)
throws IOException;
/**
   * Recover labels and node-to-labels mappings from the store. If
   * ignoreNodeToLabelsMappings is true, the node-to-labels mappings are not
   * recovered. In a Distributed NodeLabels setup, ignoreNodeToLabelsMappings
   * is set to true because the RM collects the node labels from the NMs
   * through registration/heartbeat.
   *
   * @param ignoreNodeToLabelsMappings whether to skip recovering the
   *          node-to-labels mappings
* @throws IOException
* @throws YarnException
*/
public abstract void recover(boolean ignoreNodeToLabelsMappings)
throws IOException, YarnException;
public void init(Configuration conf) throws Exception {}
public CommonNodeLabelsManager getNodeLabelsManager() {
return mgr;
}
}
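/*
 * Minimal non-persistent implementation sketch (an illustration, not part of
 * the Hadoop source): shows the contract a concrete store must fulfil. A
 * real store such as FileSystemNodeLabelsStore persists each mutation
 * durably and replays it into the manager on recover().
 */
class NoopNodeLabelsStoreSketch extends NodeLabelsStore {
  public NoopNodeLabelsStoreSketch(CommonNodeLabelsManager mgr) {
    super(mgr);
  }
  @Override
  public void updateNodeToLabelsMappings(Map<NodeId, Set<String>> nodeToLabels) {
    // Nothing is persisted in this sketch.
  }
  @Override
  public void storeNewClusterNodeLabels(List<NodeLabel> labels) {
  }
  @Override
  public void removeClusterNodeLabels(Collection<String> labels) {
  }
  @Override
  public void recover(boolean ignoreNodeToLabelsMappings) {
    // Nothing was persisted, so there is nothing to replay into mgr.
  }
  @Override
  public void close() {
  }
}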
| 2,549 | 31.692308 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/event/RemoveClusterNodeLabels.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.nodelabels.event;
import java.util.Collection;
public class RemoveClusterNodeLabels extends NodeLabelsStoreEvent {
private Collection<String> labels;
public RemoveClusterNodeLabels(Collection<String> labels) {
super(NodeLabelsStoreEventType.REMOVE_LABELS);
this.labels = labels;
}
public Collection<String> getLabels() {
return labels;
}
}
| 1,209 | 33.571429 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/event/NodeLabelsStoreEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.nodelabels.event;
import org.apache.hadoop.yarn.event.AbstractEvent;
public class NodeLabelsStoreEvent extends
AbstractEvent<NodeLabelsStoreEventType> {
public NodeLabelsStoreEvent(NodeLabelsStoreEventType type) {
super(type);
}
}
| 1,083 | 37.714286 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/event/StoreNewClusterNodeLabels.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.nodelabels.event;
import java.util.List;
import org.apache.hadoop.yarn.api.records.NodeLabel;
public class StoreNewClusterNodeLabels extends NodeLabelsStoreEvent {
private List<NodeLabel> labels;
public StoreNewClusterNodeLabels(List<NodeLabel> labels) {
super(NodeLabelsStoreEventType.ADD_LABELS);
this.labels = labels;
}
public List<NodeLabel> getLabels() {
return labels;
}
}
| 1,249 | 32.783784 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/event/NodeLabelsStoreEventType.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.nodelabels.event;
public enum NodeLabelsStoreEventType {
REMOVE_LABELS,
ADD_LABELS,
STORE_NODE_TO_LABELS
}
| 953 | 35.692308 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/event/UpdateNodeToLabelsMappingsEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.nodelabels.event;
import java.util.Map;
import java.util.Set;
import org.apache.hadoop.yarn.api.records.NodeId;
public class UpdateNodeToLabelsMappingsEvent extends NodeLabelsStoreEvent {
private Map<NodeId, Set<String>> nodeToLabels;
public UpdateNodeToLabelsMappingsEvent(Map<NodeId, Set<String>> nodeToLabels) {
super(NodeLabelsStoreEventType.STORE_NODE_TO_LABELS);
this.nodeToLabels = nodeToLabels;
}
public Map<NodeId, Set<String>> getNodeToLabels() {
return nodeToLabels;
}
}
| 1,348 | 34.5 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/StateMachine.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.state;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
@Public
@Evolving
public interface StateMachine
<STATE extends Enum<STATE>,
EVENTTYPE extends Enum<EVENTTYPE>, EVENT> {
public STATE getCurrentState();
public STATE doTransition(EVENTTYPE eventType, EVENT event)
throws InvalidStateTransitionException;
}
| 1,268 | 37.454545 | 74 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Public
package org.apache.hadoop.yarn.state;
import org.apache.hadoop.classification.InterfaceAudience;
| 930 | 41.318182 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/StateMachineFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.state;
import java.util.EnumMap;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.Stack;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
/**
* State machine topology.
* This object is semantically immutable. If you have a
* StateMachineFactory there's no operation in the API that changes
* its semantic properties.
*
* @param <OPERAND> The object type on which this state machine operates.
* @param <STATE> The state of the entity.
* @param <EVENTTYPE> The external eventType to be handled.
* @param <EVENT> The event object.
*
*/
@Public
@Evolving
final public class StateMachineFactory
<OPERAND, STATE extends Enum<STATE>,
EVENTTYPE extends Enum<EVENTTYPE>, EVENT> {
private final TransitionsListNode transitionsListNode;
private Map<STATE, Map<EVENTTYPE,
Transition<OPERAND, STATE, EVENTTYPE, EVENT>>> stateMachineTable;
private STATE defaultInitialState;
private final boolean optimized;
/**
* Constructor
*
* This is the only constructor in the API.
*
*/
public StateMachineFactory(STATE defaultInitialState) {
this.transitionsListNode = null;
this.defaultInitialState = defaultInitialState;
this.optimized = false;
this.stateMachineTable = null;
}
private StateMachineFactory
(StateMachineFactory<OPERAND, STATE, EVENTTYPE, EVENT> that,
ApplicableTransition<OPERAND, STATE, EVENTTYPE, EVENT> t) {
this.defaultInitialState = that.defaultInitialState;
this.transitionsListNode
= new TransitionsListNode(t, that.transitionsListNode);
this.optimized = false;
this.stateMachineTable = null;
}
private StateMachineFactory
(StateMachineFactory<OPERAND, STATE, EVENTTYPE, EVENT> that,
boolean optimized) {
this.defaultInitialState = that.defaultInitialState;
this.transitionsListNode = that.transitionsListNode;
this.optimized = optimized;
if (optimized) {
makeStateMachineTable();
} else {
stateMachineTable = null;
}
}
private interface ApplicableTransition
<OPERAND, STATE extends Enum<STATE>,
EVENTTYPE extends Enum<EVENTTYPE>, EVENT> {
void apply(StateMachineFactory<OPERAND, STATE, EVENTTYPE, EVENT> subject);
}
private class TransitionsListNode {
final ApplicableTransition<OPERAND, STATE, EVENTTYPE, EVENT> transition;
final TransitionsListNode next;
TransitionsListNode
(ApplicableTransition<OPERAND, STATE, EVENTTYPE, EVENT> transition,
TransitionsListNode next) {
this.transition = transition;
this.next = next;
}
}
static private class ApplicableSingleOrMultipleTransition
<OPERAND, STATE extends Enum<STATE>,
EVENTTYPE extends Enum<EVENTTYPE>, EVENT>
implements ApplicableTransition<OPERAND, STATE, EVENTTYPE, EVENT> {
final STATE preState;
final EVENTTYPE eventType;
final Transition<OPERAND, STATE, EVENTTYPE, EVENT> transition;
ApplicableSingleOrMultipleTransition
(STATE preState, EVENTTYPE eventType,
Transition<OPERAND, STATE, EVENTTYPE, EVENT> transition) {
this.preState = preState;
this.eventType = eventType;
this.transition = transition;
}
@Override
public void apply
(StateMachineFactory<OPERAND, STATE, EVENTTYPE, EVENT> subject) {
Map<EVENTTYPE, Transition<OPERAND, STATE, EVENTTYPE, EVENT>> transitionMap
= subject.stateMachineTable.get(preState);
if (transitionMap == null) {
// I use HashMap here because I would expect most EVENTTYPE's to not
// apply out of a particular state, so FSM sizes would be
// quadratic if I use EnumMap's here as I do at the top level.
transitionMap = new HashMap<EVENTTYPE,
Transition<OPERAND, STATE, EVENTTYPE, EVENT>>();
subject.stateMachineTable.put(preState, transitionMap);
}
transitionMap.put(eventType, transition);
}
}
/**
* @return a NEW StateMachineFactory just like {@code this} with the current
* transition added as a new legal transition. This overload
* has no hook object.
*
* Note that the returned StateMachineFactory is a distinct
* object.
*
* This method is part of the API.
*
* @param preState pre-transition state
* @param postState post-transition state
* @param eventType stimulus for the transition
*/
public StateMachineFactory
<OPERAND, STATE, EVENTTYPE, EVENT>
addTransition(STATE preState, STATE postState, EVENTTYPE eventType) {
return addTransition(preState, postState, eventType, null);
}
/**
* @return a NEW StateMachineFactory just like {@code this} with the current
* transition added as a new legal transition. This overload
* has no hook object.
*
*
* Note that the returned StateMachineFactory is a distinct
* object.
*
* This method is part of the API.
*
* @param preState pre-transition state
* @param postState post-transition state
* @param eventTypes List of stimuli for the transitions
*/
public StateMachineFactory<OPERAND, STATE, EVENTTYPE, EVENT> addTransition(
STATE preState, STATE postState, Set<EVENTTYPE> eventTypes) {
return addTransition(preState, postState, eventTypes, null);
}
/**
* @return a NEW StateMachineFactory just like {@code this} with the current
* transition added as a new legal transition
*
* Note that the returned StateMachineFactory is a distinct
* object.
*
* This method is part of the API.
*
* @param preState pre-transition state
* @param postState post-transition state
* @param eventTypes List of stimuli for the transitions
* @param hook transition hook
*/
public StateMachineFactory<OPERAND, STATE, EVENTTYPE, EVENT> addTransition(
STATE preState, STATE postState, Set<EVENTTYPE> eventTypes,
SingleArcTransition<OPERAND, EVENT> hook) {
StateMachineFactory<OPERAND, STATE, EVENTTYPE, EVENT> factory = null;
for (EVENTTYPE event : eventTypes) {
if (factory == null) {
factory = addTransition(preState, postState, event, hook);
} else {
factory = factory.addTransition(preState, postState, event, hook);
}
}
return factory;
}
/**
* @return a NEW StateMachineFactory just like {@code this} with the current
* transition added as a new legal transition
*
* Note that the returned StateMachineFactory is a distinct object.
*
* This method is part of the API.
*
* @param preState pre-transition state
* @param postState post-transition state
* @param eventType stimulus for the transition
* @param hook transition hook
*/
public StateMachineFactory
<OPERAND, STATE, EVENTTYPE, EVENT>
addTransition(STATE preState, STATE postState,
EVENTTYPE eventType,
SingleArcTransition<OPERAND, EVENT> hook){
return new StateMachineFactory<OPERAND, STATE, EVENTTYPE, EVENT>
(this, new ApplicableSingleOrMultipleTransition<OPERAND, STATE, EVENTTYPE, EVENT>
(preState, eventType, new SingleInternalArc(postState, hook)));
}
/**
* @return a NEW StateMachineFactory just like {@code this} with the current
* transition added as a new legal transition
*
* Note that the returned StateMachineFactory is a distinct object.
*
* This method is part of the API.
*
* @param preState pre-transition state
* @param postStates valid post-transition states
* @param eventType stimulus for the transition
* @param hook transition hook
*/
public StateMachineFactory
<OPERAND, STATE, EVENTTYPE, EVENT>
addTransition(STATE preState, Set<STATE> postStates,
EVENTTYPE eventType,
MultipleArcTransition<OPERAND, EVENT, STATE> hook){
return new StateMachineFactory<OPERAND, STATE, EVENTTYPE, EVENT>
(this,
new ApplicableSingleOrMultipleTransition<OPERAND, STATE, EVENTTYPE, EVENT>
(preState, eventType, new MultipleInternalArc(postStates, hook)));
}
/**
   * @return a StateMachineFactory just like {@code this}, with its
   *         transition table precomputed, so that no synchronization is
   *         needed to build a state machine from it
   *
   * Note that the returned StateMachineFactory is a distinct object.
   *
   * This method is part of the API.
   *
   * The only way you could distinguish the returned
   * StateMachineFactory from {@code this} would be by
   * measuring the performance of the derived
   * {@code StateMachine} you can get from it.
   *
   * Calling this is optional. It doesn't change the semantics of the
   * factory; if you call it, using the factory involves no synchronization.
*/
public StateMachineFactory
<OPERAND, STATE, EVENTTYPE, EVENT>
installTopology() {
return new StateMachineFactory<OPERAND, STATE, EVENTTYPE, EVENT>(this, true);
}
/**
* Effect a transition due to the effecting stimulus.
* @param state current state
* @param eventType trigger to initiate the transition
* @param cause causal eventType context
* @return transitioned state
*/
private STATE doTransition
(OPERAND operand, STATE oldState, EVENTTYPE eventType, EVENT event)
throws InvalidStateTransitionException {
// We can assume that stateMachineTable is non-null because we call
// maybeMakeStateMachineTable() when we build an InnerStateMachine ,
// and this code only gets called from inside a working InnerStateMachine .
Map<EVENTTYPE, Transition<OPERAND, STATE, EVENTTYPE, EVENT>> transitionMap
= stateMachineTable.get(oldState);
if (transitionMap != null) {
Transition<OPERAND, STATE, EVENTTYPE, EVENT> transition
= transitionMap.get(eventType);
if (transition != null) {
return transition.doTransition(operand, oldState, event, eventType);
}
}
throw new InvalidStateTransitionException(oldState, eventType);
}
private synchronized void maybeMakeStateMachineTable() {
if (stateMachineTable == null) {
makeStateMachineTable();
}
}
private void makeStateMachineTable() {
Stack<ApplicableTransition<OPERAND, STATE, EVENTTYPE, EVENT>> stack =
new Stack<ApplicableTransition<OPERAND, STATE, EVENTTYPE, EVENT>>();
Map<STATE, Map<EVENTTYPE, Transition<OPERAND, STATE, EVENTTYPE, EVENT>>>
prototype = new HashMap<STATE, Map<EVENTTYPE, Transition<OPERAND, STATE, EVENTTYPE, EVENT>>>();
prototype.put(defaultInitialState, null);
// I use EnumMap here because it'll be faster and denser. I would
// expect most of the states to have at least one transition.
stateMachineTable
= new EnumMap<STATE, Map<EVENTTYPE,
Transition<OPERAND, STATE, EVENTTYPE, EVENT>>>(prototype);
for (TransitionsListNode cursor = transitionsListNode;
cursor != null;
cursor = cursor.next) {
stack.push(cursor.transition);
}
while (!stack.isEmpty()) {
stack.pop().apply(this);
}
}
private interface Transition<OPERAND, STATE extends Enum<STATE>,
EVENTTYPE extends Enum<EVENTTYPE>, EVENT> {
STATE doTransition(OPERAND operand, STATE oldState,
EVENT event, EVENTTYPE eventType);
}
private class SingleInternalArc
implements Transition<OPERAND, STATE, EVENTTYPE, EVENT> {
private STATE postState;
private SingleArcTransition<OPERAND, EVENT> hook; // transition hook
SingleInternalArc(STATE postState,
SingleArcTransition<OPERAND, EVENT> hook) {
this.postState = postState;
this.hook = hook;
}
@Override
public STATE doTransition(OPERAND operand, STATE oldState,
EVENT event, EVENTTYPE eventType) {
if (hook != null) {
hook.transition(operand, event);
}
return postState;
}
}
private class MultipleInternalArc
implements Transition<OPERAND, STATE, EVENTTYPE, EVENT>{
// Fields
private Set<STATE> validPostStates;
private MultipleArcTransition<OPERAND, EVENT, STATE> hook; // transition hook
MultipleInternalArc(Set<STATE> postStates,
MultipleArcTransition<OPERAND, EVENT, STATE> hook) {
this.validPostStates = postStates;
this.hook = hook;
}
@Override
public STATE doTransition(OPERAND operand, STATE oldState,
EVENT event, EVENTTYPE eventType)
throws InvalidStateTransitionException {
STATE postState = hook.transition(operand, event);
if (!validPostStates.contains(postState)) {
throw new InvalidStateTransitionException(oldState, eventType);
}
return postState;
}
}
  /**
* @return a {@link StateMachine} that starts in
* {@code initialState} and whose {@link Transition} s are
* applied to {@code operand} .
*
* This is part of the API.
*
* @param operand the object upon which the returned
* {@link StateMachine} will operate.
* @param initialState the state in which the returned
* {@link StateMachine} will start.
*
*/
public StateMachine<STATE, EVENTTYPE, EVENT>
make(OPERAND operand, STATE initialState) {
return new InternalStateMachine(operand, initialState);
}
  /**
* @return a {@link StateMachine} that starts in the default initial
* state and whose {@link Transition} s are applied to
* {@code operand} .
*
* This is part of the API.
*
* @param operand the object upon which the returned
* {@link StateMachine} will operate.
*
*/
public StateMachine<STATE, EVENTTYPE, EVENT> make(OPERAND operand) {
return new InternalStateMachine(operand, defaultInitialState);
}
private class InternalStateMachine
implements StateMachine<STATE, EVENTTYPE, EVENT> {
private final OPERAND operand;
private STATE currentState;
InternalStateMachine(OPERAND operand, STATE initialState) {
this.operand = operand;
this.currentState = initialState;
if (!optimized) {
maybeMakeStateMachineTable();
}
}
@Override
public synchronized STATE getCurrentState() {
return currentState;
}
@Override
public synchronized STATE doTransition(EVENTTYPE eventType, EVENT event)
throws InvalidStateTransitionException {
currentState = StateMachineFactory.this.doTransition
(operand, currentState, eventType, event);
return currentState;
}
}
/**
* Generate a graph represents the state graph of this StateMachine
* @param name graph name
* @return Graph object generated
*/
@SuppressWarnings("rawtypes")
public Graph generateStateGraph(String name) {
maybeMakeStateMachineTable();
Graph g = new Graph(name);
for (STATE startState : stateMachineTable.keySet()) {
Map<EVENTTYPE, Transition<OPERAND, STATE, EVENTTYPE, EVENT>> transitions
= stateMachineTable.get(startState);
for (Entry<EVENTTYPE, Transition<OPERAND, STATE, EVENTTYPE, EVENT>> entry :
transitions.entrySet()) {
Transition<OPERAND, STATE, EVENTTYPE, EVENT> transition = entry.getValue();
if (transition instanceof StateMachineFactory.SingleInternalArc) {
StateMachineFactory.SingleInternalArc sa
= (StateMachineFactory.SingleInternalArc) transition;
Graph.Node fromNode = g.getNode(startState.toString());
Graph.Node toNode = g.getNode(sa.postState.toString());
fromNode.addEdge(toNode, entry.getKey().toString());
} else if (transition instanceof StateMachineFactory.MultipleInternalArc) {
StateMachineFactory.MultipleInternalArc ma
= (StateMachineFactory.MultipleInternalArc) transition;
Iterator iter = ma.validPostStates.iterator();
while (iter.hasNext()) {
Graph.Node fromNode = g.getNode(startState.toString());
Graph.Node toNode = g.getNode(iter.next().toString());
fromNode.addEdge(toNode, entry.getKey().toString());
}
}
}
}
return g;
}
}
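/*
 * Usage sketch (an illustration, not part of the Hadoop source): builds a
 * tiny two-state machine with this factory. The Demo* names below are
 * hypothetical stand-ins for real operand/state/event types.
 */
class StateMachineFactorySketch {
  enum DemoState { NEW, RUNNING }
  enum DemoEventType { START }
  // Topology is assembled once; installTopology() precomputes the table so
  // that building individual state machines needs no synchronization.
  private static final StateMachineFactory<Object, DemoState, DemoEventType, Object>
      FACTORY =
        new StateMachineFactory<Object, DemoState, DemoEventType, Object>(
            DemoState.NEW)
          .addTransition(DemoState.NEW, DemoState.RUNNING, DemoEventType.START)
          .installTopology();
  public static void main(String[] args) {
    StateMachine<DemoState, DemoEventType, Object> sm = FACTORY.make(new Object());
    // Moves NEW -> RUNNING; an unregistered event would throw the unchecked
    // InvalidStateTransitionException.
    sm.doTransition(DemoEventType.START, new Object());
    System.out.println(sm.getCurrentState()); // RUNNING
  }
}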
| 17,606 | 34.932653 | 101 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/InvalidStateTransitonException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.state;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
/** @deprecated Use {@link InvalidStateTransitionException} instead. */
@Public
@Evolving
@Deprecated
public class InvalidStateTransitonException extends
InvalidStateTransitionException {
private static final long serialVersionUID = 8610511635996283691L;
public InvalidStateTransitonException(Enum<?> currentState, Enum<?> event) {
super(currentState, event);
}
}
| 1,350 | 32.775 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/SingleArcTransition.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.state;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
/**
 * Hook for a Transition. Causes the state machine to move to
 * the post state registered for the transition in the state machine.
*/
@Public
@Evolving
public interface SingleArcTransition<OPERAND, EVENT> {
/**
* Transition hook.
*
* @param operand the entity attached to the FSM, whose internal
* state may change.
* @param event causal event
*/
public void transition(OPERAND operand, EVENT event);
}
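/*
 * Hook sketch (an illustration, not part of the Hadoop source): a
 * SingleArcTransition implemented as an anonymous class; the operand and
 * event types are hypothetical.
 */
class SingleArcTransitionSketch {
  static final SingleArcTransition<StringBuilder, String> APPEND_HOOK =
      new SingleArcTransition<StringBuilder, String>() {
        @Override
        public void transition(StringBuilder operand, String event) {
          // Side effect on the FSM's operand; the post state itself is
          // fixed by the transition's registration.
          operand.append(event);
        }
      };
}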
| 1,401 | 33.195122 | 74 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/VisualizeStateMachine.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.state;
import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience.Private;
@Private
public class VisualizeStateMachine {
  /**
   * @param graphName name of the returned graph
   * @param classes list of classes which have a static field
   *                stateMachineFactory of type StateMachineFactory
   * @return graph representing the state machine(s)
   */
public static Graph getGraphFromClasses(String graphName, List<String> classes)
throws Exception {
Graph ret = null;
if (classes.size() != 1) {
ret = new Graph(graphName);
}
for (String className : classes) {
Class clz = Class.forName(className);
Field factoryField = clz.getDeclaredField("stateMachineFactory");
factoryField.setAccessible(true);
StateMachineFactory factory = (StateMachineFactory) factoryField.get(null);
if (classes.size() == 1) {
return factory.generateStateGraph(graphName);
}
String gname = clz.getSimpleName();
if (gname.endsWith("Impl")) {
gname = gname.substring(0, gname.length()-4);
}
ret.addSubGraph(factory.generateStateGraph(gname));
}
return ret;
}
public static void main(String [] args) throws Exception {
if (args.length < 3) {
System.err.printf("Usage: %s <GraphName> <class[,class[,...]]> <OutputFile>%n",
VisualizeStateMachine.class.getName());
System.exit(1);
}
String [] classes = args[1].split(",");
ArrayList<String> validClasses = new ArrayList<String>();
for (String c : classes) {
String vc = c.trim();
if (vc.length()>0) {
validClasses.add(vc);
}
}
Graph g = getGraphFromClasses(args[0], validClasses);
g.save(args[2]);
}
}
| 2,601 | 33.693333 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/InvalidStateTransitionException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.state;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
/**
 * The exception thrown when an invalid state transition is attempted.
*
*/
@Public
@Evolving
public class InvalidStateTransitionException extends YarnRuntimeException {
private static final long serialVersionUID = -6188669113571351684L;
private Enum<?> currentState;
private Enum<?> event;
public InvalidStateTransitionException(Enum<?> currentState, Enum<?> event) {
super("Invalid event: " + event + " at " + currentState);
this.currentState = currentState;
this.event = event;
}
public Enum<?> getCurrentState() {
return currentState;
}
public Enum<?> getEvent() {
return event;
}
}
| 1,681 | 31.346154 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/MultipleArcTransition.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.state;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
/**
 * Hook for a Transition.
 * The post state is decided by the transition hook and must be one of the
 * valid post states registered in the StateMachine.
*/
@Public
@Evolving
public interface MultipleArcTransition
<OPERAND, EVENT, STATE extends Enum<STATE>> {
/**
* Transition hook.
* @return the postState. Post state must be one of the
* valid post states registered in StateMachine.
* @param operand the entity attached to the FSM, whose internal
* state may change.
* @param event causal event
*/
public STATE transition(OPERAND operand, EVENT event);
}
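/*
 * Hook sketch (an illustration, not part of the Hadoop source): the post
 * state is computed from the event at runtime and must be one of the post
 * states registered for the transition.
 */
class MultipleArcTransitionSketch {
  enum DemoState { SUCCEEDED, FAILED }
  static final MultipleArcTransition<Object, Boolean, DemoState> OUTCOME_HOOK =
      new MultipleArcTransition<Object, Boolean, DemoState>() {
        @Override
        public DemoState transition(Object operand, Boolean succeeded) {
          return succeeded ? DemoState.SUCCEEDED : DemoState.FAILED;
        }
      };
}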
| 1,599 | 34.555556 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/Graph.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.state;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.commons.lang.StringEscapeUtils;
import org.apache.hadoop.classification.InterfaceAudience.Private;
@Private
public class Graph {
public class Edge {
Node from;
Node to;
String label;
public Edge(Node from, Node to, String info) {
this.from = from;
this.to = to;
this.label = info;
}
public boolean sameAs(Edge rhs) {
if (this.from == rhs.from &&
this.to == rhs.to) {
return true;
}
return false;
}
public Edge combine(Edge rhs) {
String newlabel = this.label + "," + rhs.label;
return new Edge(this.from, this.to, newlabel);
}
}
public class Node {
Graph parent;
String id;
List<Edge> ins;
List<Edge> outs;
public Node(String id) {
this.id = id;
this.parent = Graph.this;
this.ins = new ArrayList<Graph.Edge>();
this.outs = new ArrayList<Graph.Edge>();
}
public Graph getParent() {
return parent;
}
public Node addEdge(Node to, String info) {
Edge e = new Edge(this, to, info);
outs.add(e);
to.ins.add(e);
return this;
}
public String getUniqueId() {
return Graph.this.name + "." + id;
}
}
private String name;
private Graph parent;
private Set<Graph.Node> nodes = new HashSet<Graph.Node>();
private Set<Graph> subgraphs = new HashSet<Graph>();
public Graph(String name, Graph parent) {
this.name = name;
this.parent = parent;
}
public Graph(String name) {
this(name, null);
}
public Graph() {
this("graph", null);
}
public String getName() {
return name;
}
public Graph getParent() {
return parent;
}
private Node newNode(String id) {
Node ret = new Node(id);
nodes.add(ret);
return ret;
}
public Node getNode(String id) {
for (Node node : nodes) {
if (node.id.equals(id)) {
return node;
}
}
return newNode(id);
}
public Graph newSubGraph(String name) {
Graph ret = new Graph(name, this);
subgraphs.add(ret);
return ret;
}
public void addSubGraph(Graph graph) {
subgraphs.add(graph);
graph.parent = this;
}
private static String wrapSafeString(String label) {
if (label.indexOf(',') >= 0) {
if (label.length()>14) {
label = label.replaceAll(",", ",\n");
}
}
label = "\"" + StringEscapeUtils.escapeJava(label) + "\"";
return label;
}
public String generateGraphViz(String indent) {
StringBuilder sb = new StringBuilder();
if (this.parent == null) {
sb.append("digraph " + name + " {\n");
sb.append(String.format("graph [ label=%s, fontsize=24, fontname=Helvetica];%n",
wrapSafeString(name)));
sb.append("node [fontsize=12, fontname=Helvetica];\n");
sb.append("edge [fontsize=9, fontcolor=blue, fontname=Arial];\n");
} else {
sb.append("subgraph cluster_" + name + " {\nlabel=\"" + name + "\"\n");
}
for (Graph g : subgraphs) {
String ginfo = g.generateGraphViz(indent+" ");
sb.append(ginfo);
sb.append("\n");
}
for (Node n : nodes) {
sb.append(String.format(
"%s%s [ label = %s ];%n",
indent,
wrapSafeString(n.getUniqueId()),
n.id));
List<Edge> combinedOuts = combineEdges(n.outs);
for (Edge e : combinedOuts) {
sb.append(String.format(
"%s%s -> %s [ label = %s ];%n",
indent,
wrapSafeString(e.from.getUniqueId()),
wrapSafeString(e.to.getUniqueId()),
wrapSafeString(e.label)));
}
}
sb.append("}\n");
return sb.toString();
}
public String generateGraphViz() {
return generateGraphViz("");
}
public void save(String filepath) throws IOException {
try (OutputStreamWriter fout = new OutputStreamWriter(
new FileOutputStream(filepath), Charset.forName("UTF-8"));) {
fout.write(generateGraphViz());
}
}
public static List<Edge> combineEdges(List<Edge> edges) {
List<Edge> ret = new ArrayList<Edge>();
for (Edge edge : edges) {
boolean found = false;
for (int i = 0; i < ret.size(); i++) {
Edge current = ret.get(i);
if (edge.sameAs(current)) {
ret.set(i, current.combine(edge));
found = true;
break;
}
}
if (!found) {
ret.add(edge);
}
}
return ret;
}
}
| 5,535 | 24.62963 | 86 |
java
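A hedged usage sketch for the Graph class above: build two nodes, add parallel edges (which combineEdges merges into one comma-separated label), and emit GraphViz source. The demo class name and output path are illustrative.

import org.apache.hadoop.yarn.state.Graph;

public class GraphDemo {
  public static void main(String[] args) throws Exception {
    Graph g = new Graph("StateMachine");
    Graph.Node init = g.getNode("INIT");     // getNode creates nodes on demand
    Graph.Node running = g.getNode("RUNNING");
    init.addEdge(running, "START");
    init.addEdge(running, "RESTART");        // merged into label "START,RESTART"
    System.out.println(g.generateGraphViz());
    g.save("/tmp/statemachine.gv");          // UTF-8 file consumable by dot(1)
  }
}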
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/RpcClientFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.factories;
import java.net.InetSocketAddress;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
@InterfaceAudience.LimitedPrivate({ "MapReduce", "YARN" })
public interface RpcClientFactory {
public Object getClient(Class<?> protocol, long clientVersion,
InetSocketAddress addr, Configuration conf);
public void stopClient(Object proxy);
}
| 1,249 | 35.764706 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.factories;
| 849 | 41.5 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/RpcServerFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.factories;
import java.net.InetSocketAddress;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.security.token.TokenIdentifier;
@InterfaceAudience.LimitedPrivate({ "MapReduce", "YARN" })
public interface RpcServerFactory {
public Server getServer(Class<?> protocol, Object instance,
InetSocketAddress addr, Configuration conf,
SecretManager<? extends TokenIdentifier> secretManager,
int numHandlers, String portRangeConfig);
}
| 1,462 | 39.638889 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Private
package org.apache.hadoop.yarn.factories.impl.pb;
import org.apache.hadoop.classification.InterfaceAudience;
| 943 | 41.909091 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RecordFactoryPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.factories.impl.pb;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.factories.RecordFactory;
@Private
public class RecordFactoryPBImpl implements RecordFactory {
private static final String PB_IMPL_PACKAGE_SUFFIX = "impl.pb";
private static final String PB_IMPL_CLASS_SUFFIX = "PBImpl";
private static final RecordFactoryPBImpl self = new RecordFactoryPBImpl();
private Configuration localConf = new Configuration();
private ConcurrentMap<Class<?>, Constructor<?>> cache = new ConcurrentHashMap<Class<?>, Constructor<?>>();
private RecordFactoryPBImpl() {
}
public static RecordFactory get() {
return self;
}
@SuppressWarnings("unchecked")
@Override
public <T> T newRecordInstance(Class<T> clazz) {
Constructor<?> constructor = cache.get(clazz);
if (constructor == null) {
Class<?> pbClazz = null;
try {
pbClazz = localConf.getClassByName(getPBImplClassName(clazz));
} catch (ClassNotFoundException e) {
throw new YarnRuntimeException("Failed to load class: ["
+ getPBImplClassName(clazz) + "]", e);
}
try {
constructor = pbClazz.getConstructor();
constructor.setAccessible(true);
cache.putIfAbsent(clazz, constructor);
} catch (NoSuchMethodException e) {
throw new YarnRuntimeException("Could not find 0 argument constructor", e);
}
}
try {
Object retObject = constructor.newInstance();
return (T)retObject;
} catch (InvocationTargetException e) {
throw new YarnRuntimeException(e);
} catch (IllegalAccessException e) {
throw new YarnRuntimeException(e);
} catch (InstantiationException e) {
throw new YarnRuntimeException(e);
}
}
private String getPBImplClassName(Class<?> clazz) {
String srcPackagePart = getPackageName(clazz);
String srcClassName = getClassName(clazz);
String destPackagePart = srcPackagePart + "." + PB_IMPL_PACKAGE_SUFFIX;
String destClassPart = srcClassName + PB_IMPL_CLASS_SUFFIX;
return destPackagePart + "." + destClassPart;
}
private String getClassName(Class<?> clazz) {
String fqName = clazz.getName();
return (fqName.substring(fqName.lastIndexOf(".") + 1, fqName.length()));
}
private String getPackageName(Class<?> clazz) {
return clazz.getPackage().getName();
}
}
| 3,518 | 34.908163 | 108 |
java
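A small sketch of the factory's contract, assuming the usual YARN layout: for an abstract record a.b.Foo it reflectively loads a.b.impl.pb.FooPBImpl through the zero-argument constructor. ApplicationId is a real YARN record, used here purely for illustration.

import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factories.impl.pb.RecordFactoryPBImpl;

public class RecordFactoryDemo {
  public static void main(String[] args) {
    RecordFactory factory = RecordFactoryPBImpl.get();
    // Resolves org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl
    ApplicationId appId = factory.newRecordInstance(ApplicationId.class);
    System.out.println(appId.getClass().getName());
  }
}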
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.factories.impl.pb;
import java.io.IOException;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.net.InetSocketAddress;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.factories.RpcServerFactory;
import com.google.protobuf.BlockingService;
@Private
public class RpcServerFactoryPBImpl implements RpcServerFactory {
private static final Log LOG = LogFactory.getLog(RpcServerFactoryPBImpl.class);
private static final String PROTO_GEN_PACKAGE_NAME = "org.apache.hadoop.yarn.proto";
private static final String PROTO_GEN_CLASS_SUFFIX = "Service";
private static final String PB_IMPL_PACKAGE_SUFFIX = "impl.pb.service";
private static final String PB_IMPL_CLASS_SUFFIX = "PBServiceImpl";
private static final RpcServerFactoryPBImpl self = new RpcServerFactoryPBImpl();
private Configuration localConf = new Configuration();
private ConcurrentMap<Class<?>, Constructor<?>> serviceCache = new ConcurrentHashMap<Class<?>, Constructor<?>>();
private ConcurrentMap<Class<?>, Method> protoCache = new ConcurrentHashMap<Class<?>, Method>();
public static RpcServerFactoryPBImpl get() {
return RpcServerFactoryPBImpl.self;
}
private RpcServerFactoryPBImpl() {
}
public Server getServer(Class<?> protocol, Object instance,
InetSocketAddress addr, Configuration conf,
SecretManager<? extends TokenIdentifier> secretManager, int numHandlers) {
return getServer(protocol, instance, addr, conf, secretManager, numHandlers,
null);
}
@Override
public Server getServer(Class<?> protocol, Object instance,
InetSocketAddress addr, Configuration conf,
SecretManager<? extends TokenIdentifier> secretManager, int numHandlers,
String portRangeConfig) {
Constructor<?> constructor = serviceCache.get(protocol);
if (constructor == null) {
Class<?> pbServiceImplClazz = null;
try {
pbServiceImplClazz = localConf
.getClassByName(getPbServiceImplClassName(protocol));
} catch (ClassNotFoundException e) {
throw new YarnRuntimeException("Failed to load class: ["
+ getPbServiceImplClassName(protocol) + "]", e);
}
try {
constructor = pbServiceImplClazz.getConstructor(protocol);
constructor.setAccessible(true);
serviceCache.putIfAbsent(protocol, constructor);
} catch (NoSuchMethodException e) {
throw new YarnRuntimeException("Could not find constructor with params: "
+ Long.TYPE + ", " + InetSocketAddress.class + ", "
+ Configuration.class, e);
}
}
Object service = null;
try {
service = constructor.newInstance(instance);
} catch (InvocationTargetException e) {
throw new YarnRuntimeException(e);
} catch (IllegalAccessException e) {
throw new YarnRuntimeException(e);
} catch (InstantiationException e) {
throw new YarnRuntimeException(e);
}
Class<?> pbProtocol = service.getClass().getInterfaces()[0];
Method method = protoCache.get(protocol);
if (method == null) {
Class<?> protoClazz = null;
try {
protoClazz = localConf.getClassByName(getProtoClassName(protocol));
} catch (ClassNotFoundException e) {
throw new YarnRuntimeException("Failed to load class: ["
+ getProtoClassName(protocol) + "]", e);
}
try {
method = protoClazz.getMethod("newReflectiveBlockingService",
pbProtocol.getInterfaces()[0]);
method.setAccessible(true);
protoCache.putIfAbsent(protocol, method);
} catch (NoSuchMethodException e) {
throw new YarnRuntimeException(e);
}
}
try {
return createServer(pbProtocol, addr, conf, secretManager, numHandlers,
(BlockingService)method.invoke(null, service), portRangeConfig);
} catch (InvocationTargetException e) {
throw new YarnRuntimeException(e);
} catch (IllegalAccessException e) {
throw new YarnRuntimeException(e);
} catch (IOException e) {
throw new YarnRuntimeException(e);
}
}
private String getProtoClassName(Class<?> clazz) {
String srcClassName = getClassName(clazz);
return PROTO_GEN_PACKAGE_NAME + "." + srcClassName + "$" + srcClassName + PROTO_GEN_CLASS_SUFFIX;
}
private String getPbServiceImplClassName(Class<?> clazz) {
String srcPackagePart = getPackageName(clazz);
String srcClassName = getClassName(clazz);
String destPackagePart = srcPackagePart + "." + PB_IMPL_PACKAGE_SUFFIX;
String destClassPart = srcClassName + PB_IMPL_CLASS_SUFFIX;
return destPackagePart + "." + destClassPart;
}
private String getClassName(Class<?> clazz) {
String fqName = clazz.getName();
return (fqName.substring(fqName.lastIndexOf(".") + 1, fqName.length()));
}
private String getPackageName(Class<?> clazz) {
return clazz.getPackage().getName();
}
private Server createServer(Class<?> pbProtocol, InetSocketAddress addr, Configuration conf,
SecretManager<? extends TokenIdentifier> secretManager, int numHandlers,
BlockingService blockingService, String portRangeConfig) throws IOException {
RPC.setProtocolEngine(conf, pbProtocol, ProtobufRpcEngine.class);
RPC.Server server = new RPC.Builder(conf).setProtocol(pbProtocol)
.setInstance(blockingService).setBindAddress(addr.getHostName())
.setPort(addr.getPort()).setNumHandlers(numHandlers).setVerbose(false)
.setSecretManager(secretManager).setPortRangeConfig(portRangeConfig)
.build();
LOG.info("Adding protocol "+pbProtocol.getCanonicalName()+" to the server");
server.addProtocol(RPC.RpcKind.RPC_PROTOCOL_BUFFER, pbProtocol, blockingService);
return server;
}
}
| 7,230 | 39.396648 | 115 |
java
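The factory above leans entirely on naming conventions, so the sketch below just reproduces that derivation for a real protocol to make the convention visible. It mirrors getPbServiceImplClassName and getProtoClassName rather than calling the factory itself.

import org.apache.hadoop.yarn.api.ApplicationClientProtocol;

public class ServerNamingDemo {
  static String pbServiceImplName(Class<?> protocol) {
    return protocol.getPackage().getName() + ".impl.pb.service."
        + protocol.getSimpleName() + "PBServiceImpl";
  }
  static String protoServiceName(Class<?> protocol) {
    String name = protocol.getSimpleName();
    return "org.apache.hadoop.yarn.proto." + name + "$" + name + "Service";
  }
  public static void main(String[] args) {
    Class<?> p = ApplicationClientProtocol.class;
    // ...impl.pb.service.ApplicationClientProtocolPBServiceImpl
    System.out.println(pbServiceImplName(p));
    // org.apache.hadoop.yarn.proto.ApplicationClientProtocol$ApplicationClientProtocolService
    System.out.println(protoServiceName(p));
  }
}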
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.factories.impl.pb;
import java.io.Closeable;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Proxy;
import java.net.InetSocketAddress;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.factories.RpcClientFactory;
@Private
public class RpcClientFactoryPBImpl implements RpcClientFactory {
private static final Log LOG = LogFactory
.getLog(RpcClientFactoryPBImpl.class);
private static final String PB_IMPL_PACKAGE_SUFFIX = "impl.pb.client";
private static final String PB_IMPL_CLASS_SUFFIX = "PBClientImpl";
private static final RpcClientFactoryPBImpl self = new RpcClientFactoryPBImpl();
private Configuration localConf = new Configuration();
private ConcurrentMap<Class<?>, Constructor<?>> cache = new ConcurrentHashMap<Class<?>, Constructor<?>>();
public static RpcClientFactoryPBImpl get() {
return RpcClientFactoryPBImpl.self;
}
private RpcClientFactoryPBImpl() {
}
public Object getClient(Class<?> protocol, long clientVersion,
InetSocketAddress addr, Configuration conf) {
Constructor<?> constructor = cache.get(protocol);
if (constructor == null) {
Class<?> pbClazz = null;
try {
pbClazz = localConf.getClassByName(getPBImplClassName(protocol));
} catch (ClassNotFoundException e) {
throw new YarnRuntimeException("Failed to load class: ["
+ getPBImplClassName(protocol) + "]", e);
}
try {
constructor = pbClazz.getConstructor(Long.TYPE, InetSocketAddress.class, Configuration.class);
constructor.setAccessible(true);
cache.putIfAbsent(protocol, constructor);
} catch (NoSuchMethodException e) {
throw new YarnRuntimeException("Could not find constructor with params: " + Long.TYPE + ", " + InetSocketAddress.class + ", " + Configuration.class, e);
}
}
try {
Object retObject = constructor.newInstance(clientVersion, addr, conf);
return retObject;
} catch (InvocationTargetException e) {
throw new YarnRuntimeException(e);
} catch (IllegalAccessException e) {
throw new YarnRuntimeException(e);
} catch (InstantiationException e) {
throw new YarnRuntimeException(e);
}
}
@Override
public void stopClient(Object proxy) {
try {
if (proxy instanceof Closeable) {
((Closeable) proxy).close();
return;
} else {
InvocationHandler handler = Proxy.getInvocationHandler(proxy);
if (handler instanceof Closeable) {
((Closeable) handler).close();
return;
}
}
} catch (Exception e) {
LOG.error("Cannot call close method due to Exception. " + "Ignoring.", e);
throw new YarnRuntimeException(e);
}
throw new HadoopIllegalArgumentException(
"Cannot close proxy - is not Closeable or "
+ "does not provide closeable invocation handler " + proxy.getClass());
}
private String getPBImplClassName(Class<?> clazz) {
String srcPackagePart = getPackageName(clazz);
String srcClassName = getClassName(clazz);
String destPackagePart = srcPackagePart + "." + PB_IMPL_PACKAGE_SUFFIX;
String destClassPart = srcClassName + PB_IMPL_CLASS_SUFFIX;
return destPackagePart + "." + destClassPart;
}
private String getClassName(Class<?> clazz) {
String fqName = clazz.getName();
return (fqName.substring(fqName.lastIndexOf(".") + 1, fqName.length()));
}
private String getPackageName(Class<?> clazz) {
return clazz.getPackage().getName();
}
}
| 4,841 | 36.828125 | 160 |
java
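A hedged usage sketch for the client factory above. ApplicationClientProtocol is a real YARN protocol; the host and port are placeholders, and the reflectively built proxy only connects lazily on first call.

import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
import org.apache.hadoop.yarn.factories.impl.pb.RpcClientFactoryPBImpl;

public class ClientFactoryDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    InetSocketAddress addr = new InetSocketAddress("rm.example.com", 8032);
    // Reflectively constructs ...api.impl.pb.client.ApplicationClientProtocolPBClientImpl
    Object client = RpcClientFactoryPBImpl.get().getClient(
        ApplicationClientProtocol.class, 1L, addr, conf);
    try {
      ApplicationClientProtocol rm = (ApplicationClientProtocol) client;
      // ... issue RPC calls through rm here ...
    } finally {
      RpcClientFactoryPBImpl.get().stopClient(client); // closes the Closeable proxy
    }
  }
}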
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.event;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.util.ShutdownHookManager;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import com.google.common.annotations.VisibleForTesting;
/**
 * Dispatches {@link Event}s in a separate thread. Currently a single thread
 * does this. Potentially there could be multiple channels for each event type
 * class, and a thread pool could be used to dispatch the events.
*/
@SuppressWarnings("rawtypes")
@Public
@Evolving
public class AsyncDispatcher extends AbstractService implements Dispatcher {
private static final Log LOG = LogFactory.getLog(AsyncDispatcher.class);
private final BlockingQueue<Event> eventQueue;
private volatile boolean stopped = false;
  // Configuration flag for enabling/disabling the draining of the dispatcher's
  // remaining events on stop.
private volatile boolean drainEventsOnStop = false;
// Indicates all the remaining dispatcher's events on stop have been drained
// and processed.
private volatile boolean drained = true;
private Object waitForDrained = new Object();
// For drainEventsOnStop enabled only, block newly coming events into the
// queue while stopping.
private volatile boolean blockNewEvents = false;
private EventHandler handlerInstance = null;
private Thread eventHandlingThread;
protected final Map<Class<? extends Enum>, EventHandler> eventDispatchers;
private boolean exitOnDispatchException;
public AsyncDispatcher() {
this(new LinkedBlockingQueue<Event>());
}
public AsyncDispatcher(BlockingQueue<Event> eventQueue) {
super("Dispatcher");
this.eventQueue = eventQueue;
this.eventDispatchers = new HashMap<Class<? extends Enum>, EventHandler>();
}
Runnable createThread() {
return new Runnable() {
@Override
public void run() {
while (!stopped && !Thread.currentThread().isInterrupted()) {
drained = eventQueue.isEmpty();
// blockNewEvents is only set when dispatcher is draining to stop,
// adding this check is to avoid the overhead of acquiring the lock
// and calling notify every time in the normal run of the loop.
if (blockNewEvents) {
synchronized (waitForDrained) {
if (drained) {
waitForDrained.notify();
}
}
}
Event event;
try {
event = eventQueue.take();
} catch(InterruptedException ie) {
if (!stopped) {
LOG.warn("AsyncDispatcher thread interrupted", ie);
}
return;
}
if (event != null) {
dispatch(event);
}
}
}
};
}
@Override
protected void serviceInit(Configuration conf) throws Exception {
this.exitOnDispatchException =
conf.getBoolean(Dispatcher.DISPATCHER_EXIT_ON_ERROR_KEY,
Dispatcher.DEFAULT_DISPATCHER_EXIT_ON_ERROR);
super.serviceInit(conf);
}
@Override
protected void serviceStart() throws Exception {
//start all the components
super.serviceStart();
eventHandlingThread = new Thread(createThread());
eventHandlingThread.setName("AsyncDispatcher event handler");
eventHandlingThread.start();
}
public void setDrainEventsOnStop() {
drainEventsOnStop = true;
}
@Override
protected void serviceStop() throws Exception {
if (drainEventsOnStop) {
blockNewEvents = true;
LOG.info("AsyncDispatcher is draining to stop, igonring any new events.");
synchronized (waitForDrained) {
while (!drained && eventHandlingThread != null
&& eventHandlingThread.isAlive()) {
waitForDrained.wait(1000);
LOG.info("Waiting for AsyncDispatcher to drain. Thread state is :" +
eventHandlingThread.getState());
}
}
}
stopped = true;
if (eventHandlingThread != null) {
eventHandlingThread.interrupt();
try {
eventHandlingThread.join();
} catch (InterruptedException ie) {
LOG.warn("Interrupted Exception while stopping", ie);
}
}
// stop all the components
super.serviceStop();
}
@SuppressWarnings("unchecked")
protected void dispatch(Event event) {
    // all events go through this method
if (LOG.isDebugEnabled()) {
LOG.debug("Dispatching the event " + event.getClass().getName() + "."
+ event.toString());
}
Class<? extends Enum> type = event.getType().getDeclaringClass();
try{
EventHandler handler = eventDispatchers.get(type);
if(handler != null) {
handler.handle(event);
} else {
throw new Exception("No handler for registered for " + type);
}
} catch (Throwable t) {
//TODO Maybe log the state of the queue
LOG.fatal("Error in dispatcher thread", t);
// If serviceStop is called, we should exit this thread gracefully.
if (exitOnDispatchException
&& (ShutdownHookManager.get().isShutdownInProgress()) == false
&& stopped == false) {
Thread shutDownThread = new Thread(createShutDownThread());
shutDownThread.setName("AsyncDispatcher ShutDown handler");
shutDownThread.start();
}
}
}
@SuppressWarnings("unchecked")
@Override
public void register(Class<? extends Enum> eventType,
EventHandler handler) {
/* check to see if we have a listener registered */
EventHandler<Event> registeredHandler = (EventHandler<Event>)
eventDispatchers.get(eventType);
LOG.info("Registering " + eventType + " for " + handler.getClass());
if (registeredHandler == null) {
eventDispatchers.put(eventType, handler);
} else if (!(registeredHandler instanceof MultiListenerHandler)){
/* for multiple listeners of an event add the multiple listener handler */
MultiListenerHandler multiHandler = new MultiListenerHandler();
multiHandler.addHandler(registeredHandler);
multiHandler.addHandler(handler);
eventDispatchers.put(eventType, multiHandler);
} else {
/* already a multilistener, just add to it */
MultiListenerHandler multiHandler
= (MultiListenerHandler) registeredHandler;
multiHandler.addHandler(handler);
}
}
@Override
public EventHandler getEventHandler() {
if (handlerInstance == null) {
handlerInstance = new GenericEventHandler();
}
return handlerInstance;
}
class GenericEventHandler implements EventHandler<Event> {
public void handle(Event event) {
if (blockNewEvents) {
return;
}
drained = false;
/* all this method does is enqueue all the events onto the queue */
int qSize = eventQueue.size();
      if (qSize != 0 && qSize % 1000 == 0) {
LOG.info("Size of event-queue is " + qSize);
}
int remCapacity = eventQueue.remainingCapacity();
if (remCapacity < 1000) {
LOG.warn("Very low remaining capacity in the event-queue: "
+ remCapacity);
}
try {
eventQueue.put(event);
} catch (InterruptedException e) {
if (!stopped) {
LOG.warn("AsyncDispatcher thread interrupted", e);
}
// Need to reset drained flag to true if event queue is empty,
// otherwise dispatcher will hang on stop.
drained = eventQueue.isEmpty();
throw new YarnRuntimeException(e);
}
};
}
/**
* Multiplexing an event. Sending it to different handlers that
* are interested in the event.
* @param <T> the type of event these multiple handlers are interested in.
*/
static class MultiListenerHandler implements EventHandler<Event> {
List<EventHandler<Event>> listofHandlers;
public MultiListenerHandler() {
listofHandlers = new ArrayList<EventHandler<Event>>();
}
@Override
public void handle(Event event) {
for (EventHandler<Event> handler: listofHandlers) {
handler.handle(event);
}
}
void addHandler(EventHandler<Event> handler) {
listofHandlers.add(handler);
}
}
Runnable createShutDownThread() {
return new Runnable() {
@Override
public void run() {
LOG.info("Exiting, bbye..");
System.exit(-1);
}
};
}
@VisibleForTesting
protected boolean isEventThreadWaiting() {
return eventHandlingThread.getState() == Thread.State.WAITING;
}
@VisibleForTesting
protected boolean isDrained() {
return this.drained;
}
}
| 9,841 | 31.481848 | 80 |
java
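A minimal, hedged sketch of wiring the dispatcher above: handlers are registered against the declaring class of an event-type enum, events are posted through the generic handler, and setDrainEventsOnStop lets queued events finish before stop. All demo names below are invented.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.event.AbstractEvent;
import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.event.EventHandler;

public class DispatcherDemo {
  enum DemoEventType { PING }

  static class DemoEvent extends AbstractEvent<DemoEventType> {
    DemoEvent() { super(DemoEventType.PING); }
  }

  public static void main(String[] args) throws Exception {
    AsyncDispatcher dispatcher = new AsyncDispatcher();
    dispatcher.init(new Configuration());
    // Handlers are looked up by the declaring class of the event's enum type.
    dispatcher.register(DemoEventType.class,
        new EventHandler<DemoEvent>() {
          @Override
          public void handle(DemoEvent event) {
            System.out.println("handled " + event.getType());
          }
        });
    dispatcher.start();
    dispatcher.getEventHandler().handle(new DemoEvent());
    dispatcher.setDrainEventsOnStop(); // process queued events before stopping
    dispatcher.stop();
  }
}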
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Public
package org.apache.hadoop.yarn.event;
import org.apache.hadoop.classification.InterfaceAudience;
| 930 | 41.318182 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/Event.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.event;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
/**
 * Interface defining the events API.
*
*/
@Public
@Evolving
public interface Event<TYPE extends Enum<TYPE>> {
TYPE getType();
long getTimestamp();
String toString();
}
| 1,145 | 30.833333 | 74 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/Dispatcher.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.event;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
/**
* Event Dispatcher interface. It dispatches events to registered
* event handlers based on event types.
*
*/
@SuppressWarnings("rawtypes")
@Public
@Evolving
public interface Dispatcher {
  // Configuration flag that makes the dispatcher exit the JVM when a dispatch
  // error occurs. By default it should be false, so that tests are not
  // affected. For all daemons it should be explicitly set to true so that
  // they crash fast instead of hanging around.
public static final String DISPATCHER_EXIT_ON_ERROR_KEY =
"yarn.dispatcher.exit-on-error";
public static final boolean DEFAULT_DISPATCHER_EXIT_ON_ERROR = false;
EventHandler getEventHandler();
void register(Class<? extends Enum> eventType, EventHandler handler);
}
| 1,729 | 35.041667 | 80 |
java
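A one-line setup sketch for the flag discussed above; a daemon would set this in its service configuration so dispatch errors crash the process rather than wedging it. The demo class name is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.event.Dispatcher;

public class DispatcherConfigDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setBoolean(Dispatcher.DISPATCHER_EXIT_ON_ERROR_KEY, true); // crash fast
    System.out.println(conf.getBoolean(
        Dispatcher.DISPATCHER_EXIT_ON_ERROR_KEY,
        Dispatcher.DEFAULT_DISPATCHER_EXIT_ON_ERROR));              // prints true
  }
}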
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/EventHandler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.event;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
/**
* Interface for handling events of type T
*
* @param <T> parameterized event of type T
*/
@SuppressWarnings("rawtypes")
@Public
@Evolving
public interface EventHandler<T extends Event> {
void handle(T event);
}
| 1,190 | 31.189189 | 74 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AbstractEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.event;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
/**
* Parent class of all the events. All events extend this class.
*/
@Public
@Evolving
public abstract class AbstractEvent<TYPE extends Enum<TYPE>>
implements Event<TYPE> {
private final TYPE type;
private final long timestamp;
// use this if you DON'T care about the timestamp
public AbstractEvent(TYPE type) {
this.type = type;
// We're not generating a real timestamp here. It's too expensive.
timestamp = -1L;
}
// use this if you care about the timestamp
public AbstractEvent(TYPE type, long timestamp) {
this.type = type;
this.timestamp = timestamp;
}
@Override
public long getTimestamp() {
return timestamp;
}
@Override
public TYPE getType() {
return type;
}
@Override
public String toString() {
return "EventType: " + getType();
}
}
| 1,790 | 27.428571 | 74 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.LimitedPrivate({ "MapReduce", "YARN" })
package org.apache.hadoop.yarn.ipc;
import org.apache.hadoop.classification.InterfaceAudience;
| 961 | 42.727273 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/YarnRPC.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.ipc;
import java.net.InetSocketAddress;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
/**
* Abstraction to get the RPC implementation for Yarn.
*/
@InterfaceAudience.LimitedPrivate({ "MapReduce", "YARN" })
public abstract class YarnRPC {
private static final Log LOG = LogFactory.getLog(YarnRPC.class);
public abstract Object getProxy(Class protocol, InetSocketAddress addr,
Configuration conf);
public abstract void stopProxy(Object proxy, Configuration conf);
public abstract Server getServer(Class protocol, Object instance,
InetSocketAddress addr, Configuration conf,
SecretManager<? extends TokenIdentifier> secretManager,
int numHandlers, String portRangeConfig);
public Server getServer(Class protocol, Object instance,
InetSocketAddress addr, Configuration conf,
SecretManager<? extends TokenIdentifier> secretManager,
int numHandlers) {
return getServer(protocol, instance, addr, conf, secretManager, numHandlers,
null);
}
public static YarnRPC create(Configuration conf) {
LOG.debug("Creating YarnRPC for " +
conf.get(YarnConfiguration.IPC_RPC_IMPL));
String clazzName = conf.get(YarnConfiguration.IPC_RPC_IMPL);
if (clazzName == null) {
clazzName = YarnConfiguration.DEFAULT_IPC_RPC_IMPL;
}
try {
return (YarnRPC) Class.forName(clazzName).newInstance();
} catch (Exception e) {
throw new YarnRuntimeException(e);
}
}
}
| 2,702 | 36.027397 | 80 |
java
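A hedged sketch of the factory entry point above: create the configured YarnRPC implementation (HadoopYarnProtoRPC by default, see the next file) and fetch a typed proxy. The address is a placeholder.

import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
import org.apache.hadoop.yarn.ipc.YarnRPC;

public class YarnRpcDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    YarnRPC rpc = YarnRPC.create(conf); // picks the impl named by IPC_RPC_IMPL
    ApplicationClientProtocol proxy = (ApplicationClientProtocol) rpc.getProxy(
        ApplicationClientProtocol.class,
        new InetSocketAddress("rm.example.com", 8032), conf);
    try {
      // ... issue RPC calls through the proxy here ...
    } finally {
      rpc.stopProxy(proxy, conf);
    }
  }
}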
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/HadoopYarnProtoRPC.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.ipc;
import java.net.InetSocketAddress;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.yarn.factory.providers.RpcFactoryProvider;
/**
 * This uses Hadoop RPC, tunneling a protobuf-specific RPC engine over the
 * Hadoop connection.
* This does not give cross-language wire compatibility, since the Hadoop
* RPC wire format is non-standard, but it does permit use of Protocol Buffers
* protocol versioning features for inter-Java RPCs.
*/
@InterfaceAudience.LimitedPrivate({ "MapReduce", "YARN" })
public class HadoopYarnProtoRPC extends YarnRPC {
private static final Log LOG = LogFactory.getLog(HadoopYarnProtoRPC.class);
@Override
public Object getProxy(Class protocol, InetSocketAddress addr,
Configuration conf) {
LOG.debug("Creating a HadoopYarnProtoRpc proxy for protocol " + protocol);
return RpcFactoryProvider.getClientFactory(conf).getClient(protocol, 1,
addr, conf);
}
@Override
public void stopProxy(Object proxy, Configuration conf) {
RpcFactoryProvider.getClientFactory(conf).stopClient(proxy);
}
@Override
public Server getServer(Class protocol, Object instance,
InetSocketAddress addr, Configuration conf,
SecretManager<? extends TokenIdentifier> secretManager,
int numHandlers, String portRangeConfig) {
LOG.debug("Creating a HadoopYarnProtoRpc server for protocol " + protocol +
" with " + numHandlers + " handlers");
return RpcFactoryProvider.getServerFactory(conf).getServer(protocol,
instance, addr, conf, secretManager, numHandlers, portRangeConfig);
}
}
| 2,734 | 37.521127 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/RPCUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.ipc;
import java.io.IOException;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.yarn.exceptions.YarnException;
import com.google.protobuf.ServiceException;
@InterfaceAudience.LimitedPrivate({ "MapReduce", "YARN" })
public class RPCUtil {
/**
* Returns an instance of {@link YarnException}
*/
public static YarnException getRemoteException(Throwable t) {
return new YarnException(t);
}
/**
* Returns an instance of {@link YarnException}
*/
public static YarnException getRemoteException(String message) {
return new YarnException(message);
}
private static <T extends Throwable> T instantiateException(
Class<? extends T> cls, RemoteException re) throws RemoteException {
try {
Constructor<? extends T> cn = cls.getConstructor(String.class);
cn.setAccessible(true);
T ex = cn.newInstance(re.getMessage());
ex.initCause(re);
return ex;
// RemoteException contains useful information as against the
// java.lang.reflect exceptions.
} catch (NoSuchMethodException e) {
throw re;
} catch (IllegalArgumentException e) {
throw re;
} catch (SecurityException e) {
throw re;
} catch (InstantiationException e) {
throw re;
} catch (IllegalAccessException e) {
throw re;
} catch (InvocationTargetException e) {
throw re;
}
}
private static <T extends YarnException> T instantiateYarnException(
Class<? extends T> cls, RemoteException re) throws RemoteException {
return instantiateException(cls, re);
}
private static <T extends IOException> T instantiateIOException(
Class<? extends T> cls, RemoteException re) throws RemoteException {
return instantiateException(cls, re);
}
private static <T extends RuntimeException> T instantiateRuntimeException(
Class<? extends T> cls, RemoteException re) throws RemoteException {
return instantiateException(cls, re);
}
  /**
   * Utility method that unwraps a ServiceException and rethrows the
   * underlying cause as the appropriate exception type.
   *
   * @param se
   *          ServiceException
   * @return never returns normally; always throws a subclass of
   *         {@link YarnException} or {@link IOException}
   */
public static Void unwrapAndThrowException(ServiceException se)
throws IOException, YarnException {
Throwable cause = se.getCause();
if (cause == null) {
// SE generated by the RPC layer itself.
throw new IOException(se);
} else {
if (cause instanceof RemoteException) {
RemoteException re = (RemoteException) cause;
Class<?> realClass = null;
try {
realClass = Class.forName(re.getClassName());
} catch (ClassNotFoundException cnf) {
// Assume this to be a new exception type added to YARN. This isn't
// absolutely correct since the RPC layer could add an exception as
// well.
throw instantiateYarnException(YarnException.class, re);
}
if (YarnException.class.isAssignableFrom(realClass)) {
throw instantiateYarnException(
realClass.asSubclass(YarnException.class), re);
} else if (IOException.class.isAssignableFrom(realClass)) {
throw instantiateIOException(realClass.asSubclass(IOException.class),
re);
} else if (RuntimeException.class.isAssignableFrom(realClass)) {
throw instantiateRuntimeException(
realClass.asSubclass(RuntimeException.class), re);
} else {
throw re;
}
// RemoteException contains useful information as against the
// java.lang.reflect exceptions.
} else if (cause instanceof IOException) {
// RPC Client exception.
throw (IOException) cause;
} else if (cause instanceof RuntimeException) {
// RPC RuntimeException
throw (RuntimeException) cause;
} else {
// Should not be generated.
throw new IOException(se);
}
}
}
}
| 5,013 | 34.062937 | 79 |
java
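A runnable sketch of the unwrap logic above using a hand-built ServiceException: when the cause is a plain IOException, the method rethrows it directly (the "RPC Client exception" branch). The demo class name and message are illustrative.

import java.io.IOException;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.ipc.RPCUtil;
import com.google.protobuf.ServiceException;

public class UnwrapDemo {
  public static void main(String[] args) {
    ServiceException se = new ServiceException(new IOException("boom"));
    try {
      RPCUtil.unwrapAndThrowException(se);
    } catch (IOException e) {
      System.out.println("unwrapped IOException: " + e.getMessage()); // "boom"
    } catch (YarnException e) {
      System.out.println("unwrapped YarnException: " + e.getMessage());
    }
  }
}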
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Public
package org.apache.hadoop.yarn.logaggregation;
import org.apache.hadoop.classification.InterfaceAudience;
| 939 | 41.727273 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogDeletionService.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.logaggregation;
import java.io.IOException;
import java.util.Timer;
import java.util.TimerTask;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.client.ClientRMProxy;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.util.ConverterUtils;
import com.google.common.annotations.VisibleForTesting;
/**
* A service that periodically deletes aggregated logs.
*/
@InterfaceAudience.LimitedPrivate({"yarn", "mapreduce"})
public class AggregatedLogDeletionService extends AbstractService {
private static final Log LOG = LogFactory.getLog(AggregatedLogDeletionService.class);
private Timer timer = null;
private long checkIntervalMsecs;
private LogDeletionTask task;
static class LogDeletionTask extends TimerTask {
private Configuration conf;
private long retentionMillis;
private String suffix = null;
private Path remoteRootLogDir = null;
private ApplicationClientProtocol rmClient = null;
public LogDeletionTask(Configuration conf, long retentionSecs, ApplicationClientProtocol rmClient) {
this.conf = conf;
this.retentionMillis = retentionSecs * 1000;
this.suffix = LogAggregationUtils.getRemoteNodeLogDirSuffix(conf);
this.remoteRootLogDir =
new Path(conf.get(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
YarnConfiguration.DEFAULT_NM_REMOTE_APP_LOG_DIR));
this.rmClient = rmClient;
}
@Override
public void run() {
long cutoffMillis = System.currentTimeMillis() - retentionMillis;
LOG.info("aggregated log deletion started.");
try {
FileSystem fs = remoteRootLogDir.getFileSystem(conf);
for(FileStatus userDir : fs.listStatus(remoteRootLogDir)) {
if(userDir.isDirectory()) {
Path userDirPath = new Path(userDir.getPath(), suffix);
deleteOldLogDirsFrom(userDirPath, cutoffMillis, fs, rmClient);
}
}
} catch (IOException e) {
logIOException("Error reading root log dir this deletion " +
"attempt is being aborted", e);
}
LOG.info("aggregated log deletion finished.");
}
private static void deleteOldLogDirsFrom(Path dir, long cutoffMillis,
FileSystem fs, ApplicationClientProtocol rmClient) {
try {
for(FileStatus appDir : fs.listStatus(dir)) {
if(appDir.isDirectory() &&
appDir.getModificationTime() < cutoffMillis) {
boolean appTerminated =
isApplicationTerminated(ConverterUtils.toApplicationId(appDir
.getPath().getName()), rmClient);
if(appTerminated && shouldDeleteLogDir(appDir, cutoffMillis, fs)) {
try {
LOG.info("Deleting aggregated logs in "+appDir.getPath());
fs.delete(appDir.getPath(), true);
} catch (IOException e) {
logIOException("Could not delete "+appDir.getPath(), e);
}
} else if (!appTerminated){
try {
for(FileStatus node: fs.listStatus(appDir.getPath())) {
if(node.getModificationTime() < cutoffMillis) {
try {
fs.delete(node.getPath(), true);
} catch (IOException ex) {
logIOException("Could not delete "+appDir.getPath(), ex);
}
}
}
} catch(IOException e) {
logIOException(
"Error reading the contents of " + appDir.getPath(), e);
}
}
}
}
} catch (IOException e) {
logIOException("Could not read the contents of " + dir, e);
}
}
private static boolean shouldDeleteLogDir(FileStatus dir, long cutoffMillis,
FileSystem fs) {
boolean shouldDelete = true;
try {
for(FileStatus node: fs.listStatus(dir.getPath())) {
if(node.getModificationTime() >= cutoffMillis) {
shouldDelete = false;
break;
}
}
} catch(IOException e) {
logIOException("Error reading the contents of " + dir.getPath(), e);
shouldDelete = false;
}
return shouldDelete;
}
private static boolean isApplicationTerminated(ApplicationId appId,
ApplicationClientProtocol rmClient) throws IOException {
ApplicationReport appReport = null;
try {
appReport =
rmClient.getApplicationReport(
GetApplicationReportRequest.newInstance(appId))
.getApplicationReport();
} catch (ApplicationNotFoundException e) {
return true;
} catch (YarnException e) {
throw new IOException(e);
}
YarnApplicationState currentState = appReport.getYarnApplicationState();
return currentState == YarnApplicationState.FAILED
|| currentState == YarnApplicationState.KILLED
|| currentState == YarnApplicationState.FINISHED;
}
public ApplicationClientProtocol getRMClient() {
return this.rmClient;
}
}
private static void logIOException(String comment, IOException e) {
if(e instanceof AccessControlException) {
String message = e.getMessage();
//TODO fix this after HADOOP-8661
message = message.split("\n")[0];
LOG.warn(comment + " " + message);
} else {
LOG.error(comment, e);
}
}
public AggregatedLogDeletionService() {
super(AggregatedLogDeletionService.class.getName());
}
@Override
protected void serviceStart() throws Exception {
scheduleLogDeletionTask();
super.serviceStart();
}
@Override
protected void serviceStop() throws Exception {
stopRMClient();
stopTimer();
super.serviceStop();
}
private void setLogAggCheckIntervalMsecs(long retentionSecs) {
Configuration conf = getConfig();
checkIntervalMsecs = 1000 * conf
.getLong(
YarnConfiguration.LOG_AGGREGATION_RETAIN_CHECK_INTERVAL_SECONDS,
YarnConfiguration.DEFAULT_LOG_AGGREGATION_RETAIN_CHECK_INTERVAL_SECONDS);
if (checkIntervalMsecs <= 0) {
// when unspecified compute check interval as 1/10th of retention
checkIntervalMsecs = (retentionSecs * 1000) / 10;
}
}
public void refreshLogRetentionSettings() throws IOException {
if (getServiceState() == STATE.STARTED) {
Configuration conf = createConf();
setConfig(conf);
stopRMClient();
stopTimer();
scheduleLogDeletionTask();
} else {
LOG.warn("Failed to execute refreshLogRetentionSettings : Aggregated Log Deletion Service is not started");
}
}
private void scheduleLogDeletionTask() throws IOException {
Configuration conf = getConfig();
if (!conf.getBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED,
YarnConfiguration.DEFAULT_LOG_AGGREGATION_ENABLED)) {
// Log aggregation is not enabled so don't bother
return;
}
long retentionSecs = conf.getLong(
YarnConfiguration.LOG_AGGREGATION_RETAIN_SECONDS,
YarnConfiguration.DEFAULT_LOG_AGGREGATION_RETAIN_SECONDS);
if (retentionSecs < 0) {
LOG.info("Log Aggregation deletion is disabled because retention is"
+ " too small (" + retentionSecs + ")");
return;
}
setLogAggCheckIntervalMsecs(retentionSecs);
    task = new LogDeletionTask(conf, retentionSecs, createRMClient());
timer = new Timer();
timer.scheduleAtFixedRate(task, 0, checkIntervalMsecs);
}
private void stopTimer() {
if (timer != null) {
timer.cancel();
}
}
public long getCheckIntervalMsecs() {
return checkIntervalMsecs;
}
protected Configuration createConf() {
return new Configuration();
}
  // Directly create and use ApplicationClientProtocol.
  // ApplicationClientProtocol.getApplicationReport is marked @Idempotent,
  // so the proxy automatically takes care of RM restart/failover.
  @VisibleForTesting
  protected ApplicationClientProtocol createRMClient() throws IOException {
return ClientRMProxy.createRMProxy(getConfig(),
ApplicationClientProtocol.class);
}
@VisibleForTesting
protected void stopRMClient() {
if (task != null && task.getRMClient() != null) {
RPC.stopProxy(task.getRMClient());
}
}
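  // Minimal usage sketch (assumes the standard AbstractService lifecycle;
  // conf is a hypothetical Configuration):
  //
  //   AggregatedLogDeletionService svc = new AggregatedLogDeletionService();
  //   svc.init(conf);   // reads retention/interval settings
  //   svc.start();      // schedules the periodic LogDeletionTask
  //   ...
  //   svc.stop();       // cancels the timer and stops the RM proxy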
}
| 10,062 | 35.197842 | 113 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogAggregationUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.logaggregation;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import com.google.common.annotations.VisibleForTesting;
@Private
public class LogAggregationUtils {
public static final String TMP_FILE_SUFFIX = ".tmp";
/**
* Constructs the full filename for an application's log file per node.
* @param remoteRootLogDir
* @param appId
* @param user
* @param nodeId
* @param suffix
* @return the remote log file.
*/
public static Path getRemoteNodeLogFileForApp(Path remoteRootLogDir,
ApplicationId appId, String user, NodeId nodeId, String suffix) {
return new Path(getRemoteAppLogDir(remoteRootLogDir, appId, user, suffix),
getNodeString(nodeId));
}
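  // Illustrative result (hypothetical values): for remoteRootLogDir
  // "/tmp/logs", user "alice", suffix "logs", appId
  // "application_1400000000000_0001" and nodeId "host1:8041" this returns
  // "/tmp/logs/alice/logs/application_1400000000000_0001/host1_8041".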
/**
* Gets the remote app log dir.
* @param remoteRootLogDir
* @param appId
* @param user
* @param suffix
* @return the remote application specific log dir.
*/
public static Path getRemoteAppLogDir(Path remoteRootLogDir,
ApplicationId appId, String user, String suffix) {
return new Path(getRemoteLogSuffixedDir(remoteRootLogDir, user, suffix),
appId.toString());
}
/**
* Gets the remote suffixed log dir for the user.
* @param remoteRootLogDir
* @param user
* @param suffix
* @return the remote suffixed log dir.
*/
public static Path getRemoteLogSuffixedDir(Path remoteRootLogDir,
String user, String suffix) {
if (suffix == null || suffix.isEmpty()) {
return getRemoteLogUserDir(remoteRootLogDir, user);
}
// TODO Maybe support suffix to be more than a single file.
return new Path(getRemoteLogUserDir(remoteRootLogDir, user), suffix);
}
// TODO Add a utility method to list available log files. Ignore the
// temporary ones.
/**
* Gets the remote log user dir.
* @param remoteRootLogDir
* @param user
* @return the remote per user log dir.
*/
public static Path getRemoteLogUserDir(Path remoteRootLogDir, String user) {
return new Path(remoteRootLogDir, user);
}
/**
* Returns the suffix component of the log dir.
* @param conf
* @return the suffix which will be appended to the user log dir.
*/
public static String getRemoteNodeLogDirSuffix(Configuration conf) {
return conf.get(YarnConfiguration.NM_REMOTE_APP_LOG_DIR_SUFFIX,
YarnConfiguration.DEFAULT_NM_REMOTE_APP_LOG_DIR_SUFFIX);
}
/**
* Converts a nodeId to a form used in the app log file name.
* @param nodeId
* @return the node string to be used to construct the file name.
*/
@VisibleForTesting
public static String getNodeString(NodeId nodeId) {
return nodeId.toString().replace(":", "_");
}
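  // e.g. NodeId "host1:8041" becomes "host1_8041"; the ':' is replaced
  // because it is awkward in file names.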
@VisibleForTesting
public static String getNodeString(String nodeId) {
return nodeId.toString().replace(":", "_");
}
}
| 3,865 | 31.487395 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.logaggregation;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.EOFException;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.PrintStream;
import java.io.Writer;
import java.nio.charset.Charset;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.regex.Pattern;
import org.apache.commons.io.input.BoundedInputStream;
import org.apache.commons.io.output.WriterOutputStream;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SecureIOUtils;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.file.tfile.TFile;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.LogAggregationContext;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.util.Times;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Predicate;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
@Public
@Evolving
public class AggregatedLogFormat {
private static final Log LOG = LogFactory.getLog(AggregatedLogFormat.class);
private static final LogKey APPLICATION_ACL_KEY = new LogKey("APPLICATION_ACL");
private static final LogKey APPLICATION_OWNER_KEY = new LogKey("APPLICATION_OWNER");
private static final LogKey VERSION_KEY = new LogKey("VERSION");
private static final Map<String, LogKey> RESERVED_KEYS;
//Maybe write out the retention policy.
//Maybe write out a list of containerLogs skipped by the retention policy.
private static final int VERSION = 1;
  /**
   * Umask for the log file: 0640 ^ 0777 = 0137, so files created under it
   * end up with rw-r----- (0640) permissions.
   */
  private static final FsPermission APP_LOG_FILE_UMASK = FsPermission
      .createImmutable((short) (0640 ^ 0777));
static {
RESERVED_KEYS = new HashMap<String, AggregatedLogFormat.LogKey>();
RESERVED_KEYS.put(APPLICATION_ACL_KEY.toString(), APPLICATION_ACL_KEY);
RESERVED_KEYS.put(APPLICATION_OWNER_KEY.toString(), APPLICATION_OWNER_KEY);
RESERVED_KEYS.put(VERSION_KEY.toString(), VERSION_KEY);
}
@Public
public static class LogKey implements Writable {
private String keyString;
public LogKey() {
}
public LogKey(ContainerId containerId) {
this.keyString = containerId.toString();
}
public LogKey(String keyString) {
this.keyString = keyString;
}
@Override
public int hashCode() {
return keyString == null ? 0 : keyString.hashCode();
}
@Override
public boolean equals(Object obj) {
if (obj instanceof LogKey) {
LogKey other = (LogKey) obj;
if (this.keyString == null) {
return other.keyString == null;
}
return this.keyString.equals(other.keyString);
}
return false;
}
@Private
@Override
public void write(DataOutput out) throws IOException {
out.writeUTF(this.keyString);
}
@Private
@Override
public void readFields(DataInput in) throws IOException {
this.keyString = in.readUTF();
}
@Override
public String toString() {
return this.keyString;
}
}
@Private
public static class LogValue {
private final List<String> rootLogDirs;
private final ContainerId containerId;
private final String user;
private final LogAggregationContext logAggregationContext;
private Set<File> uploadedFiles = new HashSet<File>();
private final Set<String> alreadyUploadedLogFiles;
private Set<String> allExistingFileMeta = new HashSet<String>();
private final boolean appFinished;
// TODO Maybe add a version string here. Instead of changing the version of
// the entire k-v format
public LogValue(List<String> rootLogDirs, ContainerId containerId,
String user) {
this(rootLogDirs, containerId, user, null, new HashSet<String>(), true);
}
public LogValue(List<String> rootLogDirs, ContainerId containerId,
String user, LogAggregationContext logAggregationContext,
Set<String> alreadyUploadedLogFiles, boolean appFinished) {
this.rootLogDirs = new ArrayList<String>(rootLogDirs);
this.containerId = containerId;
this.user = user;
// Ensure logs are processed in lexical order
Collections.sort(this.rootLogDirs);
this.logAggregationContext = logAggregationContext;
this.alreadyUploadedLogFiles = alreadyUploadedLogFiles;
this.appFinished = appFinished;
}
private Set<File> getPendingLogFilesToUploadForThisContainer() {
Set<File> pendingUploadFiles = new HashSet<File>();
for (String rootLogDir : this.rootLogDirs) {
File appLogDir =
new File(rootLogDir,
ConverterUtils.toString(
this.containerId.getApplicationAttemptId().
getApplicationId())
);
File containerLogDir =
new File(appLogDir, ConverterUtils.toString(this.containerId));
if (!containerLogDir.isDirectory()) {
continue; // ContainerDir may have been deleted by the user.
}
pendingUploadFiles
.addAll(getPendingLogFilesToUpload(containerLogDir));
}
return pendingUploadFiles;
}
public void write(DataOutputStream out, Set<File> pendingUploadFiles)
throws IOException {
List<File> fileList = new ArrayList<File>(pendingUploadFiles);
Collections.sort(fileList);
for (File logFile : fileList) {
// We only aggregate top level files.
// Ignore anything inside sub-folders.
if (logFile.isDirectory()) {
LOG.warn(logFile.getAbsolutePath() + " is a directory. Ignore it.");
continue;
}
FileInputStream in = null;
try {
in = secureOpenFile(logFile);
} catch (IOException e) {
logErrorMessage(logFile, e);
IOUtils.cleanup(LOG, in);
continue;
}
final long fileLength = logFile.length();
// Write the logFile Type
out.writeUTF(logFile.getName());
// Write the log length as UTF so that it is printable
out.writeUTF(String.valueOf(fileLength));
// Write the log itself
try {
byte[] buf = new byte[65535];
int len = 0;
long bytesLeft = fileLength;
while ((len = in.read(buf)) != -1) {
//If buffer contents within fileLength, write
if (len < bytesLeft) {
out.write(buf, 0, len);
bytesLeft-=len;
}
//else only write contents within fileLength, then exit early
else {
out.write(buf, 0, (int)bytesLeft);
break;
}
}
long newLength = logFile.length();
if(fileLength < newLength) {
LOG.warn("Aggregated logs truncated by approximately "+
(newLength-fileLength) +" bytes.");
}
this.uploadedFiles.add(logFile);
} catch (IOException e) {
String message = logErrorMessage(logFile, e);
out.write(message.getBytes(Charset.forName("UTF-8")));
} finally {
IOUtils.cleanup(LOG, in);
}
}
}
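    // On-stream layout written above, per log file: writeUTF(fileName),
    // writeUTF(String.valueOf(fileLength)), then up to fileLength raw bytes
    // of log content. Readers such as readContainerLogs() depend on exactly
    // this ordering.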
@VisibleForTesting
public FileInputStream secureOpenFile(File logFile) throws IOException {
return SecureIOUtils.openForRead(logFile, getUser(), null);
}
private static String logErrorMessage(File logFile, Exception e) {
String message = "Error aggregating log file. Log file : "
+ logFile.getAbsolutePath() + ". " + e.getMessage();
LOG.error(message, e);
return message;
}
// Added for testing purpose.
public String getUser() {
return user;
}
private Set<File> getPendingLogFilesToUpload(File containerLogDir) {
Set<File> candidates =
new HashSet<File>(Arrays.asList(containerLogDir.listFiles()));
for (File logFile : candidates) {
this.allExistingFileMeta.add(getLogFileMetaData(logFile));
}
if (this.logAggregationContext != null && candidates.size() > 0) {
filterFiles(
this.appFinished ? this.logAggregationContext.getIncludePattern()
: this.logAggregationContext.getRolledLogsIncludePattern(),
candidates, false);
filterFiles(
this.appFinished ? this.logAggregationContext.getExcludePattern()
: this.logAggregationContext.getRolledLogsExcludePattern(),
candidates, true);
Iterable<File> mask =
Iterables.filter(candidates, new Predicate<File>() {
@Override
public boolean apply(File next) {
return !alreadyUploadedLogFiles
.contains(getLogFileMetaData(next));
}
});
candidates = Sets.newHashSet(mask);
}
return candidates;
}
private void filterFiles(String pattern, Set<File> candidates,
boolean exclusion) {
if (pattern != null && !pattern.isEmpty()) {
Pattern filterPattern = Pattern.compile(pattern);
for (Iterator<File> candidatesItr = candidates.iterator(); candidatesItr
.hasNext();) {
File candidate = candidatesItr.next();
boolean match = filterPattern.matcher(candidate.getName()).find();
if ((!match && !exclusion) || (match && exclusion)) {
candidatesItr.remove();
}
}
}
}
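    // Example (illustrative patterns): with include pattern "std" and
    // exclude pattern "gc", candidates {stdout, stderr, gc.log} reduce to
    // {stdout, stderr} -- the include pass removes non-matching files, the
    // exclude pass removes matching ones.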
public Set<Path> getCurrentUpLoadedFilesPath() {
Set<Path> path = new HashSet<Path>();
for (File file : this.uploadedFiles) {
path.add(new Path(file.getAbsolutePath()));
}
return path;
}
public Set<String> getCurrentUpLoadedFileMeta() {
Set<String> info = new HashSet<String>();
for (File file : this.uploadedFiles) {
info.add(getLogFileMetaData(file));
}
return info;
}
public Set<String> getAllExistingFilesMeta() {
return this.allExistingFileMeta;
}
private String getLogFileMetaData(File file) {
return containerId.toString() + "_" + file.getName() + "_"
+ file.lastModified();
}
}
/**
* The writer that writes out the aggregated logs.
*/
@Private
public static class LogWriter {
private final FSDataOutputStream fsDataOStream;
private final TFile.Writer writer;
private FileContext fc;
public LogWriter(final Configuration conf, final Path remoteAppLogFile,
UserGroupInformation userUgi) throws IOException {
try {
this.fsDataOStream =
userUgi.doAs(new PrivilegedExceptionAction<FSDataOutputStream>() {
@Override
public FSDataOutputStream run() throws Exception {
fc = FileContext.getFileContext(remoteAppLogFile.toUri(), conf);
fc.setUMask(APP_LOG_FILE_UMASK);
return fc.create(
remoteAppLogFile,
EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
new Options.CreateOpts[] {});
}
});
} catch (InterruptedException e) {
throw new IOException(e);
}
      // Keys are not sorted: null comparator arg.
      // 256KB minBlockSize: the expected log size for each container.
this.writer =
new TFile.Writer(this.fsDataOStream, 256 * 1024, conf.get(
YarnConfiguration.NM_LOG_AGG_COMPRESSION_TYPE,
YarnConfiguration.DEFAULT_NM_LOG_AGG_COMPRESSION_TYPE), null, conf);
//Write the version string
writeVersion();
}
@VisibleForTesting
public TFile.Writer getWriter() {
return this.writer;
}
private void writeVersion() throws IOException {
DataOutputStream out = this.writer.prepareAppendKey(-1);
VERSION_KEY.write(out);
out.close();
out = this.writer.prepareAppendValue(-1);
out.writeInt(VERSION);
out.close();
}
public void writeApplicationOwner(String user) throws IOException {
DataOutputStream out = this.writer.prepareAppendKey(-1);
APPLICATION_OWNER_KEY.write(out);
out.close();
out = this.writer.prepareAppendValue(-1);
out.writeUTF(user);
out.close();
}
public void writeApplicationACLs(Map<ApplicationAccessType, String> appAcls)
throws IOException {
DataOutputStream out = this.writer.prepareAppendKey(-1);
APPLICATION_ACL_KEY.write(out);
out.close();
out = this.writer.prepareAppendValue(-1);
for (Entry<ApplicationAccessType, String> entry : appAcls.entrySet()) {
out.writeUTF(entry.getKey().toString());
out.writeUTF(entry.getValue());
}
out.close();
}
public void append(LogKey logKey, LogValue logValue) throws IOException {
Set<File> pendingUploadFiles =
logValue.getPendingLogFilesToUploadForThisContainer();
if (pendingUploadFiles.size() == 0) {
return;
}
DataOutputStream out = this.writer.prepareAppendKey(-1);
logKey.write(out);
out.close();
out = this.writer.prepareAppendValue(-1);
logValue.write(out, pendingUploadFiles);
out.close();
}
public void close() {
try {
this.writer.close();
} catch (IOException e) {
LOG.warn("Exception closing writer", e);
}
IOUtils.closeStream(fsDataOStream);
}
}
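  // Typical LogWriter sequence (sketch; the variables are hypothetical):
  //
  //   LogWriter writer = new LogWriter(conf, remoteAppLogFile, userUgi);
  //   writer.writeApplicationOwner(user);
  //   writer.writeApplicationACLs(appAcls);
  //   writer.append(new LogKey(containerId),
  //       new LogValue(rootLogDirs, containerId, user));
  //   writer.close();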
@Public
@Evolving
public static class LogReader {
private final FSDataInputStream fsDataIStream;
private final TFile.Reader.Scanner scanner;
private final TFile.Reader reader;
public LogReader(Configuration conf, Path remoteAppLogFile)
throws IOException {
FileContext fileContext =
FileContext.getFileContext(remoteAppLogFile.toUri(), conf);
this.fsDataIStream = fileContext.open(remoteAppLogFile);
reader =
new TFile.Reader(this.fsDataIStream, fileContext.getFileStatus(
remoteAppLogFile).getLen(), conf);
this.scanner = reader.createScanner();
}
private boolean atBeginning = true;
/**
* Returns the owner of the application.
*
* @return the application owner.
* @throws IOException
*/
public String getApplicationOwner() throws IOException {
TFile.Reader.Scanner ownerScanner = null;
try {
ownerScanner = reader.createScanner();
LogKey key = new LogKey();
while (!ownerScanner.atEnd()) {
TFile.Reader.Scanner.Entry entry = ownerScanner.entry();
key.readFields(entry.getKeyStream());
if (key.toString().equals(APPLICATION_OWNER_KEY.toString())) {
DataInputStream valueStream = entry.getValueStream();
return valueStream.readUTF();
}
ownerScanner.advance();
}
return null;
} finally {
IOUtils.cleanup(LOG, ownerScanner);
}
}
/**
* Returns ACLs for the application. An empty map is returned if no ACLs are
* found.
*
* @return a map of the Application ACLs.
* @throws IOException
*/
public Map<ApplicationAccessType, String> getApplicationAcls()
throws IOException {
// TODO Seek directly to the key once a comparator is specified.
TFile.Reader.Scanner aclScanner = null;
try {
aclScanner = reader.createScanner();
LogKey key = new LogKey();
Map<ApplicationAccessType, String> acls =
new HashMap<ApplicationAccessType, String>();
while (!aclScanner.atEnd()) {
TFile.Reader.Scanner.Entry entry = aclScanner.entry();
key.readFields(entry.getKeyStream());
if (key.toString().equals(APPLICATION_ACL_KEY.toString())) {
DataInputStream valueStream = entry.getValueStream();
while (true) {
String appAccessOp = null;
String aclString = null;
try {
appAccessOp = valueStream.readUTF();
} catch (EOFException e) {
// Valid end of stream.
break;
}
try {
aclString = valueStream.readUTF();
} catch (EOFException e) {
throw new YarnRuntimeException("Error reading ACLs", e);
}
acls.put(ApplicationAccessType.valueOf(appAccessOp), aclString);
}
}
aclScanner.advance();
}
return acls;
} finally {
IOUtils.cleanup(LOG, aclScanner);
}
}
/**
* Read the next key and return the value-stream.
*
* @param key
* @return the valueStream if there are more keys or null otherwise.
* @throws IOException
*/
public DataInputStream next(LogKey key) throws IOException {
if (!this.atBeginning) {
this.scanner.advance();
} else {
this.atBeginning = false;
}
if (this.scanner.atEnd()) {
return null;
}
TFile.Reader.Scanner.Entry entry = this.scanner.entry();
key.readFields(entry.getKeyStream());
// Skip META keys
if (RESERVED_KEYS.containsKey(key.toString())) {
return next(key);
}
DataInputStream valueStream = entry.getValueStream();
return valueStream;
}
/**
* Get a ContainerLogsReader to read the logs for
* the specified container.
*
* @param containerId
* @return object to read the container's logs or null if the
* logs could not be found
* @throws IOException
*/
@Private
public ContainerLogsReader getContainerLogsReader(
ContainerId containerId) throws IOException {
ContainerLogsReader logReader = null;
final LogKey containerKey = new LogKey(containerId);
LogKey key = new LogKey();
DataInputStream valueStream = next(key);
while (valueStream != null && !key.equals(containerKey)) {
valueStream = next(key);
}
if (valueStream != null) {
logReader = new ContainerLogsReader(valueStream);
}
return logReader;
}
//TODO Change Log format and interfaces to be containerId specific.
// Avoid returning completeValueStreams.
// public List<String> getTypesForContainer(DataInputStream valueStream){}
//
// /**
// * @param valueStream
// * The Log stream for the container.
// * @param fileType
// * the log type required.
// * @return An InputStreamReader for the required log type or null if the
// * type is not found.
// * @throws IOException
// */
// public InputStreamReader getLogStreamForType(DataInputStream valueStream,
// String fileType) throws IOException {
// valueStream.reset();
// try {
// while (true) {
// String ft = valueStream.readUTF();
// String fileLengthStr = valueStream.readUTF();
// long fileLength = Long.parseLong(fileLengthStr);
// if (ft.equals(fileType)) {
// BoundedInputStream bis =
// new BoundedInputStream(valueStream, fileLength);
// return new InputStreamReader(bis);
// } else {
// long totalSkipped = 0;
// long currSkipped = 0;
// while (currSkipped != -1 && totalSkipped < fileLength) {
// currSkipped = valueStream.skip(fileLength - totalSkipped);
// totalSkipped += currSkipped;
// }
// // TODO Verify skip behaviour.
// if (currSkipped == -1) {
// return null;
// }
// }
// }
// } catch (EOFException e) {
// return null;
// }
// }
/**
* Writes all logs for a single container to the provided writer.
* @param valueStream
* @param writer
* @param logUploadedTime
* @throws IOException
*/
public static void readAcontainerLogs(DataInputStream valueStream,
Writer writer, long logUploadedTime) throws IOException {
OutputStream os = null;
PrintStream ps = null;
try {
os = new WriterOutputStream(writer, Charset.forName("UTF-8"));
ps = new PrintStream(os);
while (true) {
try {
readContainerLogs(valueStream, ps, logUploadedTime);
} catch (EOFException e) {
// EndOfFile
return;
}
}
} finally {
IOUtils.cleanup(LOG, ps);
IOUtils.cleanup(LOG, os);
}
}
/**
* Writes all logs for a single container to the provided writer.
* @param valueStream
* @param writer
* @throws IOException
*/
public static void readAcontainerLogs(DataInputStream valueStream,
Writer writer) throws IOException {
readAcontainerLogs(valueStream, writer, -1);
}
private static void readContainerLogs(DataInputStream valueStream,
PrintStream out, long logUploadedTime) throws IOException {
byte[] buf = new byte[65535];
String fileType = valueStream.readUTF();
String fileLengthStr = valueStream.readUTF();
long fileLength = Long.parseLong(fileLengthStr);
out.print("LogType:");
out.println(fileType);
if (logUploadedTime != -1) {
out.print("Log Upload Time:");
out.println(Times.format(logUploadedTime));
}
out.print("LogLength:");
out.println(fileLengthStr);
out.println("Log Contents:");
long curRead = 0;
long pendingRead = fileLength - curRead;
int toRead =
pendingRead > buf.length ? buf.length : (int) pendingRead;
int len = valueStream.read(buf, 0, toRead);
while (len != -1 && curRead < fileLength) {
out.write(buf, 0, len);
curRead += len;
pendingRead = fileLength - curRead;
toRead =
pendingRead > buf.length ? buf.length : (int) pendingRead;
len = valueStream.read(buf, 0, toRead);
}
out.println("End of LogType:" + fileType);
out.println("");
}
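    // Output produced above (illustrative):
    //   LogType:stdout
    //   LogLength:1234
    //   Log Contents:
    //   <raw log bytes>
    //   End of LogType:stdout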
/**
     * Keep calling this until you get an {@link EOFException}, to read logs
     * of all types for a single container.
*
* @param valueStream
* @param out
* @param logUploadedTime
* @throws IOException
*/
public static void readAContainerLogsForALogType(
DataInputStream valueStream, PrintStream out, long logUploadedTime)
throws IOException {
readContainerLogs(valueStream, out, logUploadedTime);
}
/**
     * Keep calling this until you get an {@link EOFException}, to read logs
     * of all types for a single container.
*
* @param valueStream
* @param out
* @throws IOException
*/
public static void readAContainerLogsForALogType(
DataInputStream valueStream, PrintStream out)
throws IOException {
readAContainerLogsForALogType(valueStream, out, -1);
}
/**
     * Keep calling this until you get an {@link EOFException}, to read logs
     * of the specified types for a single container.
* @param valueStream
* @param out
* @param logUploadedTime
* @param logType
* @throws IOException
*/
public static int readContainerLogsForALogType(
DataInputStream valueStream, PrintStream out, long logUploadedTime,
List<String> logType) throws IOException {
byte[] buf = new byte[65535];
String fileType = valueStream.readUTF();
String fileLengthStr = valueStream.readUTF();
long fileLength = Long.parseLong(fileLengthStr);
if (logType.contains(fileType)) {
out.print("LogType:");
out.println(fileType);
if (logUploadedTime != -1) {
out.print("Log Upload Time:");
out.println(Times.format(logUploadedTime));
}
out.print("LogLength:");
out.println(fileLengthStr);
out.println("Log Contents:");
long curRead = 0;
long pendingRead = fileLength - curRead;
int toRead = pendingRead > buf.length ? buf.length : (int) pendingRead;
int len = valueStream.read(buf, 0, toRead);
while (len != -1 && curRead < fileLength) {
out.write(buf, 0, len);
curRead += len;
pendingRead = fileLength - curRead;
toRead = pendingRead > buf.length ? buf.length : (int) pendingRead;
len = valueStream.read(buf, 0, toRead);
}
out.println("End of LogType:" + fileType);
out.println("");
return 0;
} else {
long totalSkipped = 0;
long currSkipped = 0;
while (currSkipped != -1 && totalSkipped < fileLength) {
currSkipped = valueStream.skip(fileLength - totalSkipped);
totalSkipped += currSkipped;
}
return -1;
}
}
public void close() {
IOUtils.cleanup(LOG, scanner, reader, fsDataIStream);
}
}
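  // Reading sketch (assumes an already-aggregated remoteAppLogFile and a
  // hypothetical someWriter):
  //
  //   LogReader reader = new LogReader(conf, remoteAppLogFile);
  //   try {
  //     LogKey key = new LogKey();
  //     DataInputStream in = reader.next(key);
  //     while (in != null) {          // one entry per container
  //       LogReader.readAcontainerLogs(in, someWriter);
  //       key = new LogKey();
  //       in = reader.next(key);
  //     }
  //   } finally {
  //     reader.close();
  //   }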
@Private
public static class ContainerLogsReader {
private DataInputStream valueStream;
private String currentLogType = null;
private long currentLogLength = 0;
private BoundedInputStream currentLogData = null;
private InputStreamReader currentLogISR;
public ContainerLogsReader(DataInputStream stream) {
valueStream = stream;
}
public String nextLog() throws IOException {
if (currentLogData != null && currentLogLength > 0) {
// seek to the end of the current log, relying on BoundedInputStream
// to prevent seeking past the end of the current log
do {
if (currentLogData.skip(currentLogLength) < 0) {
break;
}
} while (currentLogData.read() != -1);
}
currentLogType = null;
currentLogLength = 0;
currentLogData = null;
currentLogISR = null;
try {
String logType = valueStream.readUTF();
String logLengthStr = valueStream.readUTF();
currentLogLength = Long.parseLong(logLengthStr);
currentLogData =
new BoundedInputStream(valueStream, currentLogLength);
currentLogData.setPropagateClose(false);
currentLogISR = new InputStreamReader(currentLogData,
Charset.forName("UTF-8"));
currentLogType = logType;
      } catch (EOFException e) {
        // end of stream: no further logs for this container
      }
return currentLogType;
}
public String getCurrentLogType() {
return currentLogType;
}
public long getCurrentLogLength() {
return currentLogLength;
}
public long skip(long n) throws IOException {
return currentLogData.skip(n);
}
public int read() throws IOException {
return currentLogData.read();
}
public int read(byte[] buf, int off, int len) throws IOException {
return currentLogData.read(buf, off, len);
}
public int read(char[] buf, int off, int len) throws IOException {
return currentLogISR.read(buf, off, len);
}
}
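  // ContainerLogsReader iteration sketch (illustrative):
  //
  //   ContainerLogsReader logs = reader.getContainerLogsReader(containerId);
  //   for (String type = logs.nextLog(); type != null; type = logs.nextLog()) {
  //     // consume up to logs.getCurrentLogLength() bytes via logs.read(...)
  //   }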
}
| 29,115 | 31.899435 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/ContainerLogsRetentionPolicy.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.logaggregation;
import org.apache.hadoop.classification.InterfaceAudience.Private;
/**
 * This API is not exposed to end-users yet.
 */
@Private
public enum ContainerLogsRetentionPolicy {
APPLICATION_MASTER_ONLY, AM_AND_FAILED_CONTAINERS_ONLY, ALL_CONTAINERS
}
| 1,105 | 35.866667 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogCLIHelpers.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.logaggregation;
import java.io.DataInputStream;
import java.io.EOFException;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.PrintStream;
import java.util.List;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey;
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogReader;
import org.apache.hadoop.yarn.util.ConverterUtils;
import com.google.common.annotations.VisibleForTesting;
public class LogCLIHelpers implements Configurable {
private Configuration conf;
@Private
@VisibleForTesting
public int dumpAContainersLogs(String appId, String containerId,
String nodeId, String jobOwner) throws IOException {
return dumpAContainersLogsForALogType(appId, containerId, nodeId, jobOwner,
null);
}
@Private
@VisibleForTesting
public int dumpAContainersLogsForALogType(String appId, String containerId,
String nodeId, String jobOwner, List<String> logType) throws IOException {
Path remoteRootLogDir = new Path(getConf().get(
YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
YarnConfiguration.DEFAULT_NM_REMOTE_APP_LOG_DIR));
String suffix = LogAggregationUtils.getRemoteNodeLogDirSuffix(getConf());
Path remoteAppLogDir = LogAggregationUtils.getRemoteAppLogDir(
remoteRootLogDir, ConverterUtils.toApplicationId(appId), jobOwner,
suffix);
RemoteIterator<FileStatus> nodeFiles;
try {
Path qualifiedLogDir =
FileContext.getFileContext(getConf()).makeQualified(
remoteAppLogDir);
nodeFiles =
FileContext.getFileContext(qualifiedLogDir.toUri(), getConf())
.listStatus(remoteAppLogDir);
} catch (FileNotFoundException fnf) {
logDirNotExist(remoteAppLogDir.toString());
return -1;
}
boolean foundContainerLogs = false;
while (nodeFiles.hasNext()) {
FileStatus thisNodeFile = nodeFiles.next();
String fileName = thisNodeFile.getPath().getName();
if (fileName.contains(LogAggregationUtils.getNodeString(nodeId))
&& !fileName.endsWith(LogAggregationUtils.TMP_FILE_SUFFIX)) {
AggregatedLogFormat.LogReader reader = null;
try {
reader =
new AggregatedLogFormat.LogReader(getConf(),
thisNodeFile.getPath());
if (logType == null) {
if (dumpAContainerLogs(containerId, reader, System.out,
thisNodeFile.getModificationTime()) > -1) {
foundContainerLogs = true;
}
} else {
if (dumpAContainerLogsForALogType(containerId, reader, System.out,
thisNodeFile.getModificationTime(), logType) > -1) {
foundContainerLogs = true;
}
}
} finally {
if (reader != null) {
reader.close();
}
}
}
}
if (!foundContainerLogs) {
containerLogNotFound(containerId);
return -1;
}
return 0;
}
@Private
public int dumpAContainerLogs(String containerIdStr,
AggregatedLogFormat.LogReader reader, PrintStream out,
long logUploadedTime) throws IOException {
DataInputStream valueStream;
LogKey key = new LogKey();
valueStream = reader.next(key);
while (valueStream != null && !key.toString().equals(containerIdStr)) {
// Next container
key = new LogKey();
valueStream = reader.next(key);
}
if (valueStream == null) {
return -1;
}
boolean foundContainerLogs = false;
while (true) {
try {
LogReader.readAContainerLogsForALogType(valueStream, out,
logUploadedTime);
foundContainerLogs = true;
} catch (EOFException eof) {
break;
}
}
if (foundContainerLogs) {
return 0;
}
return -1;
}
@Private
public int dumpAContainerLogsForALogType(String containerIdStr,
AggregatedLogFormat.LogReader reader, PrintStream out,
long logUploadedTime, List<String> logType) throws IOException {
DataInputStream valueStream;
LogKey key = new LogKey();
valueStream = reader.next(key);
while (valueStream != null && !key.toString().equals(containerIdStr)) {
// Next container
key = new LogKey();
valueStream = reader.next(key);
}
if (valueStream == null) {
return -1;
}
boolean foundContainerLogs = false;
while (true) {
try {
int result = LogReader.readContainerLogsForALogType(
valueStream, out, logUploadedTime, logType);
if (result == 0) {
foundContainerLogs = true;
}
} catch (EOFException eof) {
break;
}
}
if (foundContainerLogs) {
return 0;
}
return -1;
}
@Private
public int dumpAllContainersLogs(ApplicationId appId, String appOwner,
PrintStream out) throws IOException {
Path remoteRootLogDir = new Path(getConf().get(
YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
YarnConfiguration.DEFAULT_NM_REMOTE_APP_LOG_DIR));
String user = appOwner;
String logDirSuffix = LogAggregationUtils.getRemoteNodeLogDirSuffix(getConf());
// TODO Change this to get a list of files from the LAS.
Path remoteAppLogDir = LogAggregationUtils.getRemoteAppLogDir(
remoteRootLogDir, appId, user, logDirSuffix);
RemoteIterator<FileStatus> nodeFiles;
try {
Path qualifiedLogDir =
FileContext.getFileContext(getConf()).makeQualified(remoteAppLogDir);
nodeFiles = FileContext.getFileContext(qualifiedLogDir.toUri(),
getConf()).listStatus(remoteAppLogDir);
} catch (FileNotFoundException fnf) {
logDirNotExist(remoteAppLogDir.toString());
return -1;
}
boolean foundAnyLogs = false;
while (nodeFiles.hasNext()) {
FileStatus thisNodeFile = nodeFiles.next();
if (!thisNodeFile.getPath().getName()
.endsWith(LogAggregationUtils.TMP_FILE_SUFFIX)) {
AggregatedLogFormat.LogReader reader =
new AggregatedLogFormat.LogReader(getConf(), thisNodeFile.getPath());
try {
DataInputStream valueStream;
LogKey key = new LogKey();
valueStream = reader.next(key);
while (valueStream != null) {
String containerString =
"\n\nContainer: " + key + " on " + thisNodeFile.getPath().getName();
out.println(containerString);
out.println(StringUtils.repeat("=", containerString.length()));
while (true) {
try {
LogReader.readAContainerLogsForALogType(valueStream, out,
thisNodeFile.getModificationTime());
foundAnyLogs = true;
} catch (EOFException eof) {
break;
}
}
// Next container
key = new LogKey();
valueStream = reader.next(key);
}
} finally {
reader.close();
}
}
}
    if (!foundAnyLogs) {
emptyLogDir(remoteAppLogDir.toString());
return -1;
}
return 0;
}
@Override
public void setConf(Configuration conf) {
this.conf = conf;
}
@Override
public Configuration getConf() {
return this.conf;
}
private static void containerLogNotFound(String containerId) {
System.out.println("Logs for container " + containerId
+ " are not present in this log-file.");
}
private static void logDirNotExist(String remoteAppLogDir) {
System.out.println(remoteAppLogDir + " does not exist.");
System.out.println("Log aggregation has not completed or is not enabled.");
}
private static void emptyLogDir(String remoteAppLogDir) {
System.out.println(remoteAppLogDir + " does not have any log files.");
}
}
| 9,115 | 32.028986 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/security/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Private
package org.apache.hadoop.yarn.server.security;
import org.apache.hadoop.classification.InterfaceAudience;
| 941 | 41.818182 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/security/ApplicationACLsManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.security;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.security.AdminACLsManager;
import com.google.common.annotations.VisibleForTesting;
@InterfaceAudience.Private
public class ApplicationACLsManager {
private static final Log LOG = LogFactory
.getLog(ApplicationACLsManager.class);
private static AccessControlList DEFAULT_YARN_APP_ACL
= new AccessControlList(YarnConfiguration.DEFAULT_YARN_APP_ACL);
private final Configuration conf;
private final AdminACLsManager adminAclsManager;
private final ConcurrentMap<ApplicationId, Map<ApplicationAccessType, AccessControlList>> applicationACLS
= new ConcurrentHashMap<ApplicationId, Map<ApplicationAccessType, AccessControlList>>();
@VisibleForTesting
public ApplicationACLsManager() {
this(new Configuration());
}
public ApplicationACLsManager(Configuration conf) {
this.conf = conf;
this.adminAclsManager = new AdminACLsManager(this.conf);
}
public boolean areACLsEnabled() {
return adminAclsManager.areACLsEnabled();
}
public void addApplication(ApplicationId appId,
Map<ApplicationAccessType, String> acls) {
Map<ApplicationAccessType, AccessControlList> finalMap
= new HashMap<ApplicationAccessType, AccessControlList>(acls.size());
for (Entry<ApplicationAccessType, String> acl : acls.entrySet()) {
finalMap.put(acl.getKey(), new AccessControlList(acl.getValue()));
}
this.applicationACLS.put(appId, finalMap);
}
public void removeApplication(ApplicationId appId) {
this.applicationACLS.remove(appId);
}
/**
   * If authorization is enabled, checks whether the user (in the callerUGI)
   * is authorized to perform the access specified by 'applicationAccessType'
   * on the application, i.e. whether the user is the application owner or is
   * part of the application ACL for that access-type.
* <ul>
* <li>The owner of the application can have all access-types on the
* application</li>
* <li>For all other users/groups application-acls are checked</li>
* </ul>
*
* @param callerUGI
* @param applicationAccessType
* @param applicationOwner
   * @param applicationId
   * @return true if access is allowed, false otherwise
   */
public boolean checkAccess(UserGroupInformation callerUGI,
ApplicationAccessType applicationAccessType, String applicationOwner,
ApplicationId applicationId) {
if (LOG.isDebugEnabled()) {
LOG.debug("Verifying access-type " + applicationAccessType + " for "
+ callerUGI + " on application " + applicationId + " owned by "
+ applicationOwner);
}
String user = callerUGI.getShortUserName();
if (!areACLsEnabled()) {
return true;
}
AccessControlList applicationACL = DEFAULT_YARN_APP_ACL;
Map<ApplicationAccessType, AccessControlList> acls = this.applicationACLS
.get(applicationId);
if (acls == null) {
if (LOG.isDebugEnabled()) {
LOG.debug("ACL not found for application "
+ applicationId + " owned by "
+ applicationOwner + ". Using default ["
+ YarnConfiguration.DEFAULT_YARN_APP_ACL + "]");
}
} else {
AccessControlList applicationACLInMap = acls.get(applicationAccessType);
if (applicationACLInMap != null) {
applicationACL = applicationACLInMap;
} else if (LOG.isDebugEnabled()) {
LOG.debug("ACL not found for access-type " + applicationAccessType
+ " for application " + applicationId + " owned by "
+ applicationOwner + ". Using default ["
+ YarnConfiguration.DEFAULT_YARN_APP_ACL + "]");
}
}
// Allow application-owner for any type of access on the application
if (this.adminAclsManager.isAdmin(callerUGI)
|| user.equals(applicationOwner)
|| applicationACL.isUserAllowed(callerUGI)) {
return true;
}
return false;
}
/**
   * Check if the given user is an admin.
   *
   * @param callerUGI
   *          UserGroupInformation for the user
   * @return true if the user is an admin, false otherwise
   */
  public final boolean isAdmin(final UserGroupInformation callerUGI) {
    return this.adminAclsManager.isAdmin(callerUGI);
}
}
| 5,721 | 36.398693 | 107 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/ResourceManagerAdministrationProtocolPB.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.ipc.ProtocolInfo;
import org.apache.hadoop.yarn.proto.ResourceManagerAdministrationProtocol.ResourceManagerAdministrationProtocolService;
@Private
@Unstable
@ProtocolInfo(
protocolName = "org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocolPB",
protocolVersion = 1)
public interface ResourceManagerAdministrationProtocolPB extends ResourceManagerAdministrationProtocolService.BlockingInterface {
}
| 1,439 | 42.636364 | 129 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshQueuesRequestPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshQueuesRequestProto;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesRequest;
import com.google.protobuf.TextFormat;
@Private
@Unstable
public class RefreshQueuesRequestPBImpl extends RefreshQueuesRequest {
RefreshQueuesRequestProto proto = RefreshQueuesRequestProto.getDefaultInstance();
RefreshQueuesRequestProto.Builder builder = null;
boolean viaProto = false;
public RefreshQueuesRequestPBImpl() {
builder = RefreshQueuesRequestProto.newBuilder();
}
public RefreshQueuesRequestPBImpl(RefreshQueuesRequestProto proto) {
this.proto = proto;
viaProto = true;
}
public RefreshQueuesRequestProto getProto() {
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
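  // Note on the proto/builder/viaProto idiom used by these PBImpl records:
  // the instance is backed either by an immutable proto (viaProto == true)
  // or by a mutable builder; getProto() freezes any pending builder state
  // into a proto so all later reads go through the immutable form.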
@Override
public int hashCode() {
return getProto().hashCode();
}
@Override
public boolean equals(Object other) {
if (other == null)
return false;
if (other.getClass().isAssignableFrom(this.getClass())) {
return this.getProto().equals(this.getClass().cast(other).getProto());
}
return false;
}
@Override
public String toString() {
return TextFormat.shortDebugString(getProto());
}
}
| 2,277 | 31.084507 | 101 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RemoveFromClusterNodeLabelsResponsePBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RemoveFromClusterNodeLabelsResponseProto;
import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLabelsResponse;
import com.google.protobuf.TextFormat;
public class RemoveFromClusterNodeLabelsResponsePBImpl extends
RemoveFromClusterNodeLabelsResponse {
RemoveFromClusterNodeLabelsResponseProto proto =
RemoveFromClusterNodeLabelsResponseProto.getDefaultInstance();
RemoveFromClusterNodeLabelsResponseProto.Builder builder = null;
boolean viaProto = false;
public RemoveFromClusterNodeLabelsResponsePBImpl() {
builder = RemoveFromClusterNodeLabelsResponseProto.newBuilder();
}
public RemoveFromClusterNodeLabelsResponsePBImpl(
RemoveFromClusterNodeLabelsResponseProto proto) {
this.proto = proto;
viaProto = true;
}
public RemoveFromClusterNodeLabelsResponseProto getProto() {
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
@Override
public int hashCode() {
return getProto().hashCode();
}
@Override
public boolean equals(Object other) {
if (other == null)
return false;
if (other.getClass().isAssignableFrom(this.getClass())) {
return this.getProto().equals(this.getClass().cast(other).getProto());
}
return false;
}
@Override
public String toString() {
return TextFormat.shortDebugString(getProto());
}
}
| 2,331 | 32.314286 | 116 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Private
package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
import org.apache.hadoop.classification.InterfaceAudience;
| 960 | 42.681818 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RunSharedCacheCleanerTaskRequestPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
import org.apache.hadoop.yarn.server.api.protocolrecords.RunSharedCacheCleanerTaskRequest;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.RunSharedCacheCleanerTaskRequestProto;
public class RunSharedCacheCleanerTaskRequestPBImpl extends
RunSharedCacheCleanerTaskRequest {
RunSharedCacheCleanerTaskRequestProto proto =
RunSharedCacheCleanerTaskRequestProto.getDefaultInstance();
RunSharedCacheCleanerTaskRequestProto.Builder builder = null;
boolean viaProto = false;
public RunSharedCacheCleanerTaskRequestPBImpl() {
builder = RunSharedCacheCleanerTaskRequestProto.newBuilder();
}
public RunSharedCacheCleanerTaskRequestPBImpl(
RunSharedCacheCleanerTaskRequestProto proto) {
this.proto = proto;
viaProto = true;
}
public RunSharedCacheCleanerTaskRequestProto getProto() {
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = RunSharedCacheCleanerTaskRequestProto.newBuilder(proto);
}
viaProto = false;
}
}
| 1,976 | 35.611111 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/CheckForDecommissioningNodesRequestPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.CheckForDecommissioningNodesRequestProto;
import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesRequest;
import com.google.protobuf.TextFormat;
@Private
@Unstable
public class CheckForDecommissioningNodesRequestPBImpl extends
CheckForDecommissioningNodesRequest {
CheckForDecommissioningNodesRequestProto proto = CheckForDecommissioningNodesRequestProto
.getDefaultInstance();
CheckForDecommissioningNodesRequestProto.Builder builder = null;
boolean viaProto = false;
public CheckForDecommissioningNodesRequestPBImpl() {
builder = CheckForDecommissioningNodesRequestProto.newBuilder();
}
public CheckForDecommissioningNodesRequestPBImpl(
CheckForDecommissioningNodesRequestProto proto) {
this.proto = proto;
viaProto = true;
}
public CheckForDecommissioningNodesRequestProto getProto() {
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
@Override
public int hashCode() {
return getProto().hashCode();
}
@Override
public boolean equals(Object other) {
    if (other == null) {
      return false;
    }

if (other.getClass().isAssignableFrom(this.getClass())) {
return this.getProto().equals(this.getClass().cast(other).getProto());
}
return false;
}
@Override
public String toString() {
return TextFormat.shortDebugString(getProto());
}
}
| 2,487 | 32.621622 | 116 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/ReplaceLabelsOnNodeRequestPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.impl.pb.NodeIdPBImpl;
import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.NodeIdToLabelsNameProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ReplaceLabelsOnNodeRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ReplaceLabelsOnNodeRequestProtoOrBuilder;
import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeRequest;
import com.google.common.collect.Sets;
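/**
 * Protocol buffer based implementation of {@link ReplaceLabelsOnNodeRequest}.
 * Keeps a local node-to-labels map and merges it back into the proto builder
 * on demand, following the usual copy-on-write PBImpl pattern.
 */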
public class ReplaceLabelsOnNodeRequestPBImpl extends
ReplaceLabelsOnNodeRequest {
ReplaceLabelsOnNodeRequestProto proto = ReplaceLabelsOnNodeRequestProto
.getDefaultInstance();
ReplaceLabelsOnNodeRequestProto.Builder builder = null;
boolean viaProto = false;
private Map<NodeId, Set<String>> nodeIdToLabels;
public ReplaceLabelsOnNodeRequestPBImpl() {
this.builder = ReplaceLabelsOnNodeRequestProto.newBuilder();
}
public ReplaceLabelsOnNodeRequestPBImpl(ReplaceLabelsOnNodeRequestProto proto) {
this.proto = proto;
this.viaProto = true;
}
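  /**
   * Lazily populates the local node-to-labels map from the underlying proto
   * the first time it is needed.
   */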
private void initNodeToLabels() {
if (this.nodeIdToLabels != null) {
return;
}
ReplaceLabelsOnNodeRequestProtoOrBuilder p = viaProto ? proto : builder;
List<NodeIdToLabelsNameProto> list = p.getNodeToLabelsList();
this.nodeIdToLabels = new HashMap<NodeId, Set<String>>();
for (NodeIdToLabelsNameProto c : list) {
this.nodeIdToLabels.put(new NodeIdPBImpl(c.getNodeId()),
Sets.newHashSet(c.getNodeLabelsList()));
}
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = ReplaceLabelsOnNodeRequestProto.newBuilder(proto);
}
viaProto = false;
}
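  /**
   * Rewrites the repeated node-to-labels field on the builder from the local
   * map, streaming entries through an on-the-fly iterator instead of
   * materializing an intermediate list.
   */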
private void addNodeToLabelsToProto() {
maybeInitBuilder();
builder.clearNodeToLabels();
if (nodeIdToLabels == null) {
return;
}
Iterable<NodeIdToLabelsNameProto> iterable =
new Iterable<NodeIdToLabelsNameProto>() {
@Override
public Iterator<NodeIdToLabelsNameProto> iterator() {
return new Iterator<NodeIdToLabelsNameProto>() {
Iterator<Entry<NodeId, Set<String>>> iter = nodeIdToLabels
.entrySet().iterator();
@Override
public void remove() {
throw new UnsupportedOperationException();
}
@Override
public NodeIdToLabelsNameProto next() {
Entry<NodeId, Set<String>> now = iter.next();
return NodeIdToLabelsNameProto.newBuilder()
.setNodeId(convertToProtoFormat(now.getKey())).clearNodeLabels()
.addAllNodeLabels(now.getValue()).build();
}
@Override
public boolean hasNext() {
return iter.hasNext();
}
};
}
};
builder.addAllNodeToLabels(iterable);
}
private void mergeLocalToBuilder() {
if (this.nodeIdToLabels != null) {
addNodeToLabelsToProto();
}
}
private void mergeLocalToProto() {
    if (viaProto) {
      maybeInitBuilder();
    }
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
public ReplaceLabelsOnNodeRequestProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
@Override
public Map<NodeId, Set<String>> getNodeToLabels() {
initNodeToLabels();
return this.nodeIdToLabels;
}
@Override
public void setNodeToLabels(Map<NodeId, Set<String>> map) {
initNodeToLabels();
nodeIdToLabels.clear();
nodeIdToLabels.putAll(map);
}
private NodeIdProto convertToProtoFormat(NodeId t) {
return ((NodeIdPBImpl) t).getProto();
}
@Override
  public int hashCode() {
    // Deliberately not implemented: these records are not meant to be used as
    // hash keys, so fail fast when assertions are enabled.
    assert false : "hashCode not designed";
    return 0;
}
@Override
public boolean equals(Object other) {
    if (other == null) {
      return false;
    }
if (other.getClass().isAssignableFrom(this.getClass())) {
return this.getProto().equals(this.getClass().cast(other).getProto());
}
return false;
}
}
| 5,316 | 30.461538 | 116 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshNodesResponsePBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResponseProto;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResponse;
import com.google.protobuf.TextFormat;
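/**
 * Protocol buffer based implementation of {@link RefreshNodesResponse}, an
 * empty acknowledgement record.
 */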
@Private
@Unstable
public class RefreshNodesResponsePBImpl extends RefreshNodesResponse {
RefreshNodesResponseProto proto = RefreshNodesResponseProto.getDefaultInstance();
RefreshNodesResponseProto.Builder builder = null;
boolean viaProto = false;
public RefreshNodesResponsePBImpl() {
builder = RefreshNodesResponseProto.newBuilder();
}
public RefreshNodesResponsePBImpl(RefreshNodesResponseProto proto) {
this.proto = proto;
viaProto = true;
}
public RefreshNodesResponseProto getProto() {
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
@Override
public int hashCode() {
return getProto().hashCode();
}
@Override
public boolean equals(Object other) {
    if (other == null) {
      return false;
    }
if (other.getClass().isAssignableFrom(this.getClass())) {
return this.getProto().equals(this.getClass().cast(other).getProto());
}
return false;
}
@Override
public String toString() {
return TextFormat.shortDebugString(getProto());
}
}
| 2,277 | 31.084507 | 101 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/UpdateNodeResourceRequestPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.ResourceOption;
import org.apache.hadoop.yarn.api.records.impl.pb.NodeIdPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ResourceOptionPBImpl;
import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto;
import org.apache.hadoop.yarn.proto.YarnProtos.NodeResourceMapProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ResourceOptionProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeResourceRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeResourceRequestProtoOrBuilder;
import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest;
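/**
 * Protocol buffer based implementation of {@link UpdateNodeResourceRequest}.
 * Maintains a local map from {@link NodeId} to {@link ResourceOption} that is
 * serialized into repeated NodeResourceMapProto entries when the proto form
 * is requested.
 */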
public class UpdateNodeResourceRequestPBImpl extends UpdateNodeResourceRequest {
UpdateNodeResourceRequestProto proto = UpdateNodeResourceRequestProto.getDefaultInstance();
UpdateNodeResourceRequestProto.Builder builder = null;
boolean viaProto = false;
Map<NodeId, ResourceOption> nodeResourceMap = null;
public UpdateNodeResourceRequestPBImpl() {
builder = UpdateNodeResourceRequestProto.newBuilder();
}
public UpdateNodeResourceRequestPBImpl(UpdateNodeResourceRequestProto proto) {
this.proto = proto;
viaProto = true;
}
@Override
public Map<NodeId, ResourceOption> getNodeResourceMap() {
initNodeResourceMap();
return this.nodeResourceMap;
}
@Override
public void setNodeResourceMap(Map<NodeId, ResourceOption> nodeResourceMap) {
if (nodeResourceMap == null) {
return;
}
initNodeResourceMap();
this.nodeResourceMap.clear();
this.nodeResourceMap.putAll(nodeResourceMap);
}
public UpdateNodeResourceRequestProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void mergeLocalToBuilder() {
if (this.nodeResourceMap != null) {
addNodeResourceMap();
}
}
private void mergeLocalToProto() {
    if (viaProto) {
      maybeInitBuilder();
    }
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
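  /**
   * Lazily populates the local node-to-resource map from the repeated
   * NodeResourceMapProto entries in the underlying proto.
   */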
private void initNodeResourceMap() {
if (this.nodeResourceMap != null) {
return;
}
UpdateNodeResourceRequestProtoOrBuilder p = viaProto ? proto : builder;
List<NodeResourceMapProto> list = p.getNodeResourceMapList();
    this.nodeResourceMap = new HashMap<NodeId, ResourceOption>(list.size());
for (NodeResourceMapProto nodeResourceProto : list) {
this.nodeResourceMap.put(convertFromProtoFormat(nodeResourceProto.getNodeId()),
convertFromProtoFormat(nodeResourceProto.getResourceOption()));
}
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = UpdateNodeResourceRequestProto.newBuilder(proto);
}
viaProto = false;
}
private NodeIdProto convertToProtoFormat(NodeId nodeId) {
return ((NodeIdPBImpl)nodeId).getProto();
}
private NodeId convertFromProtoFormat(NodeIdProto proto) {
return new NodeIdPBImpl(proto);
}
private ResourceOptionPBImpl convertFromProtoFormat(ResourceOptionProto c) {
return new ResourceOptionPBImpl(c);
}
private ResourceOptionProto convertToProtoFormat(ResourceOption c) {
return ((ResourceOptionPBImpl)c).getProto();
}
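  /**
   * Rewrites the repeated node-resource field on the builder from the local
   * map, generating NodeResourceMapProto entries lazily via an iterator.
   */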
private void addNodeResourceMap() {
maybeInitBuilder();
builder.clearNodeResourceMap();
if (nodeResourceMap == null) {
return;
}
Iterable<? extends NodeResourceMapProto> values
= new Iterable<NodeResourceMapProto>() {
@Override
public Iterator<NodeResourceMapProto> iterator() {
return new Iterator<NodeResourceMapProto>() {
Iterator<NodeId> nodeIterator = nodeResourceMap
.keySet().iterator();
@Override
public boolean hasNext() {
return nodeIterator.hasNext();
}
@Override
public NodeResourceMapProto next() {
NodeId nodeId = nodeIterator.next();
return NodeResourceMapProto.newBuilder().setNodeId(
convertToProtoFormat(nodeId)).setResourceOption(
convertToProtoFormat(nodeResourceMap.get(nodeId))).build();
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
};
}
};
this.builder.addAllNodeResourceMap(values);
}
@Override
public int hashCode() {
return getProto().hashCode();
}
@Override
public boolean equals(Object other) {
    if (other == null) {
      return false;
    }
if (other.getClass().isAssignableFrom(this.getClass())) {
return this.getProto().equals(this.getClass().cast(other).getProto());
}
return false;
}
}
| 5,837 | 31.254144 | 115 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshUserToGroupsMappingsResponsePBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshUserToGroupsMappingsResponseProto;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsResponse;
import com.google.protobuf.TextFormat;
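/**
 * Protocol buffer based implementation of
 * {@link RefreshUserToGroupsMappingsResponse}, an empty acknowledgement
 * record.
 */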
@Private
@Unstable
public class RefreshUserToGroupsMappingsResponsePBImpl extends RefreshUserToGroupsMappingsResponse {
RefreshUserToGroupsMappingsResponseProto proto = RefreshUserToGroupsMappingsResponseProto.getDefaultInstance();
RefreshUserToGroupsMappingsResponseProto.Builder builder = null;
boolean viaProto = false;
public RefreshUserToGroupsMappingsResponsePBImpl() {
builder = RefreshUserToGroupsMappingsResponseProto.newBuilder();
}
public RefreshUserToGroupsMappingsResponsePBImpl(RefreshUserToGroupsMappingsResponseProto proto) {
this.proto = proto;
viaProto = true;
}
public RefreshUserToGroupsMappingsResponseProto getProto() {
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
@Override
public int hashCode() {
return getProto().hashCode();
}
@Override
public boolean equals(Object other) {
    if (other == null) {
      return false;
    }
if (other.getClass().isAssignableFrom(this.getClass())) {
return this.getProto().equals(this.getClass().cast(other).getProto());
}
return false;
}
@Override
public String toString() {
return TextFormat.shortDebugString(getProto());
}
}
| 2,457 | 33.619718 | 116 |
java
|