| repo (stringlengths 1–191, ⌀) | file (stringlengths 23–351) | code (stringlengths 0–5.32M) | file_length (int64 0–5.32M) | avg_line_length (float64 0–2.9k) | max_line_length (int64 0–288k) | extension_type (stringclasses 1: java) |
|---|---|---|---|---|---|---|
| hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshNodesRequestPBImpl.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.DecommissionType;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.DecommissionTypeProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesRequestProtoOrBuilder;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesRequest;
import com.google.protobuf.TextFormat;
@Private
@Unstable
public class RefreshNodesRequestPBImpl extends RefreshNodesRequest {
RefreshNodesRequestProto proto = RefreshNodesRequestProto.getDefaultInstance();
RefreshNodesRequestProto.Builder builder = null;
boolean viaProto = false;
private DecommissionType decommissionType;
public RefreshNodesRequestPBImpl() {
builder = RefreshNodesRequestProto.newBuilder();
}
public RefreshNodesRequestPBImpl(RefreshNodesRequestProto proto) {
this.proto = proto;
viaProto = true;
}
public synchronized RefreshNodesRequestProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private synchronized void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void mergeLocalToBuilder() {
if (this.decommissionType != null) {
builder.setDecommissionType(convertToProtoFormat(this.decommissionType));
}
}
private synchronized void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = RefreshNodesRequestProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public int hashCode() {
return getProto().hashCode();
}
@Override
public boolean equals(Object other) {
if (other == null)
return false;
if (other.getClass().isAssignableFrom(this.getClass())) {
return this.getProto().equals(this.getClass().cast(other).getProto());
}
return false;
}
@Override
public String toString() {
return TextFormat.shortDebugString(getProto());
}
@Override
public synchronized void setDecommissionType(
DecommissionType decommissionType) {
maybeInitBuilder();
this.decommissionType = decommissionType;
mergeLocalToBuilder();
}
@Override
public synchronized DecommissionType getDecommissionType() {
RefreshNodesRequestProtoOrBuilder p = viaProto ? proto : builder;
return convertFromProtoFormat(p.getDecommissionType());
}
private DecommissionType convertFromProtoFormat(DecommissionTypeProto p) {
return DecommissionType.valueOf(p.name());
}
private DecommissionTypeProto convertToProtoFormat(DecommissionType t) {
return DecommissionTypeProto.valueOf(t.name());
}
}
| 3,818 | 31.092437 | 109 | java |
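All of the records in this dump share the proto/builder/viaProto idiom seen above: setters cache a value and fold it into the builder, and getProto() freezes the message. A minimal round-trip sketch (the demo class name is illustrative, and it assumes a GRACEFUL constant on both DecommissionType enums, as the name-based converters above require):

```java
import org.apache.hadoop.yarn.api.records.DecommissionType;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesRequestProto;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesRequestPBImpl;

public class RefreshNodesRoundTripDemo {
  public static void main(String[] args) {
    // The setter caches the value and merges it into the builder.
    RefreshNodesRequestPBImpl request = new RefreshNodesRequestPBImpl();
    request.setDecommissionType(DecommissionType.GRACEFUL);

    // getProto() runs mergeLocalToProto() and freezes the message.
    RefreshNodesRequestProto proto = request.getProto();

    // A record wrapped around the wire-format proto reads the value back.
    RefreshNodesRequestPBImpl decoded = new RefreshNodesRequestPBImpl(proto);
    System.out.println(decoded.getDecommissionType()); // GRACEFUL
  }
}
```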
| hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshSuperUserGroupsConfigurationRequestPBImpl.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshSuperUserGroupsConfigurationRequestProto;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshSuperUserGroupsConfigurationRequest;
import com.google.protobuf.TextFormat;
@Private
@Unstable
public class RefreshSuperUserGroupsConfigurationRequestPBImpl
extends RefreshSuperUserGroupsConfigurationRequest {
RefreshSuperUserGroupsConfigurationRequestProto proto = RefreshSuperUserGroupsConfigurationRequestProto.getDefaultInstance();
RefreshSuperUserGroupsConfigurationRequestProto.Builder builder = null;
boolean viaProto = false;
public RefreshSuperUserGroupsConfigurationRequestPBImpl() {
builder = RefreshSuperUserGroupsConfigurationRequestProto.newBuilder();
}
public RefreshSuperUserGroupsConfigurationRequestPBImpl(RefreshSuperUserGroupsConfigurationRequestProto proto) {
this.proto = proto;
viaProto = true;
}
public RefreshSuperUserGroupsConfigurationRequestProto getProto() {
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
@Override
public int hashCode() {
return getProto().hashCode();
}
@Override
public boolean equals(Object other) {
if (other == null)
return false;
if (other.getClass().isAssignableFrom(this.getClass())) {
return this.getProto().equals(this.getClass().cast(other).getProto());
}
return false;
}
@Override
public String toString() {
return TextFormat.shortDebugString(getProto());
}
}
| 2,542 | 34.319444 | 127 | java |
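RefreshSuperUserGroupsConfigurationRequest carries no fields, so equals() and hashCode() reduce to comparing the serialized (default) proto. A small sketch of that behavior (class name illustrative):

```java
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshSuperUserGroupsConfigurationRequestPBImpl;

public class EmptyRecordEqualityDemo {
  public static void main(String[] args) {
    RefreshSuperUserGroupsConfigurationRequestPBImpl a =
        new RefreshSuperUserGroupsConfigurationRequestPBImpl();
    RefreshSuperUserGroupsConfigurationRequestPBImpl b =
        new RefreshSuperUserGroupsConfigurationRequestPBImpl(a.getProto());

    // equals() delegates to the underlying proto, so two records backed by
    // the same empty message compare equal and hash identically.
    System.out.println(a.equals(b));                  // true
    System.out.println(a.hashCode() == b.hashCode()); // true
  }
}
```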
| hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/UpdateNodeResourceResponsePBImpl.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeResourceResponseProto;
import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceResponse;
public class UpdateNodeResourceResponsePBImpl extends UpdateNodeResourceResponse {
UpdateNodeResourceResponseProto proto = UpdateNodeResourceResponseProto.getDefaultInstance();
UpdateNodeResourceResponseProto.Builder builder = null;
boolean viaProto = false;
public UpdateNodeResourceResponsePBImpl() {
builder = UpdateNodeResourceResponseProto.newBuilder();
}
public UpdateNodeResourceResponsePBImpl(
UpdateNodeResourceResponseProto proto) {
this.proto = proto;
viaProto = true;
}
public UpdateNodeResourceResponseProto getProto() {
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
@Override
public int hashCode() {
return getProto().hashCode();
}
@Override
public boolean equals(Object other) {
if (other == null)
return false;
if (other.getClass().isAssignableFrom(this.getClass())) {
return this.getProto().equals(this.getClass().cast(other).getProto());
}
return false;
}
@Override
public String toString() {
return getProto().toString().replaceAll("\\n", ", ")
.replaceAll("\\s+", " ");
}
}
| 2,216 | 32.089552 | 107 | java |

| hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshSuperUserGroupsConfigurationResponsePBImpl.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshSuperUserGroupsConfigurationResponseProto;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshSuperUserGroupsConfigurationResponse;
import com.google.protobuf.TextFormat;
@Private
@Unstable
public class RefreshSuperUserGroupsConfigurationResponsePBImpl extends RefreshSuperUserGroupsConfigurationResponse {
RefreshSuperUserGroupsConfigurationResponseProto proto = RefreshSuperUserGroupsConfigurationResponseProto.getDefaultInstance();
RefreshSuperUserGroupsConfigurationResponseProto.Builder builder = null;
boolean viaProto = false;
public RefreshSuperUserGroupsConfigurationResponsePBImpl() {
builder = RefreshSuperUserGroupsConfigurationResponseProto.newBuilder();
}
public RefreshSuperUserGroupsConfigurationResponsePBImpl(RefreshSuperUserGroupsConfigurationResponseProto proto) {
this.proto = proto;
viaProto = true;
}
public RefreshSuperUserGroupsConfigurationResponseProto getProto() {
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
@Override
public int hashCode() {
return getProto().hashCode();
}
@Override
public boolean equals(Object other) {
if (other == null)
return false;
if (other.getClass().isAssignableFrom(this.getClass())) {
return this.getProto().equals(this.getClass().cast(other).getProto());
}
return false;
}
@Override
public String toString() {
return TextFormat.shortDebugString(getProto());
}
}
| 2,553 | 34.971831 | 129 | java |

| hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshQueuesResponsePBImpl.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshQueuesResponseProto;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesResponse;
import com.google.protobuf.TextFormat;
@Private
@Unstable
public class RefreshQueuesResponsePBImpl extends RefreshQueuesResponse {
RefreshQueuesResponseProto proto = RefreshQueuesResponseProto.getDefaultInstance();
RefreshQueuesResponseProto.Builder builder = null;
boolean viaProto = false;
public RefreshQueuesResponsePBImpl() {
builder = RefreshQueuesResponseProto.newBuilder();
}
public RefreshQueuesResponsePBImpl(RefreshQueuesResponseProto proto) {
this.proto = proto;
viaProto = true;
}
public RefreshQueuesResponseProto getProto() {
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
@Override
public int hashCode() {
return getProto().hashCode();
}
@Override
public boolean equals(Object other) {
if (other == null)
return false;
if (other.getClass().isAssignableFrom(this.getClass())) {
return this.getProto().equals(this.getClass().cast(other).getProto());
}
return false;
}
@Override
public String toString() {
return TextFormat.shortDebugString(getProto());
}
}
| 2,289 | 31.253521 | 102 | java |

| hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RemoveFromClusterNodeLabelsRequestPBImpl.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RemoveFromClusterNodeLabelsRequestProtoOrBuilder;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RemoveFromClusterNodeLabelsRequestProto;
import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLabelsRequest;
public class RemoveFromClusterNodeLabelsRequestPBImpl extends
RemoveFromClusterNodeLabelsRequest {
Set<String> labels;
RemoveFromClusterNodeLabelsRequestProto proto =
RemoveFromClusterNodeLabelsRequestProto.getDefaultInstance();
RemoveFromClusterNodeLabelsRequestProto.Builder builder = null;
boolean viaProto = false;
public RemoveFromClusterNodeLabelsRequestPBImpl() {
this.builder = RemoveFromClusterNodeLabelsRequestProto.newBuilder();
}
public RemoveFromClusterNodeLabelsRequestPBImpl(
RemoveFromClusterNodeLabelsRequestProto proto) {
this.proto = proto;
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = RemoveFromClusterNodeLabelsRequestProto.newBuilder(proto);
}
viaProto = false;
}
private void mergeLocalToBuilder() {
if (this.labels != null && !this.labels.isEmpty()) {
builder.clearNodeLabels();
builder.addAllNodeLabels(this.labels);
}
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
public RemoveFromClusterNodeLabelsRequestProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void initNodeLabels() {
if (this.labels != null) {
return;
}
RemoveFromClusterNodeLabelsRequestProtoOrBuilder p =
viaProto ? proto : builder;
this.labels = new HashSet<String>();
this.labels.addAll(p.getNodeLabelsList());
}
@Override
public void setNodeLabels(Set<String> labels) {
maybeInitBuilder();
if (labels == null || labels.isEmpty()) {
builder.clearNodeLabels();
}
this.labels = labels;
}
@Override
public Set<String> getNodeLabels() {
initNodeLabels();
return this.labels;
}
@Override
public int hashCode() {
assert false : "hashCode not designed";
return 0;
}
@Override
public boolean equals(Object other) {
if (other == null)
return false;
if (other.getClass().isAssignableFrom(this.getClass())) {
return this.getProto().equals(this.getClass().cast(other).getProto());
}
return false;
}
}
| 3,538 | 29.508621 | 124 | java |
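The labels set above is materialized lazily: setNodeLabels() only caches the set, mergeLocalToBuilder() copies it into the repeated node_labels field when getProto() is called, and initNodeLabels() rebuilds it on the first read. A round-trip sketch (label names are illustrative):

```java
import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RemoveFromClusterNodeLabelsRequestPBImpl;

public class RemoveLabelsRoundTripDemo {
  public static void main(String[] args) {
    Set<String> labels = new HashSet<>();
    labels.add("gpu");
    labels.add("ssd");

    RemoveFromClusterNodeLabelsRequestPBImpl request =
        new RemoveFromClusterNodeLabelsRequestPBImpl();
    request.setNodeLabels(labels);

    // getProto() copies the cached set into the repeated field and freezes it.
    RemoveFromClusterNodeLabelsRequestPBImpl decoded =
        new RemoveFromClusterNodeLabelsRequestPBImpl(request.getProto());

    // The first read rebuilds the set from the proto.
    System.out.println(decoded.getNodeLabels()); // [gpu, ssd] in some order
  }
}
```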
| hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/AddToClusterNodeLabelsRequestPBImpl.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.api.records.impl.pb.NodeLabelPBImpl;
import org.apache.hadoop.yarn.proto.YarnProtos.NodeLabelProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.AddToClusterNodeLabelsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.AddToClusterNodeLabelsRequestProtoOrBuilder;
import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsRequest;
public class AddToClusterNodeLabelsRequestPBImpl extends
AddToClusterNodeLabelsRequest {
AddToClusterNodeLabelsRequestProto proto = AddToClusterNodeLabelsRequestProto
.getDefaultInstance();
AddToClusterNodeLabelsRequestProto.Builder builder = null;
private List<NodeLabel> updatedNodeLabels;
boolean viaProto = false;
public AddToClusterNodeLabelsRequestPBImpl() {
builder = AddToClusterNodeLabelsRequestProto.newBuilder();
}
public AddToClusterNodeLabelsRequestPBImpl(
AddToClusterNodeLabelsRequestProto proto) {
this.proto = proto;
viaProto = true;
}
public AddToClusterNodeLabelsRequestProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void mergeLocalToBuilder() {
if (this.updatedNodeLabels != null) {
addNodeLabelsToProto();
}
}
private void addNodeLabelsToProto() {
maybeInitBuilder();
builder.clearNodeLabels();
List<NodeLabelProto> protoList = new ArrayList<NodeLabelProto>();
for (NodeLabel r : this.updatedNodeLabels) {
protoList.add(convertToProtoFormat(r));
}
builder.addAllNodeLabels(protoList);
}
@Override
public boolean equals(Object other) {
if (other == null)
return false;
if (other.getClass().isAssignableFrom(this.getClass())) {
return this.getProto().equals(this.getClass().cast(other).getProto());
}
return false;
}
@Override
public int hashCode() {
assert false : "hashCode not designed";
return 0;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = AddToClusterNodeLabelsRequestProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public void setNodeLabels(List<NodeLabel> updatedNodeLabels) {
maybeInitBuilder();
this.updatedNodeLabels = new ArrayList<>();
if (updatedNodeLabels == null) {
builder.clearNodeLabels();
return;
}
this.updatedNodeLabels.addAll(updatedNodeLabels);
}
private void initLocalNodeLabels() {
AddToClusterNodeLabelsRequestProtoOrBuilder p = viaProto ? proto : builder;
List<NodeLabelProto> attributesProtoList = p.getNodeLabelsList();
this.updatedNodeLabels = new ArrayList<NodeLabel>();
for (NodeLabelProto r : attributesProtoList) {
this.updatedNodeLabels.add(convertFromProtoFormat(r));
}
}
@Override
public List<NodeLabel> getNodeLabels() {
if (this.updatedNodeLabels != null) {
return this.updatedNodeLabels;
}
initLocalNodeLabels();
return this.updatedNodeLabels;
}
private NodeLabel convertFromProtoFormat(NodeLabelProto p) {
return new NodeLabelPBImpl(p);
}
private NodeLabelProto convertToProtoFormat(NodeLabel t) {
return ((NodeLabelPBImpl) t).getProto();
}
@Override
public String toString() {
return getProto().toString();
}
}
| 4,542 | 30.116438 | 119 | java |
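Unlike the plain-string labels above, AddToClusterNodeLabelsRequest carries NodeLabel records, each converted to a NodeLabelProto on merge and rehydrated as a NodeLabelPBImpl on read. A sketch, assuming the NodeLabel.newInstance(String) factory returns the PB-backed implementation (as the (NodeLabelPBImpl) cast in convertToProtoFormat requires):

```java
import java.util.Arrays;
import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsRequestPBImpl;

public class AddLabelsRoundTripDemo {
  public static void main(String[] args) {
    AddToClusterNodeLabelsRequestPBImpl request =
        new AddToClusterNodeLabelsRequestPBImpl();
    request.setNodeLabels(Arrays.asList(NodeLabel.newInstance("gpu")));

    // Merge converts each NodeLabel to its proto; reads convert back.
    AddToClusterNodeLabelsRequestPBImpl decoded =
        new AddToClusterNodeLabelsRequestPBImpl(request.getProto());
    System.out.println(decoded.getNodeLabels().get(0).getName()); // gpu
  }
}
```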
| hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshAdminAclsRequestPBImpl.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsRequestProto;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsRequest;
import com.google.protobuf.TextFormat;
@Private
@Unstable
public class RefreshAdminAclsRequestPBImpl
extends RefreshAdminAclsRequest {
RefreshAdminAclsRequestProto proto = RefreshAdminAclsRequestProto.getDefaultInstance();
RefreshAdminAclsRequestProto.Builder builder = null;
boolean viaProto = false;
public RefreshAdminAclsRequestPBImpl() {
builder = RefreshAdminAclsRequestProto.newBuilder();
}
public RefreshAdminAclsRequestPBImpl(RefreshAdminAclsRequestProto proto) {
this.proto = proto;
viaProto = true;
}
public RefreshAdminAclsRequestProto getProto() {
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
@Override
public int hashCode() {
return getProto().hashCode();
}
@Override
public boolean equals(Object other) {
if (other == null)
return false;
if (other.getClass().isAssignableFrom(this.getClass())) {
return this.getProto().equals(this.getClass().cast(other).getProto());
}
return false;
}
@Override
public String toString() {
return TextFormat.shortDebugString(getProto());
}
}
| 2,320 | 31.236111 | 104 | java |

| hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshUserToGroupsMappingsRequestPBImpl.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshUserToGroupsMappingsRequestProto;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsRequest;
import com.google.protobuf.TextFormat;
@Private
@Unstable
public class RefreshUserToGroupsMappingsRequestPBImpl
extends RefreshUserToGroupsMappingsRequest {
RefreshUserToGroupsMappingsRequestProto proto = RefreshUserToGroupsMappingsRequestProto.getDefaultInstance();
RefreshUserToGroupsMappingsRequestProto.Builder builder = null;
boolean viaProto = false;
public RefreshUserToGroupsMappingsRequestPBImpl() {
builder = RefreshUserToGroupsMappingsRequestProto.newBuilder();
}
public RefreshUserToGroupsMappingsRequestPBImpl(RefreshUserToGroupsMappingsRequestProto proto) {
this.proto = proto;
viaProto = true;
}
public RefreshUserToGroupsMappingsRequestProto getProto() {
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
@Override
public int hashCode() {
return getProto().hashCode();
}
@Override
public boolean equals(Object other) {
if (other == null)
return false;
if (other.getClass().isAssignableFrom(this.getClass())) {
return this.getProto().equals(this.getClass().cast(other).getProto());
}
return false;
}
@Override
public String toString() {
return TextFormat.shortDebugString(getProto());
}
}
| 2,446 | 32.986111 | 115 | java |

| hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshAdminAclsResponsePBImpl.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsResponseProto;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsResponse;
import com.google.protobuf.TextFormat;
@Private
@Unstable
public class RefreshAdminAclsResponsePBImpl extends RefreshAdminAclsResponse {
RefreshAdminAclsResponseProto proto = RefreshAdminAclsResponseProto.getDefaultInstance();
RefreshAdminAclsResponseProto.Builder builder = null;
boolean viaProto = false;
public RefreshAdminAclsResponsePBImpl() {
builder = RefreshAdminAclsResponseProto.newBuilder();
}
public RefreshAdminAclsResponsePBImpl(RefreshAdminAclsResponseProto proto) {
this.proto = proto;
viaProto = true;
}
public RefreshAdminAclsResponseProto getProto() {
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
@Override
public int hashCode() {
return getProto().hashCode();
}
@Override
public boolean equals(Object other) {
if (other == null)
return false;
if (other.getClass().isAssignableFrom(this.getClass())) {
return this.getProto().equals(this.getClass().cast(other).getProto());
}
return false;
}
@Override
public String toString() {
return TextFormat.shortDebugString(getProto());
}
}
| 2,331 | 31.84507 | 105 | java |

| hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/ReplaceLabelsOnNodeResponsePBImpl.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ReplaceLabelsOnNodeResponseProto;
import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeResponse;
import com.google.protobuf.TextFormat;
public class ReplaceLabelsOnNodeResponsePBImpl extends
ReplaceLabelsOnNodeResponse {
ReplaceLabelsOnNodeResponseProto proto = ReplaceLabelsOnNodeResponseProto
.getDefaultInstance();
ReplaceLabelsOnNodeResponseProto.Builder builder = null;
boolean viaProto = false;
public ReplaceLabelsOnNodeResponsePBImpl() {
builder = ReplaceLabelsOnNodeResponseProto.newBuilder();
}
public ReplaceLabelsOnNodeResponsePBImpl(
ReplaceLabelsOnNodeResponseProto proto) {
this.proto = proto;
viaProto = true;
}
public ReplaceLabelsOnNodeResponseProto getProto() {
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
@Override
public int hashCode() {
return getProto().hashCode();
}
@Override
public boolean equals(Object other) {
if (other == null)
return false;
if (other.getClass().isAssignableFrom(this.getClass())) {
return this.getProto().equals(this.getClass().cast(other).getProto());
}
return false;
}
@Override
public String toString() {
return TextFormat.shortDebugString(getProto());
}
}
| 2,236 | 30.957143 | 108 | java |

| hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RunSharedCacheCleanerTaskResponsePBImpl.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
import org.apache.hadoop.yarn.server.api.protocolrecords.RunSharedCacheCleanerTaskResponse;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.RunSharedCacheCleanerTaskResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.RunSharedCacheCleanerTaskResponseProtoOrBuilder;
public class RunSharedCacheCleanerTaskResponsePBImpl extends
RunSharedCacheCleanerTaskResponse {
RunSharedCacheCleanerTaskResponseProto proto =
RunSharedCacheCleanerTaskResponseProto.getDefaultInstance();
RunSharedCacheCleanerTaskResponseProto.Builder builder = null;
boolean viaProto = false;
public RunSharedCacheCleanerTaskResponsePBImpl() {
builder = RunSharedCacheCleanerTaskResponseProto.newBuilder();
}
public RunSharedCacheCleanerTaskResponsePBImpl(
RunSharedCacheCleanerTaskResponseProto proto) {
this.proto = proto;
viaProto = true;
}
@Override
public boolean getAccepted() {
RunSharedCacheCleanerTaskResponseProtoOrBuilder p = viaProto ? proto : builder;
return (p.hasAccepted()) ? p.getAccepted() : false;
}
@Override
public void setAccepted(boolean b) {
maybeInitBuilder();
builder.setAccepted(b);
}
public RunSharedCacheCleanerTaskResponseProto getProto() {
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = RunSharedCacheCleanerTaskResponseProto.newBuilder(proto);
}
viaProto = false;
}
}
| 2,390 | 34.686567 | 102 | java |
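getAccepted() above guards the optional field with hasAccepted(), so an unset response defaults to false instead of throwing. A short sketch (class name illustrative):

```java
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RunSharedCacheCleanerTaskResponsePBImpl;

public class CleanerTaskResponseDemo {
  public static void main(String[] args) {
    RunSharedCacheCleanerTaskResponsePBImpl response =
        new RunSharedCacheCleanerTaskResponsePBImpl();

    // hasAccepted() is false until the field is set, so the getter
    // falls back to false.
    System.out.println(response.getAccepted()); // false

    response.setAccepted(true);
    System.out.println(response.getAccepted()); // true
  }
}
```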
| hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshServiceAclsResponsePBImpl.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshServiceAclsResponseProto;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshServiceAclsResponse;
import com.google.protobuf.TextFormat;
@Private
@Unstable
public class RefreshServiceAclsResponsePBImpl extends
RefreshServiceAclsResponse {
RefreshServiceAclsResponseProto proto =
RefreshServiceAclsResponseProto.getDefaultInstance();
RefreshServiceAclsResponseProto.Builder builder = null;
boolean viaProto = false;
public RefreshServiceAclsResponsePBImpl() {
builder = RefreshServiceAclsResponseProto.newBuilder();
}
public RefreshServiceAclsResponsePBImpl(
RefreshServiceAclsResponseProto proto) {
this.proto = proto;
viaProto = true;
}
public RefreshServiceAclsResponseProto getProto() {
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
@Override
public int hashCode() {
return getProto().hashCode();
}
@Override
public boolean equals(Object other) {
if (other == null)
return false;
if (other.getClass().isAssignableFrom(this.getClass())) {
return this.getProto().equals(this.getClass().cast(other).getProto());
}
return false;
}
@Override
public String toString() {
return TextFormat.shortDebugString(getProto());
}
}
| 2,367 | 31 | 107 | java |

| hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/AddToClusterNodeLabelsResponsePBImpl.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.AddToClusterNodeLabelsResponseProto;
import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsResponse;
import com.google.protobuf.TextFormat;
public class AddToClusterNodeLabelsResponsePBImpl extends
AddToClusterNodeLabelsResponse {
AddToClusterNodeLabelsResponseProto proto = AddToClusterNodeLabelsResponseProto
.getDefaultInstance();
AddToClusterNodeLabelsResponseProto.Builder builder = null;
boolean viaProto = false;
public AddToClusterNodeLabelsResponsePBImpl() {
builder = AddToClusterNodeLabelsResponseProto.newBuilder();
}
public AddToClusterNodeLabelsResponsePBImpl(
AddToClusterNodeLabelsResponseProto proto) {
this.proto = proto;
viaProto = true;
}
public AddToClusterNodeLabelsResponseProto getProto() {
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
@Override
public int hashCode() {
return getProto().hashCode();
}
@Override
public boolean equals(Object other) {
if (other == null)
return false;
if (other.getClass().isAssignableFrom(this.getClass())) {
return this.getProto().equals(this.getClass().cast(other).getProto());
}
return false;
}
@Override
public String toString() {
return TextFormat.shortDebugString(getProto());
}
}
| 2,272 | 31.471429 | 111 | java |

| hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/CheckForDecommissioningNodesResponsePBImpl.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.impl.pb.NodeIdPBImpl;
import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.CheckForDecommissioningNodesResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.CheckForDecommissioningNodesResponseProtoOrBuilder;
import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesResponse;
import com.google.protobuf.TextFormat;
@Private
@Unstable
public class CheckForDecommissioningNodesResponsePBImpl extends
CheckForDecommissioningNodesResponse {
CheckForDecommissioningNodesResponseProto proto = CheckForDecommissioningNodesResponseProto
.getDefaultInstance();
CheckForDecommissioningNodesResponseProto.Builder builder = null;
boolean viaProto = false;
private Set<NodeId> decommissioningNodes;
public CheckForDecommissioningNodesResponsePBImpl() {
builder = CheckForDecommissioningNodesResponseProto.newBuilder();
}
public CheckForDecommissioningNodesResponsePBImpl(
CheckForDecommissioningNodesResponseProto proto) {
this.proto = proto;
viaProto = true;
}
public CheckForDecommissioningNodesResponseProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = CheckForDecommissioningNodesResponseProto.newBuilder(proto);
}
viaProto = false;
}
private void mergeLocalToBuilder() {
if (this.decommissioningNodes != null) {
addDecommissioningNodesToProto();
}
}
private void addDecommissioningNodesToProto() {
maybeInitBuilder();
builder.clearDecommissioningNodes();
if (this.decommissioningNodes == null)
return;
Set<NodeIdProto> nodeIdProtos = new HashSet<NodeIdProto>();
for (NodeId nodeId : decommissioningNodes) {
nodeIdProtos.add(convertToProtoFormat(nodeId));
}
builder.addAllDecommissioningNodes(nodeIdProtos);
}
private NodeIdProto convertToProtoFormat(NodeId nodeId) {
return ((NodeIdPBImpl) nodeId).getProto();
}
@Override
public void setDecommissioningNodes(Set<NodeId> decommissioningNodes) {
maybeInitBuilder();
if (decommissioningNodes == null)
builder.clearDecommissioningNodes();
this.decommissioningNodes = decommissioningNodes;
}
@Override
public Set<NodeId> getDecommissioningNodes() {
initNodesDecommissioning();
return this.decommissioningNodes;
}
private void initNodesDecommissioning() {
if (this.decommissioningNodes != null) {
return;
}
CheckForDecommissioningNodesResponseProtoOrBuilder p = viaProto ? proto
: builder;
List<NodeIdProto> nodeIds = p.getDecommissioningNodesList();
this.decommissioningNodes = new HashSet<NodeId>();
for (NodeIdProto nodeIdProto : nodeIds) {
this.decommissioningNodes.add(convertFromProtoFormat(nodeIdProto));
}
}
private NodeId convertFromProtoFormat(NodeIdProto nodeIdProto) {
return new NodeIdPBImpl(nodeIdProto);
}
@Override
public int hashCode() {
return getProto().hashCode();
}
@Override
public boolean equals(Object other) {
if (other == null)
return false;
if (other.getClass().isAssignableFrom(this.getClass())) {
return this.getProto().equals(this.getClass().cast(other).getProto());
}
return false;
}
@Override
public String toString() {
return TextFormat.shortDebugString(getProto());
}
}
| 4,896 | 31.006536 | 126 | java |
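The decommissioning-node set follows the same lazy merge/rehydrate cycle, with each NodeId converted through NodeIdProto. A sketch, assuming NodeId.newInstance(host, port) returns the PB-backed NodeIdPBImpl (required by the cast in convertToProtoFormat); host and port are illustrative:

```java
import java.util.Collections;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.CheckForDecommissioningNodesResponsePBImpl;

public class DecommissioningNodesDemo {
  public static void main(String[] args) {
    CheckForDecommissioningNodesResponsePBImpl response =
        new CheckForDecommissioningNodesResponsePBImpl();
    response.setDecommissioningNodes(
        Collections.singleton(NodeId.newInstance("worker-1", 8041)));

    // getProto() folds the set into repeated NodeIdProto entries; the first
    // read on the decoded record rebuilds it as NodeIdPBImpl instances.
    CheckForDecommissioningNodesResponsePBImpl decoded =
        new CheckForDecommissioningNodesResponsePBImpl(response.getProto());
    System.out.println(decoded.getDecommissioningNodes()); // [worker-1:8041]
  }
}
```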
| hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshServiceAclsRequestPBImpl.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshServiceAclsRequestProto;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshServiceAclsRequest;
import com.google.protobuf.TextFormat;
@Private
@Unstable
public class RefreshServiceAclsRequestPBImpl extends RefreshServiceAclsRequest {
RefreshServiceAclsRequestProto proto =
RefreshServiceAclsRequestProto.getDefaultInstance();
RefreshServiceAclsRequestProto.Builder builder = null;
boolean viaProto = false;
public RefreshServiceAclsRequestPBImpl() {
builder = RefreshServiceAclsRequestProto.newBuilder();
}
public RefreshServiceAclsRequestPBImpl(
RefreshServiceAclsRequestProto proto) {
this.proto = proto;
viaProto = true;
}
public RefreshServiceAclsRequestProto getProto() {
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
@Override
public int hashCode() {
return getProto().hashCode();
}
@Override
public boolean equals(Object other) {
if (other == null)
return false;
if (other.getClass().isAssignableFrom(this.getClass())) {
return this.getProto().equals(this.getClass().cast(other).getProto());
}
return false;
}
@Override
public String toString() {
return TextFormat.shortDebugString(getProto());
}
}
| 2,351 | 31.219178 | 106 | java |

| hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/service/SCMAdminProtocolPBServiceImpl.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.impl.pb.service;
import java.io.IOException;
import org.apache.hadoop.yarn.server.api.SCMAdminProtocol;
import org.apache.hadoop.yarn.server.api.SCMAdminProtocolPB;
import org.apache.hadoop.yarn.server.api.protocolrecords.RunSharedCacheCleanerTaskResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RunSharedCacheCleanerTaskRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RunSharedCacheCleanerTaskResponsePBImpl;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.RunSharedCacheCleanerTaskRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.RunSharedCacheCleanerTaskResponseProto;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
public class SCMAdminProtocolPBServiceImpl implements SCMAdminProtocolPB {
private SCMAdminProtocol real;
public SCMAdminProtocolPBServiceImpl(SCMAdminProtocol impl) {
this.real = impl;
}
@Override
public RunSharedCacheCleanerTaskResponseProto runCleanerTask(RpcController controller,
RunSharedCacheCleanerTaskRequestProto proto) throws ServiceException {
RunSharedCacheCleanerTaskRequestPBImpl request =
new RunSharedCacheCleanerTaskRequestPBImpl(proto);
try {
RunSharedCacheCleanerTaskResponse response = real.runCleanerTask(request);
return ((RunSharedCacheCleanerTaskResponsePBImpl) response).getProto();
} catch (YarnException e) {
throw new ServiceException(e);
} catch (IOException e) {
throw new ServiceException(e);
}
}
}
| 2,462 | 41.465517 | 105 | java |
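Every method in this service translator repeats the same shape: unwrap the request proto into a PBImpl, delegate to the real protocol, return the response's proto, and tunnel checked exceptions through ServiceException. A hypothetical helper (not part of the Hadoop source) capturing that boilerplate:

```java
import java.io.IOException;
import com.google.protobuf.ServiceException;
import org.apache.hadoop.yarn.exceptions.YarnException;

public final class ServiceExceptionTunnel {
  /** Functional shape of one translated RPC call. */
  public interface ProtoCall<T> {
    T call() throws YarnException, IOException;
  }

  /**
   * Wraps the try/catch boilerplate repeated in each *PBServiceImpl method:
   * both checked exception types are tunneled through ServiceException so
   * the protobuf RPC layer can carry them back to the caller.
   */
  public static <T> T translate(ProtoCall<T> call) throws ServiceException {
    try {
      return call.call();
    } catch (YarnException | IOException e) {
      throw new ServiceException(e);
    }
  }
}
```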
| hadoop | hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/service/ResourceManagerAdministrationProtocolPBServiceImpl.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.impl.pb.service;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.AddToClusterNodeLabelsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.AddToClusterNodeLabelsResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.CheckForDecommissioningNodesRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.CheckForDecommissioningNodesResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetGroupsForUserRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetGroupsForUserResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshQueuesRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshQueuesResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshServiceAclsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshServiceAclsResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshSuperUserGroupsConfigurationRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshSuperUserGroupsConfigurationResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshUserToGroupsMappingsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshUserToGroupsMappingsResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RemoveFromClusterNodeLabelsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RemoveFromClusterNodeLabelsResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ReplaceLabelsOnNodeRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ReplaceLabelsOnNodeResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeResourceRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeResourceResponseProto;
import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol;
import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocolPB;
import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshServiceAclsResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshSuperUserGroupsConfigurationResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLabelsResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.CheckForDecommissioningNodesRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.CheckForDecommissioningNodesResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshAdminAclsRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshAdminAclsResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshQueuesRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshQueuesResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshServiceAclsRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshServiceAclsResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshSuperUserGroupsConfigurationRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshSuperUserGroupsConfigurationResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshUserToGroupsMappingsRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshUserToGroupsMappingsResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RemoveFromClusterNodeLabelsRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RemoveFromClusterNodeLabelsResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.ReplaceLabelsOnNodeRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.ReplaceLabelsOnNodeResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeResourceRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeResourceResponsePBImpl;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
@Private
public class ResourceManagerAdministrationProtocolPBServiceImpl implements ResourceManagerAdministrationProtocolPB {
private ResourceManagerAdministrationProtocol real;
public ResourceManagerAdministrationProtocolPBServiceImpl(ResourceManagerAdministrationProtocol impl) {
this.real = impl;
}
@Override
public RefreshQueuesResponseProto refreshQueues(RpcController controller,
RefreshQueuesRequestProto proto) throws ServiceException {
RefreshQueuesRequestPBImpl request = new RefreshQueuesRequestPBImpl(proto);
try {
RefreshQueuesResponse response = real.refreshQueues(request);
return ((RefreshQueuesResponsePBImpl)response).getProto();
} catch (YarnException e) {
throw new ServiceException(e);
} catch (IOException e) {
throw new ServiceException(e);
}
}
@Override
public RefreshAdminAclsResponseProto refreshAdminAcls(
RpcController controller, RefreshAdminAclsRequestProto proto)
throws ServiceException {
RefreshAdminAclsRequestPBImpl request =
new RefreshAdminAclsRequestPBImpl(proto);
try {
RefreshAdminAclsResponse response = real.refreshAdminAcls(request);
return ((RefreshAdminAclsResponsePBImpl)response).getProto();
} catch (YarnException e) {
throw new ServiceException(e);
} catch (IOException e) {
throw new ServiceException(e);
}
}
@Override
public RefreshNodesResponseProto refreshNodes(RpcController controller,
RefreshNodesRequestProto proto) throws ServiceException {
RefreshNodesRequestPBImpl request = new RefreshNodesRequestPBImpl(proto);
try {
RefreshNodesResponse response = real.refreshNodes(request);
return ((RefreshNodesResponsePBImpl)response).getProto();
} catch (YarnException e) {
throw new ServiceException(e);
} catch (IOException e) {
throw new ServiceException(e);
}
}
@Override
public RefreshSuperUserGroupsConfigurationResponseProto
refreshSuperUserGroupsConfiguration(
RpcController controller,
RefreshSuperUserGroupsConfigurationRequestProto proto)
throws ServiceException {
RefreshSuperUserGroupsConfigurationRequestPBImpl request =
new RefreshSuperUserGroupsConfigurationRequestPBImpl(proto);
try {
RefreshSuperUserGroupsConfigurationResponse response =
real.refreshSuperUserGroupsConfiguration(request);
return ((RefreshSuperUserGroupsConfigurationResponsePBImpl)response).getProto();
} catch (YarnException e) {
throw new ServiceException(e);
} catch (IOException e) {
throw new ServiceException(e);
}
}
@Override
public RefreshUserToGroupsMappingsResponseProto refreshUserToGroupsMappings(
RpcController controller, RefreshUserToGroupsMappingsRequestProto proto)
throws ServiceException {
RefreshUserToGroupsMappingsRequestPBImpl request =
new RefreshUserToGroupsMappingsRequestPBImpl(proto);
try {
RefreshUserToGroupsMappingsResponse response =
real.refreshUserToGroupsMappings(request);
return ((RefreshUserToGroupsMappingsResponsePBImpl)response).getProto();
} catch (YarnException e) {
throw new ServiceException(e);
} catch (IOException e) {
throw new ServiceException(e);
}
}
@Override
public RefreshServiceAclsResponseProto refreshServiceAcls(
RpcController controller, RefreshServiceAclsRequestProto proto)
throws ServiceException {
RefreshServiceAclsRequestPBImpl request =
new RefreshServiceAclsRequestPBImpl(proto);
try {
RefreshServiceAclsResponse response =
real.refreshServiceAcls(request);
return ((RefreshServiceAclsResponsePBImpl)response).getProto();
} catch (YarnException e) {
throw new ServiceException(e);
} catch (IOException e) {
throw new ServiceException(e);
}
}
@Override
public GetGroupsForUserResponseProto getGroupsForUser(
RpcController controller, GetGroupsForUserRequestProto request)
throws ServiceException {
String user = request.getUser();
try {
String[] groups = real.getGroupsForUser(user);
GetGroupsForUserResponseProto.Builder responseBuilder =
GetGroupsForUserResponseProto.newBuilder();
for (String group : groups) {
responseBuilder.addGroups(group);
}
return responseBuilder.build();
} catch (IOException e) {
throw new ServiceException(e);
}
}
@Override
public UpdateNodeResourceResponseProto updateNodeResource(RpcController controller,
UpdateNodeResourceRequestProto proto) throws ServiceException {
UpdateNodeResourceRequestPBImpl request =
new UpdateNodeResourceRequestPBImpl(proto);
try {
UpdateNodeResourceResponse response = real.updateNodeResource(request);
return ((UpdateNodeResourceResponsePBImpl)response).getProto();
} catch (YarnException e) {
throw new ServiceException(e);
} catch (IOException e) {
throw new ServiceException(e);
}
}
@Override
public AddToClusterNodeLabelsResponseProto addToClusterNodeLabels(
RpcController controller, AddToClusterNodeLabelsRequestProto proto)
throws ServiceException {
AddToClusterNodeLabelsRequestPBImpl request =
new AddToClusterNodeLabelsRequestPBImpl(proto);
try {
AddToClusterNodeLabelsResponse response =
real.addToClusterNodeLabels(request);
return ((AddToClusterNodeLabelsResponsePBImpl) response).getProto();
} catch (YarnException e) {
throw new ServiceException(e);
} catch (IOException e) {
throw new ServiceException(e);
}
}
@Override
public RemoveFromClusterNodeLabelsResponseProto removeFromClusterNodeLabels(
RpcController controller, RemoveFromClusterNodeLabelsRequestProto proto)
throws ServiceException {
RemoveFromClusterNodeLabelsRequestPBImpl request =
new RemoveFromClusterNodeLabelsRequestPBImpl(proto);
try {
RemoveFromClusterNodeLabelsResponse response =
real.removeFromClusterNodeLabels(request);
return ((RemoveFromClusterNodeLabelsResponsePBImpl) response).getProto();
} catch (YarnException e) {
throw new ServiceException(e);
} catch (IOException e) {
throw new ServiceException(e);
}
}
@Override
public ReplaceLabelsOnNodeResponseProto replaceLabelsOnNodes(
RpcController controller, ReplaceLabelsOnNodeRequestProto proto)
throws ServiceException {
ReplaceLabelsOnNodeRequestPBImpl request =
new ReplaceLabelsOnNodeRequestPBImpl(proto);
try {
ReplaceLabelsOnNodeResponse response = real.replaceLabelsOnNode(request);
return ((ReplaceLabelsOnNodeResponsePBImpl) response).getProto();
} catch (YarnException e) {
throw new ServiceException(e);
} catch (IOException e) {
throw new ServiceException(e);
}
}
@Override
public CheckForDecommissioningNodesResponseProto checkForDecommissioningNodes(
RpcController controller, CheckForDecommissioningNodesRequestProto proto)
throws ServiceException {
CheckForDecommissioningNodesRequest request = new CheckForDecommissioningNodesRequestPBImpl(
proto);
try {
CheckForDecommissioningNodesResponse response = real
.checkForDecommissioningNodes(request);
return ((CheckForDecommissioningNodesResponsePBImpl) response).getProto();
} catch (YarnException e) {
throw new ServiceException(e);
} catch (IOException e) {
throw new ServiceException(e);
}
}
}
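/**
 * A minimal wiring sketch showing how a server-side PB translator such as the
 * class above is typically bound to the Hadoop RPC layer. The generated proto
 * service name, the bind address and the port below are illustrative
 * assumptions, not values defined by this file.
 */
class ResourceManagerAdministrationProtocolPBServiceExample {
  static org.apache.hadoop.ipc.RPC.Server bind(
      org.apache.hadoop.conf.Configuration conf,
      ResourceManagerAdministrationProtocol impl) throws IOException {
    // Wrap the translator in the protoc-generated blocking service
    // (assumed name, following the usual protoc output for this protocol).
    com.google.protobuf.BlockingService service =
        org.apache.hadoop.yarn.proto.ResourceManagerAdministrationProtocol
            .ResourceManagerAdministrationProtocolService
            .newReflectiveBlockingService(
                new ResourceManagerAdministrationProtocolPBServiceImpl(impl));
    // Port 8033 is the conventional RM admin port; treat it as an assumption.
    return new org.apache.hadoop.ipc.RPC.Builder(conf)
        .setProtocol(ResourceManagerAdministrationProtocolPB.class)
        .setInstance(service)
        .setBindAddress("0.0.0.0")
        .setPort(8033)
        .build();
  }
}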
| 14,824 | 49.254237 | 124 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceManagerAdministrationProtocolPBClientImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.impl.pb.client;
import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.ipc.RPCUtil;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.AddToClusterNodeLabelsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.CheckForDecommissioningNodesRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetGroupsForUserRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetGroupsForUserResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshQueuesRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshServiceAclsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshSuperUserGroupsConfigurationRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshUserToGroupsMappingsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RemoveFromClusterNodeLabelsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ReplaceLabelsOnNodeRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeResourceRequestProto;
import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol;
import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocolPB;
import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshServiceAclsRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshServiceAclsResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshSuperUserGroupsConfigurationRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshSuperUserGroupsConfigurationResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLabelsRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLabelsResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.CheckForDecommissioningNodesRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.CheckForDecommissioningNodesResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshAdminAclsRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshAdminAclsResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshQueuesRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshQueuesResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshServiceAclsRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshServiceAclsResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshSuperUserGroupsConfigurationRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshSuperUserGroupsConfigurationResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshUserToGroupsMappingsRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshUserToGroupsMappingsResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RemoveFromClusterNodeLabelsRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RemoveFromClusterNodeLabelsResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.ReplaceLabelsOnNodeRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.ReplaceLabelsOnNodeResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeResourceRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeResourceResponsePBImpl;
import com.google.protobuf.ServiceException;
@Private
public class ResourceManagerAdministrationProtocolPBClientImpl implements ResourceManagerAdministrationProtocol, Closeable {
private ResourceManagerAdministrationProtocolPB proxy;
public ResourceManagerAdministrationProtocolPBClientImpl(long clientVersion, InetSocketAddress addr,
Configuration conf) throws IOException {
RPC.setProtocolEngine(conf, ResourceManagerAdministrationProtocolPB.class,
ProtobufRpcEngine.class);
proxy = (ResourceManagerAdministrationProtocolPB)RPC.getProxy(
ResourceManagerAdministrationProtocolPB.class, clientVersion, addr, conf);
}
@Override
public void close() {
if (this.proxy != null) {
RPC.stopProxy(this.proxy);
}
}
@Override
public RefreshQueuesResponse refreshQueues(RefreshQueuesRequest request)
throws YarnException, IOException {
RefreshQueuesRequestProto requestProto =
((RefreshQueuesRequestPBImpl)request).getProto();
try {
return new RefreshQueuesResponsePBImpl(
proxy.refreshQueues(null, requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public RefreshNodesResponse refreshNodes(RefreshNodesRequest request)
throws YarnException, IOException {
RefreshNodesRequestProto requestProto =
((RefreshNodesRequestPBImpl)request).getProto();
try {
return new RefreshNodesResponsePBImpl(
proxy.refreshNodes(null, requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public RefreshSuperUserGroupsConfigurationResponse refreshSuperUserGroupsConfiguration(
RefreshSuperUserGroupsConfigurationRequest request)
throws YarnException, IOException {
RefreshSuperUserGroupsConfigurationRequestProto requestProto =
((RefreshSuperUserGroupsConfigurationRequestPBImpl)request).getProto();
try {
return new RefreshSuperUserGroupsConfigurationResponsePBImpl(
proxy.refreshSuperUserGroupsConfiguration(null, requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public RefreshUserToGroupsMappingsResponse refreshUserToGroupsMappings(
RefreshUserToGroupsMappingsRequest request) throws YarnException,
IOException {
RefreshUserToGroupsMappingsRequestProto requestProto =
((RefreshUserToGroupsMappingsRequestPBImpl)request).getProto();
try {
return new RefreshUserToGroupsMappingsResponsePBImpl(
proxy.refreshUserToGroupsMappings(null, requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public RefreshAdminAclsResponse refreshAdminAcls(
RefreshAdminAclsRequest request) throws YarnException, IOException {
RefreshAdminAclsRequestProto requestProto =
((RefreshAdminAclsRequestPBImpl)request).getProto();
try {
return new RefreshAdminAclsResponsePBImpl(
proxy.refreshAdminAcls(null, requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public RefreshServiceAclsResponse refreshServiceAcls(
RefreshServiceAclsRequest request) throws YarnException,
IOException {
RefreshServiceAclsRequestProto requestProto =
((RefreshServiceAclsRequestPBImpl)request).getProto();
try {
return new RefreshServiceAclsResponsePBImpl(proxy.refreshServiceAcls(
null, requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public String[] getGroupsForUser(String user) throws IOException {
GetGroupsForUserRequestProto requestProto =
GetGroupsForUserRequestProto.newBuilder().setUser(user).build();
try {
GetGroupsForUserResponseProto responseProto =
proxy.getGroupsForUser(null, requestProto);
      return responseProto.getGroupsList().toArray(
          new String[responseProto.getGroupsCount()]);
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public UpdateNodeResourceResponse updateNodeResource(
UpdateNodeResourceRequest request) throws YarnException, IOException {
UpdateNodeResourceRequestProto requestProto =
((UpdateNodeResourceRequestPBImpl) request).getProto();
try {
return new UpdateNodeResourceResponsePBImpl(proxy.updateNodeResource(null,
requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public AddToClusterNodeLabelsResponse addToClusterNodeLabels(
AddToClusterNodeLabelsRequest request) throws YarnException, IOException {
AddToClusterNodeLabelsRequestProto requestProto =
((AddToClusterNodeLabelsRequestPBImpl) request).getProto();
try {
return new AddToClusterNodeLabelsResponsePBImpl(
proxy.addToClusterNodeLabels(null, requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public RemoveFromClusterNodeLabelsResponse removeFromClusterNodeLabels(
RemoveFromClusterNodeLabelsRequest request) throws YarnException,
IOException {
RemoveFromClusterNodeLabelsRequestProto requestProto =
((RemoveFromClusterNodeLabelsRequestPBImpl) request).getProto();
try {
return new RemoveFromClusterNodeLabelsResponsePBImpl(
proxy.removeFromClusterNodeLabels(null, requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public ReplaceLabelsOnNodeResponse replaceLabelsOnNode(
ReplaceLabelsOnNodeRequest request) throws YarnException, IOException {
ReplaceLabelsOnNodeRequestProto requestProto =
((ReplaceLabelsOnNodeRequestPBImpl) request).getProto();
try {
return new ReplaceLabelsOnNodeResponsePBImpl(proxy.replaceLabelsOnNodes(
null, requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
@Override
public CheckForDecommissioningNodesResponse checkForDecommissioningNodes(
CheckForDecommissioningNodesRequest checkForDecommissioningNodesRequest)
throws YarnException, IOException {
CheckForDecommissioningNodesRequestProto requestProto =
((CheckForDecommissioningNodesRequestPBImpl) checkForDecommissioningNodesRequest)
.getProto();
try {
return new CheckForDecommissioningNodesResponsePBImpl(
proxy.checkForDecommissioningNodes(null, requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
}
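/**
 * A minimal usage sketch for the client above: open a proxy to an assumed RM
 * admin address (localhost:8033) with an assumed RPC version of 1, ask the RM
 * to re-read its node list, then release the proxy. The address and version
 * values are illustrative assumptions only.
 */
class ResourceManagerAdministrationProtocolPBClientExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    ResourceManagerAdministrationProtocolPBClientImpl client =
        new ResourceManagerAdministrationProtocolPBClientImpl(
            1L, new InetSocketAddress("localhost", 8033), conf); // assumed endpoint
    try {
      // An empty request refreshes with the default decommission behaviour.
      client.refreshNodes(new RefreshNodesRequestPBImpl());
    } finally {
      client.close();
    }
  }
}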
| 14,008 | 47.642361 | 124 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/SCMAdminProtocolPBClientImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.api.impl.pb.client;
import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.yarn.server.api.SCMAdminProtocol;
import org.apache.hadoop.yarn.server.api.SCMAdminProtocolPB;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RunSharedCacheCleanerTaskRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RunSharedCacheCleanerTaskResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.RunSharedCacheCleanerTaskRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.RunSharedCacheCleanerTaskResponse;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.ipc.RPCUtil;
import org.apache.hadoop.yarn.proto.YarnServiceProtos;
import com.google.protobuf.ServiceException;
public class SCMAdminProtocolPBClientImpl implements SCMAdminProtocol,
Closeable {
private SCMAdminProtocolPB proxy;
public SCMAdminProtocolPBClientImpl(long clientVersion,
InetSocketAddress addr, Configuration conf) throws IOException {
RPC.setProtocolEngine(conf, SCMAdminProtocolPB.class,
ProtobufRpcEngine.class);
proxy = RPC.getProxy(SCMAdminProtocolPB.class, clientVersion, addr, conf);
}
@Override
public void close() {
if (this.proxy != null) {
RPC.stopProxy(this.proxy);
}
}
@Override
public RunSharedCacheCleanerTaskResponse runCleanerTask(
RunSharedCacheCleanerTaskRequest request) throws YarnException,
IOException {
YarnServiceProtos.RunSharedCacheCleanerTaskRequestProto requestProto =
((RunSharedCacheCleanerTaskRequestPBImpl) request).getProto();
try {
return new RunSharedCacheCleanerTaskResponsePBImpl(proxy.runCleanerTask(null,
requestProto));
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
return null;
}
}
}
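/**
 * A minimal usage sketch for the client above: connect to an assumed shared
 * cache manager admin endpoint (localhost:8047, the conventional default) and
 * trigger one cleaner run. The no-arg request constructor is assumed to
 * follow the standard PBImpl pattern.
 */
class SCMAdminProtocolPBClientExample {
  public static void main(String[] args) throws Exception {
    SCMAdminProtocolPBClientImpl client = new SCMAdminProtocolPBClientImpl(
        1L, new InetSocketAddress("localhost", 8047), new Configuration());
    try {
      client.runCleanerTask(new RunSharedCacheCleanerTaskRequestPBImpl());
    } finally {
      client.close();
    }
  }
}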
| 2,874 | 37.851351 | 105 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/AbstractRegistryTest.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry;
import org.apache.hadoop.fs.PathNotFoundException;
import org.apache.hadoop.registry.client.api.RegistryOperations;
import org.apache.hadoop.registry.client.binding.RegistryPathUtils;
import org.apache.hadoop.registry.client.types.yarn.PersistencePolicies;
import org.apache.hadoop.registry.client.types.ServiceRecord;
import org.apache.hadoop.registry.server.integration.RMRegistryOperationsService;
import org.junit.Before;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.net.URISyntaxException;
/**
 * Abstract registry tests. Before each test this initializes the field
 * {@link #registry} with an instance of {@link RMRegistryOperationsService},
 * and {@link #operations} with the same instance cast
 * to the type {@link RegistryOperations}.
 */
public class AbstractRegistryTest extends AbstractZKRegistryTest {
private static final Logger LOG =
LoggerFactory.getLogger(AbstractRegistryTest.class);
protected RMRegistryOperationsService registry;
protected RegistryOperations operations;
@Before
public void setupRegistry() throws IOException {
registry = new RMRegistryOperationsService("yarnRegistry");
operations = registry;
registry.init(createRegistryConfiguration());
registry.start();
operations.delete("/", true);
registry.createRootRegistryPaths();
addToTeardown(registry);
}
  /**
   * Create a service entry with the sample endpoints, and put it
   * at the destination, using the default PERMANENT persistence policy
   * @param path path
   * @param createFlags flags
   * @return the record
   * @throws IOException on a failure
   * @throws URISyntaxException if an endpoint URI is invalid
   */
protected ServiceRecord putExampleServiceEntry(String path, int createFlags) throws
IOException,
URISyntaxException {
return putExampleServiceEntry(path, createFlags, PersistencePolicies.PERMANENT);
}
  /**
   * Create a service entry with the sample endpoints, and put it
   * at the destination
   * @param path path
   * @param createFlags flags
   * @param persistence persistence policy to set on the record
   * @return the record
   * @throws IOException on a failure
   * @throws URISyntaxException if an endpoint URI is invalid
   */
protected ServiceRecord putExampleServiceEntry(String path,
int createFlags,
String persistence)
throws IOException, URISyntaxException {
ServiceRecord record = buildExampleServiceEntry(persistence);
registry.mknode(RegistryPathUtils.parentOf(path), true);
operations.bind(path, record, createFlags);
return record;
}
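  /**
   * A minimal sketch of how subclasses typically use the helper above,
   * assuming the example path below (an illustrative value, not a fixture
   * defined by this class): put the sample record, then check it resolves.
   * @throws IOException on a failure
   * @throws URISyntaxException if an endpoint URI is invalid
   */
  protected void putAndResolveExample() throws IOException, URISyntaxException {
    String path = "/users/example/org-apache-hadoop/examples/ex-1"; // assumed
    putExampleServiceEntry(path, 0);
    assertResolves(path);
  }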
/**
* Assert a path exists
* @param path path in the registry
* @throws IOException
*/
public void assertPathExists(String path) throws IOException {
operations.stat(path);
}
  /**
   * Assert that a path does not exist
   * @param path path in the registry
   * @throws IOException
   */
public void assertPathNotFound(String path) throws IOException {
try {
operations.stat(path);
fail("Path unexpectedly found: " + path);
} catch (PathNotFoundException e) {
}
}
/**
* Assert that a path resolves to a service record
* @param path path in the registry
* @throws IOException
*/
public void assertResolves(String path) throws IOException {
operations.resolve(path);
}
}
| 3,995 | 31.225806 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/AbstractZKRegistryTest.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.Service;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.registry.client.api.RegistryConstants;
import org.apache.hadoop.registry.server.services.AddingCompositeService;
import org.apache.hadoop.registry.server.services.MicroZookeeperService;
import org.apache.hadoop.registry.server.services.MicroZookeeperServiceKeys;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.rules.TestName;
import org.junit.rules.Timeout;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
public class AbstractZKRegistryTest extends RegistryTestHelper {
private static final Logger LOG =
LoggerFactory.getLogger(AbstractZKRegistryTest.class);
private static final AddingCompositeService servicesToTeardown =
new AddingCompositeService("teardown");
// static initializer guarantees it is always started
// ahead of any @BeforeClass methods
static {
servicesToTeardown.init(new Configuration());
servicesToTeardown.start();
}
@Rule
public final Timeout testTimeout = new Timeout(10000);
@Rule
public TestName methodName = new TestName();
protected static void addToTeardown(Service svc) {
servicesToTeardown.addService(svc);
}
@AfterClass
public static void teardownServices() throws IOException {
describe(LOG, "teardown of static services");
servicesToTeardown.close();
}
protected static MicroZookeeperService zookeeper;
@BeforeClass
public static void createZKServer() throws Exception {
File zkDir = new File("target/zookeeper");
FileUtils.deleteDirectory(zkDir);
assertTrue(zkDir.mkdirs());
zookeeper = new MicroZookeeperService("InMemoryZKService");
YarnConfiguration conf = new YarnConfiguration();
conf.set(MicroZookeeperServiceKeys.KEY_ZKSERVICE_DIR, zkDir.getAbsolutePath());
zookeeper.init(conf);
zookeeper.start();
addToTeardown(zookeeper);
}
  /**
   * Give the test thread a name
   */
@Before
public void nameThread() {
Thread.currentThread().setName("JUnit");
}
/**
* Returns the connection string to use
*
* @return connection string
*/
public String getConnectString() {
return zookeeper.getConnectionString();
}
public YarnConfiguration createRegistryConfiguration() {
YarnConfiguration conf = new YarnConfiguration();
conf.setInt(RegistryConstants.KEY_REGISTRY_ZK_CONNECTION_TIMEOUT, 1000);
conf.setInt(RegistryConstants.KEY_REGISTRY_ZK_RETRY_INTERVAL, 500);
conf.setInt(RegistryConstants.KEY_REGISTRY_ZK_RETRY_TIMES, 10);
conf.setInt(RegistryConstants.KEY_REGISTRY_ZK_RETRY_CEILING, 10);
conf.set(RegistryConstants.KEY_REGISTRY_ZK_QUORUM,
zookeeper.getConnectionString());
return conf;
}
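  /**
   * A minimal sketch, assuming the registry client factory entry point
   * {@code RegistryOperationsFactory.createInstance(Configuration)}: build a
   * plain client bonded to the test ZK quorum configured above. The factory
   * call and its lifecycle (init on create, start by the caller) are
   * assumptions about the client API, not behaviour defined in this class.
   */
  public org.apache.hadoop.registry.client.api.RegistryOperations
      createOperationsClient() {
    org.apache.hadoop.registry.client.api.RegistryOperations client =
        org.apache.hadoop.registry.client.api.RegistryOperationsFactory
            .createInstance(createRegistryConfiguration());
    client.start();
    addToTeardown(client); // RegistryOperations extends Service
    return client;
  }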
}
| 3,798 | 32.324561 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/RegistryTestHelper.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.registry.client.api.RegistryConstants;
import org.apache.hadoop.registry.client.binding.RegistryUtils;
import org.apache.hadoop.registry.client.binding.RegistryTypeUtils;
import org.apache.hadoop.registry.client.types.AddressTypes;
import org.apache.hadoop.registry.client.types.Endpoint;
import org.apache.hadoop.registry.client.types.ProtocolTypes;
import org.apache.hadoop.registry.client.types.ServiceRecord;
import org.apache.hadoop.registry.client.types.yarn.YarnRegistryAttributes;
import org.apache.hadoop.registry.secure.AbstractSecureRegistryTest;
import org.apache.zookeeper.common.PathUtils;
import org.junit.Assert;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.security.auth.Subject;
import javax.security.auth.login.LoginContext;
import javax.security.auth.login.LoginException;
import java.io.File;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.List;
import java.util.Map;
import static org.apache.hadoop.registry.client.binding.RegistryTypeUtils.*;
/**
 * This is a set of static methods to aid testing the registry operations.
 * The methods can be imported statically, or the class can be used as a base
 * class for tests.
 */
public class RegistryTestHelper extends Assert {
public static final String SC_HADOOP = "org-apache-hadoop";
public static final String USER = "devteam/";
public static final String NAME = "hdfs";
public static final String API_WEBHDFS = "classpath:org.apache.hadoop.namenode.webhdfs";
public static final String API_HDFS = "classpath:org.apache.hadoop.namenode.dfs";
public static final String USERPATH = RegistryConstants.PATH_USERS + USER;
public static final String PARENT_PATH = USERPATH + SC_HADOOP + "/";
public static final String ENTRY_PATH = PARENT_PATH + NAME;
public static final String NNIPC = "uuid:423C2B93-C927-4050-AEC6-6540E6646437";
public static final String IPC2 = "uuid:0663501D-5AD3-4F7E-9419-52F5D6636FCF";
private static final Logger LOG =
LoggerFactory.getLogger(RegistryTestHelper.class);
private static final RegistryUtils.ServiceRecordMarshal recordMarshal =
new RegistryUtils.ServiceRecordMarshal();
public static final String HTTP_API = "http://";
/**
* Assert the path is valid by ZK rules
* @param path path to check
*/
public static void assertValidZKPath(String path) {
try {
PathUtils.validatePath(path);
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException("Invalid Path " + path + ": " + e, e);
}
}
/**
* Assert that a string is not empty (null or "")
* @param message message to raise if the string is empty
* @param check string to check
*/
public static void assertNotEmpty(String message, String check) {
if (StringUtils.isEmpty(check)) {
fail(message);
}
}
  /**
   * Assert that a string is not empty (null or "")
   * @param check string to check
   */
public static void assertNotEmpty(String check) {
if (StringUtils.isEmpty(check)) {
fail("Empty string");
}
}
/**
* Log the details of a login context
* @param name name to assert that the user is logged in as
* @param loginContext the login context
*/
public static void logLoginDetails(String name,
LoginContext loginContext) {
assertNotNull("Null login context", loginContext);
Subject subject = loginContext.getSubject();
LOG.info("Logged in as {}:\n {}", name, subject);
}
/**
* Set the JVM property to enable Kerberos debugging
*/
public static void enableKerberosDebugging() {
System.setProperty(AbstractSecureRegistryTest.SUN_SECURITY_KRB5_DEBUG,
"true");
}
/**
* Set the JVM property to enable Kerberos debugging
*/
public static void disableKerberosDebugging() {
System.setProperty(AbstractSecureRegistryTest.SUN_SECURITY_KRB5_DEBUG,
"false");
}
  /**
   * General code to validate bits of a component/service entry built with
   * {@link #addSampleEndpoints(ServiceRecord, String)}
   * @param record instance to check
   */
public static void validateEntry(ServiceRecord record) {
assertNotNull("null service record", record);
List<Endpoint> endpoints = record.external;
assertEquals(2, endpoints.size());
Endpoint webhdfs = findEndpoint(record, API_WEBHDFS, true, 1, 1);
assertEquals(API_WEBHDFS, webhdfs.api);
assertEquals(AddressTypes.ADDRESS_URI, webhdfs.addressType);
assertEquals(ProtocolTypes.PROTOCOL_REST, webhdfs.protocolType);
List<Map<String, String>> addressList = webhdfs.addresses;
Map<String, String> url = addressList.get(0);
String addr = url.get("uri");
assertTrue(addr.contains("http"));
assertTrue(addr.contains(":8020"));
    Endpoint nnipc = findEndpoint(record, NNIPC, false, 1, 2);
    assertEquals("wrong protocol in " + nnipc, ProtocolTypes.PROTOCOL_THRIFT,
        nnipc.protocolType);
    Endpoint ipc2 = findEndpoint(record, IPC2, false, 1, 2);
assertNotNull(ipc2);
Endpoint web = findEndpoint(record, HTTP_API, true, 1, 1);
assertEquals(1, web.addresses.size());
assertEquals(1, web.addresses.get(0).size());
}
/**
* Assert that an endpoint matches the criteria
* @param endpoint endpoint to examine
* @param addressType expected address type
* @param protocolType expected protocol type
* @param api API
*/
public static void assertMatches(Endpoint endpoint,
String addressType,
String protocolType,
String api) {
assertNotNull(endpoint);
assertEquals(addressType, endpoint.addressType);
assertEquals(protocolType, endpoint.protocolType);
assertEquals(api, endpoint.api);
}
/**
* Assert the records match.
* @param source record that was written
* @param resolved the one that resolved.
*/
public static void assertMatches(ServiceRecord source, ServiceRecord resolved) {
assertNotNull("Null source record ", source);
assertNotNull("Null resolved record ", resolved);
assertEquals(source.description, resolved.description);
Map<String, String> srcAttrs = source.attributes();
Map<String, String> resolvedAttrs = resolved.attributes();
String sourceAsString = source.toString();
String resolvedAsString = resolved.toString();
assertEquals("Wrong count of attrs in \n" + sourceAsString
+ "\nfrom\n" + resolvedAsString,
srcAttrs.size(),
resolvedAttrs.size());
for (Map.Entry<String, String> entry : srcAttrs.entrySet()) {
String attr = entry.getKey();
assertEquals("attribute "+ attr, entry.getValue(), resolved.get(attr));
}
assertEquals("wrong external endpoint count",
source.external.size(), resolved.external.size());
assertEquals("wrong external endpoint count",
source.internal.size(), resolved.internal.size());
}
  /**
   * Find an endpoint in a record or fail.
   * @param record record
   * @param api API
   * @param external external endpoint?
   * @param addressElements expected # of address elements
   * @param addressTupleSize expected size of an address tuple
   * @return the endpoint.
   */
public static Endpoint findEndpoint(ServiceRecord record,
String api, boolean external, int addressElements, int addressTupleSize) {
Endpoint epr = external ? record.getExternalEndpoint(api)
: record.getInternalEndpoint(api);
if (epr != null) {
assertEquals("wrong # of addresses",
addressElements, epr.addresses.size());
assertEquals("wrong # of elements in an address tuple",
addressTupleSize, epr.addresses.get(0).size());
return epr;
}
List<Endpoint> endpoints = external ? record.external : record.internal;
StringBuilder builder = new StringBuilder();
for (Endpoint endpoint : endpoints) {
builder.append("\"").append(endpoint).append("\" ");
}
fail("Did not find " + api + " in endpoints " + builder);
// never reached; here to keep the compiler happy
return null;
}
/**
* Log a record
* @param name record name
* @param record details
* @throws IOException only if something bizarre goes wrong marshalling
* a record.
*/
public static void logRecord(String name, ServiceRecord record) throws
IOException {
LOG.info(" {} = \n{}\n", name, recordMarshal.toJson(record));
}
/**
* Create a service entry with the sample endpoints
* @param persistence persistence policy
* @return the record
* @throws IOException on a failure
*/
public static ServiceRecord buildExampleServiceEntry(String persistence) throws
IOException,
URISyntaxException {
ServiceRecord record = new ServiceRecord();
record.set(YarnRegistryAttributes.YARN_ID, "example-0001");
record.set(YarnRegistryAttributes.YARN_PERSISTENCE, persistence);
addSampleEndpoints(record, "namenode");
return record;
}
  /**
   * Add some endpoints
   * @param entry entry
   * @param hostname hostname to use in the endpoint addresses
   * @throws URISyntaxException if an endpoint URI is invalid
   */
public static void addSampleEndpoints(ServiceRecord entry, String hostname)
throws URISyntaxException {
assertNotNull(hostname);
entry.addExternalEndpoint(webEndpoint(HTTP_API,
new URI("http", hostname + ":80", "/")));
entry.addExternalEndpoint(
restEndpoint(API_WEBHDFS,
new URI("http", hostname + ":8020", "/")));
Endpoint endpoint = ipcEndpoint(API_HDFS, null);
endpoint.addresses.add(RegistryTypeUtils.hostnamePortPair(hostname, 8030));
entry.addInternalEndpoint(endpoint);
InetSocketAddress localhost = new InetSocketAddress("localhost", 8050);
entry.addInternalEndpoint(
inetAddrEndpoint(NNIPC, ProtocolTypes.PROTOCOL_THRIFT, "localhost",
8050));
entry.addInternalEndpoint(
RegistryTypeUtils.ipcEndpoint(
IPC2, localhost));
}
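  /**
   * A minimal sketch tying the builders and validators above together,
   * assuming the PERMANENT persistence policy: build the example record,
   * verify its sample endpoints, then log it.
   * @throws IOException on a marshalling failure
   * @throws URISyntaxException if an endpoint URI is invalid
   */
  public static void buildAndValidateExample() throws IOException,
      URISyntaxException {
    ServiceRecord record = buildExampleServiceEntry(
        org.apache.hadoop.registry.client.types.yarn.PersistencePolicies.PERMANENT);
    validateEntry(record);
    logRecord("example", record);
  }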
/**
   * Describe the stage in the process with a box around it, so as
* to highlight it in test logs
* @param log log to use
* @param text text
* @param args logger args
*/
public static void describe(Logger log, String text, Object...args) {
log.info("\n=======================================");
log.info(text, args);
log.info("=======================================\n");
}
/**
   * Log out from a context if non-null; exceptions are caught and logged
* @param login login context
* @return null, always
*/
public static LoginContext logout(LoginContext login) {
try {
if (login != null) {
LOG.debug("Logging out login context {}", login.toString());
login.logout();
}
} catch (LoginException e) {
LOG.warn("Exception logging out: {}", e, e);
}
return null;
}
/**
   * Login via a UGI. Requires UGI to have been set up
   * @param user username
   * @param keytab keytab to log in from
* @return the UGI
* @throws IOException
*/
public static UserGroupInformation loginUGI(String user, File keytab) throws
IOException {
LOG.info("Logging in as {} from {}", user, keytab);
return UserGroupInformation.loginUserFromKeytabAndReturnUGI(user,
keytab.getAbsolutePath());
}
public static ServiceRecord createRecord(String persistence) {
return createRecord("01", persistence, "description");
}
public static ServiceRecord createRecord(String id, String persistence,
String description) {
ServiceRecord serviceRecord = new ServiceRecord();
serviceRecord.set(YarnRegistryAttributes.YARN_ID, id);
serviceRecord.description = description;
serviceRecord.set(YarnRegistryAttributes.YARN_PERSISTENCE, persistence);
return serviceRecord;
}
public static ServiceRecord createRecord(String id, String persistence,
String description, String data) {
return createRecord(id, persistence, description);
}
}
| 12,823 | 35.225989 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/cli/TestRegistryCli.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.cli;
import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import org.apache.hadoop.registry.AbstractRegistryTest;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class TestRegistryCli extends AbstractRegistryTest {
  protected static final Logger LOG =
      LoggerFactory.getLogger(TestRegistryCli.class);
  private ByteArrayOutputStream sysOutStream;
  private PrintStream sysOut;
  private ByteArrayOutputStream sysErrStream;
  private PrintStream sysErr;
  private PrintStream originalSysOut;
  private RegistryCli cli;
  @Before
  public void setUp() throws Exception {
    sysOutStream = new ByteArrayOutputStream();
    sysOut = new PrintStream(sysOutStream);
    sysErrStream = new ByteArrayOutputStream();
    sysErr = new PrintStream(sysErrStream);
    // redirect stdout for the duration of the test; restored in tearDown()
    originalSysOut = System.out;
    System.setOut(sysOut);
    cli = new RegistryCli(operations, createRegistryConfiguration(), sysOut, sysErr);
  }
  @After
  public void tearDown() throws Exception {
    System.setOut(originalSysOut);
    cli.close();
  }
private void assertResult(RegistryCli cli, int code, String...args) throws Exception {
int result = cli.run(args);
assertEquals(code, result);
}
@Test
public void testBadCommands() throws Exception {
assertResult(cli, -1, new String[] { });
assertResult(cli, -1, "foo");
}
@Test
public void testInvalidNumArgs() throws Exception {
assertResult(cli, -1, "ls");
assertResult(cli, -1, "ls", "/path", "/extraPath");
assertResult(cli, -1, "resolve");
assertResult(cli, -1, "resolve", "/path", "/extraPath");
assertResult(cli, -1, "mknode");
assertResult(cli, -1, "mknode", "/path", "/extraPath");
assertResult(cli, -1, "rm");
assertResult(cli, -1, "rm", "/path", "/extraPath");
assertResult(cli, -1, "bind");
assertResult(cli, -1, "bind", "foo");
assertResult(cli, -1, "bind", "-inet", "foo");
assertResult(cli, -1, "bind", "-inet", "-api", "-p", "378", "-h", "host", "/foo");
assertResult(cli, -1, "bind", "-inet", "-api", "Api", "-p", "-h", "host", "/foo");
assertResult(cli, -1, "bind", "-inet", "-api", "Api", "-p", "378", "-h", "/foo");
assertResult(cli, -1, "bind", "-inet", "-api", "Api", "-p", "378", "-h", "host");
assertResult(cli, -1, "bind", "-api", "Api", "-p", "378", "-h", "host", "/foo");
assertResult(cli, -1, "bind", "-webui", "foo");
assertResult(cli, -1, "bind", "-webui", "-api", "Api", "/foo");
assertResult(cli, -1, "bind", "-webui", "uriString", "-api", "/foo");
assertResult(cli, -1, "bind", "-webui", "uriString", "-api", "Api");
assertResult(cli, -1, "bind", "-rest", "foo");
assertResult(cli, -1, "bind", "-rest", "uriString", "-api", "Api");
assertResult(cli, -1, "bind", "-rest", "-api", "Api", "/foo");
assertResult(cli, -1, "bind", "-rest", "uriString", "-api", "/foo");
assertResult(cli, -1, "bind", "uriString", "-api", "Api", "/foo");
}
@Test
public void testBadArgType() throws Exception {
assertResult(cli, -1, "bind", "-inet", "-api", "Api", "-p", "fooPort", "-h",
"host", "/dir");
}
@Test
public void testBadPath() throws Exception {
assertResult(cli, -1, "ls", "NonSlashPath");
assertResult(cli, -1, "ls", "//");
assertResult(cli, -1, "resolve", "NonSlashPath");
assertResult(cli, -1, "resolve", "//");
assertResult(cli, -1, "mknode", "NonSlashPath");
assertResult(cli, -1, "mknode", "//");
assertResult(cli, -1, "rm", "NonSlashPath");
assertResult(cli, -1, "rm", "//");
assertResult(cli, -1, "bind", "-inet", "-api", "Api", "-p", "378", "-h", "host", "NonSlashPath");
assertResult(cli, -1, "bind", "-inet", "-api", "Api", "-p", "378", "-h", "host", "//");
assertResult(cli, -1, "bind", "-webui", "uriString", "-api", "Api", "NonSlashPath");
assertResult(cli, -1, "bind", "-webui", "uriString", "-api", "Api", "//");
assertResult(cli, -1, "bind", "-rest", "uriString", "-api", "Api", "NonSlashPath");
assertResult(cli, -1, "bind", "-rest", "uriString", "-api", "Api", "//");
}
@Test
public void testNotExistingPaths() throws Exception {
assertResult(cli, -1, "ls", "/nonexisting_path");
assertResult(cli, -1, "ls", "/NonExistingDir/nonexisting_path");
assertResult(cli, -1, "resolve", "/nonexisting_path");
assertResult(cli, -1, "resolve", "/NonExistingDir/nonexisting_path");
assertResult(cli, -1, "bind", "-inet", "-api", "Api", "-p", "378", "-h", "host", "/NonExistingDir/nonexisting_path");
assertResult(cli, -1, "bind", "-webui", "uriString", "-api", "Api", "/NonExistingDir/nonexisting_path");
assertResult(cli, -1, "bind", "-rest", "uriString", "-api", "Api", "/NonExistingDir/nonexisting_path");
}
@Test
public void testValidCommands() throws Exception {
assertResult(cli, 0, "bind", "-inet", "-api", "Api", "-p", "378", "-h", "host", "/foo");
assertResult(cli, 0, "resolve", "/foo");
assertResult(cli, 0, "rm", "/foo");
assertResult(cli, -1, "resolve", "/foo");
assertResult(cli, 0, "bind", "-webui", "uriString", "-api", "Api", "/foo");
assertResult(cli, 0, "resolve", "/foo");
assertResult(cli, 0, "rm", "/foo");
assertResult(cli, -1, "resolve", "/foo");
assertResult(cli, 0, "bind", "-rest", "uriString", "-api", "Api", "/foo");
assertResult(cli, 0, "resolve", "/foo");
assertResult(cli, 0, "rm", "/foo");
assertResult(cli, -1, "resolve", "/foo");
    // Test binds in subdirectories
assertResult(cli, 0, "mknode", "/subdir");
assertResult(cli, -1, "resolve", "/subdir");
assertResult(cli, 0, "bind", "-inet", "-api", "Api", "-p", "378", "-h", "host", "/subdir/foo");
assertResult(cli, 0, "resolve", "/subdir/foo");
assertResult(cli, 0, "rm", "/subdir/foo");
assertResult(cli, -1, "resolve", "/subdir/foo");
assertResult(cli, 0, "bind", "-webui", "uriString", "-api", "Api", "/subdir/foo");
assertResult(cli, 0, "resolve", "/subdir/foo");
assertResult(cli, 0, "rm", "/subdir/foo");
assertResult(cli, -1, "resolve", "/subdir/foo");
assertResult(cli, 0, "bind", "-rest", "uriString", "-api", "Api", "/subdir/foo");
assertResult(cli, 0, "resolve", "/subdir/foo");
assertResult(cli, 0, "rm", "/subdir/foo");
assertResult(cli, -1, "resolve", "/subdir/foo");
assertResult(cli, 0, "rm", "/subdir");
assertResult(cli, -1, "resolve", "/subdir");
    // Test binding the directory itself
assertResult(cli, 0, "mknode", "/dir");
assertResult(cli, -1, "resolve", "/dir");
assertResult(cli, 0, "bind", "-inet", "-api", "Api", "-p", "378", "-h", "host", "/dir");
assertResult(cli, 0, "resolve", "/dir");
assertResult(cli, 0, "rm", "/dir");
assertResult(cli, -1, "resolve", "/dir");
assertResult(cli, 0, "mknode", "/dir");
assertResult(cli, -1, "resolve", "/dir");
assertResult(cli, 0, "bind", "-webui", "uriString", "-api", "Api", "/dir");
assertResult(cli, 0, "resolve", "/dir");
assertResult(cli, 0, "rm", "/dir");
assertResult(cli, -1, "resolve", "/dir");
assertResult(cli, 0, "mknode", "/dir");
assertResult(cli, -1, "resolve", "/dir");
assertResult(cli, 0, "bind", "-rest", "uriString", "-api", "Api", "/dir");
assertResult(cli, 0, "resolve", "/dir");
assertResult(cli, 0, "rm", "/dir");
assertResult(cli, -1, "resolve", "/dir");
assertResult(cli, 0, "rm", "/Nonexitent");
}
}
| 8,304 | 40.944444 | 121 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/client/binding/TestRegistryPathUtils.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.client.binding;
import static org.apache.hadoop.registry.client.binding.RegistryPathUtils.*;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.fs.PathNotFoundException;
import org.apache.hadoop.registry.client.exceptions.InvalidPathnameException;
import org.junit.Assert;
import org.junit.Test;
public class TestRegistryPathUtils extends Assert {
public static final String EURO = "\u20AC";
@Test
public void testFormatAscii() throws Throwable {
String in = "hostname01101101-1";
assertConverted(in, in);
}
/*
* Euro symbol
*/
@Test
public void testFormatEuroSymbol() throws Throwable {
assertConverted("xn--lzg", EURO);
}
@Test
public void testFormatIdempotent() throws Throwable {
assertConverted("xn--lzg", RegistryPathUtils.encodeForRegistry(EURO));
}
@Test
public void testFormatCyrillicSpaced() throws Throwable {
assertConverted("xn--pa 3-k4di", "\u0413PA\u0414 3");
}
protected void assertConverted(String expected, String in) {
String out = RegistryPathUtils.encodeForRegistry(in);
assertEquals("Conversion of " + in, expected, out);
}
@Test
public void testPaths() throws Throwable {
assertCreatedPathEquals("/", "/", "");
assertCreatedPathEquals("/", "", "");
assertCreatedPathEquals("/", "", "/");
assertCreatedPathEquals("/", "/", "/");
assertCreatedPathEquals("/a", "/a", "");
assertCreatedPathEquals("/a", "/", "a");
assertCreatedPathEquals("/a/b", "/a", "b");
assertCreatedPathEquals("/a/b", "/a/", "b");
assertCreatedPathEquals("/a/b", "/a", "/b");
assertCreatedPathEquals("/a/b", "/a", "/b/");
assertCreatedPathEquals("/a", "/a", "/");
assertCreatedPathEquals("/alice", "/", "/alice");
assertCreatedPathEquals("/alice", "/alice", "/");
}
@Test
public void testComplexPaths() throws Throwable {
assertCreatedPathEquals("/", "", "");
assertCreatedPathEquals("/yarn/registry/users/hadoop/org-apache-hadoop",
"/yarn/registry",
"users/hadoop/org-apache-hadoop/");
}
private static void assertCreatedPathEquals(String expected, String base,
String path) throws IOException {
String fullPath = createFullPath(base, path);
assertEquals("\"" + base + "\" + \"" + path + "\" =\"" + fullPath + "\"",
expected, fullPath);
}
@Test
public void testSplittingEmpty() throws Throwable {
assertEquals(0, split("").size());
assertEquals(0, split("/").size());
assertEquals(0, split("///").size());
}
@Test
public void testSplitting() throws Throwable {
assertEquals(1, split("/a").size());
assertEquals(0, split("/").size());
assertEquals(3, split("/a/b/c").size());
assertEquals(3, split("/a/b/c/").size());
assertEquals(3, split("a/b/c").size());
assertEquals(3, split("/a/b//c").size());
assertEquals(3, split("//a/b/c/").size());
List<String> split = split("//a/b/c/");
assertEquals("a", split.get(0));
assertEquals("b", split.get(1));
assertEquals("c", split.get(2));
}
@Test
public void testParentOf() throws Throwable {
assertEquals("/", parentOf("/a"));
assertEquals("/", parentOf("/a/"));
assertEquals("/a", parentOf("/a/b"));
assertEquals("/a/b", parentOf("/a/b/c"));
}
@Test
public void testLastPathEntry() throws Throwable {
assertEquals("",lastPathEntry("/"));
assertEquals("",lastPathEntry("//"));
assertEquals("c",lastPathEntry("/a/b/c"));
assertEquals("c",lastPathEntry("/a/b/c/"));
}
@Test(expected = PathNotFoundException.class)
public void testParentOfRoot() throws Throwable {
parentOf("/");
}
@Test
public void testValidPaths() throws Throwable {
assertValidPath("/");
assertValidPath("/a/b/c");
assertValidPath("/users/drwho/org-apache-hadoop/registry/appid-55-55");
assertValidPath("/a50");
}
@Test
public void testInvalidPaths() throws Throwable {
assertInvalidPath("/a_b");
assertInvalidPath("/UpperAndLowerCase");
assertInvalidPath("/space in string");
// Is this valid? assertInvalidPath("/50");
}
private void assertValidPath(String path) throws InvalidPathnameException {
validateZKPath(path);
}
private void assertInvalidPath(String path) throws InvalidPathnameException {
try {
validateElementsAsDNS(path);
fail("path considered valid: " + path);
} catch (InvalidPathnameException expected) {
// expected
}
}
}
| 5,328 | 28.77095 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/client/binding/TestRegistryOperationUtils.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.client.binding;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.Assert;
import org.junit.Test;
/**
* Tests for the {@link RegistryUtils} class
*/
public class TestRegistryOperationUtils extends Assert {
@Test
  public void testUsernameExtractionEnvVarOverride() throws Throwable {
String whoami = RegistryUtils.getCurrentUsernameUnencoded("drwho");
assertEquals("drwho", whoami);
}
@Test
public void testUsernameExtractionCurrentuser() throws Throwable {
String whoami = RegistryUtils.getCurrentUsernameUnencoded("");
String ugiUser = UserGroupInformation.getCurrentUser().getShortUserName();
assertEquals(ugiUser, whoami);
}
@Test
public void testShortenUsername() throws Throwable {
assertEquals("hbase",
RegistryUtils.convertUsername("[email protected]"));
assertEquals("hbase",
RegistryUtils.convertUsername("hbase/[email protected]"));
assertEquals("hbase",
RegistryUtils.convertUsername("hbase"));
assertEquals("hbase user",
RegistryUtils.convertUsername("hbase user"));
}
}
| 1,958 | 33.368421 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/client/binding/TestMarshalling.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.client.binding;
import org.apache.hadoop.registry.RegistryTestHelper;
import org.apache.hadoop.registry.client.exceptions.InvalidRecordException;
import org.apache.hadoop.registry.client.exceptions.NoRecordException;
import org.apache.hadoop.registry.client.types.ServiceRecord;
import org.apache.hadoop.registry.client.types.yarn.PersistencePolicies;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestName;
import org.junit.rules.Timeout;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Test record marshalling
*/
public class TestMarshalling extends RegistryTestHelper {
private static final Logger
LOG = LoggerFactory.getLogger(TestMarshalling.class);
@Rule
public final Timeout testTimeout = new Timeout(10000);
@Rule
public TestName methodName = new TestName();
private static RegistryUtils.ServiceRecordMarshal marshal;
@BeforeClass
public static void setupClass() {
marshal = new RegistryUtils.ServiceRecordMarshal();
}
@Test
public void testRoundTrip() throws Throwable {
String persistence = PersistencePolicies.PERMANENT;
ServiceRecord record = createRecord(persistence);
record.set("customkey", "customvalue");
record.set("customkey2", "customvalue2");
RegistryTypeUtils.validateServiceRecord("", record);
LOG.info(marshal.toJson(record));
byte[] bytes = marshal.toBytes(record);
ServiceRecord r2 = marshal.fromBytes("", bytes);
assertMatches(record, r2);
RegistryTypeUtils.validateServiceRecord("", r2);
}
@Test(expected = NoRecordException.class)
public void testUnmarshallNoData() throws Throwable {
marshal.fromBytes("src", new byte[]{});
}
@Test(expected = NoRecordException.class)
public void testUnmarshallNotEnoughData() throws Throwable {
// this is nominally JSON, but without the service record header
marshal.fromBytes("src", new byte[]{'{','}'}, ServiceRecord.RECORD_TYPE);
}
@Test(expected = InvalidRecordException.class)
public void testUnmarshallNoBody() throws Throwable {
byte[] bytes = "this is not valid JSON at all and should fail".getBytes();
marshal.fromBytes("src", bytes);
}
@Test(expected = InvalidRecordException.class)
public void testUnmarshallWrongType() throws Throwable {
byte[] bytes = "{'type':''}".getBytes();
ServiceRecord serviceRecord = marshal.fromBytes("marshalling", bytes);
RegistryTypeUtils.validateServiceRecord("validating", serviceRecord);
}
@Test(expected = NoRecordException.class)
public void testUnmarshallWrongLongType() throws Throwable {
ServiceRecord record = new ServiceRecord();
record.type = "ThisRecordHasALongButNonMatchingType";
byte[] bytes = marshal.toBytes(record);
ServiceRecord serviceRecord = marshal.fromBytes("marshalling",
bytes, ServiceRecord.RECORD_TYPE);
}
@Test(expected = NoRecordException.class)
public void testUnmarshallNoType() throws Throwable {
ServiceRecord record = new ServiceRecord();
record.type = "NoRecord";
byte[] bytes = marshal.toBytes(record);
ServiceRecord serviceRecord = marshal.fromBytes("marshalling",
bytes, ServiceRecord.RECORD_TYPE);
}
@Test(expected = InvalidRecordException.class)
public void testRecordValidationWrongType() throws Throwable {
ServiceRecord record = new ServiceRecord();
record.type = "NotAServiceRecordType";
RegistryTypeUtils.validateServiceRecord("validating", record);
}
@Test
public void testUnknownFieldsRoundTrip() throws Throwable {
ServiceRecord record =
createRecord(PersistencePolicies.APPLICATION_ATTEMPT);
record.set("key", "value");
record.set("intval", "2");
assertEquals("value", record.get("key"));
assertEquals("2", record.get("intval"));
assertNull(record.get("null"));
assertEquals("defval", record.get("null", "defval"));
byte[] bytes = marshal.toBytes(record);
ServiceRecord r2 = marshal.fromBytes("", bytes);
assertEquals("value", r2.get("key"));
assertEquals("2", r2.get("intval"));
}
@Test
public void testFieldPropagationInCopy() throws Throwable {
ServiceRecord record =
createRecord(PersistencePolicies.APPLICATION_ATTEMPT);
record.set("key", "value");
record.set("intval", "2");
ServiceRecord that = new ServiceRecord(record);
assertMatches(record, that);
}
}
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/client/impl/CuratorEventCatcher.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.client.impl;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.api.BackgroundCallback;
import org.apache.curator.framework.api.CuratorEvent;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.AtomicInteger;
/**
* This is a little event catcher for curator asynchronous
* operations.
*/
public class CuratorEventCatcher implements BackgroundCallback {
private static final Logger LOG =
LoggerFactory.getLogger(CuratorEventCatcher.class);
public final BlockingQueue<CuratorEvent>
events = new LinkedBlockingQueue<CuratorEvent>(1);
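// capacity-1 queue: a second event blocks in processResult() until take() drains the first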
private final AtomicInteger eventCounter = new AtomicInteger(0);
@Override
public void processResult(CuratorFramework client,
CuratorEvent event) throws
Exception {
LOG.info("received {}", event);
eventCounter.incrementAndGet();
events.put(event);
}
public int getCount() {
return eventCounter.get();
}
/**
* Blocking operation to take the first event off the queue
* @return the first event on the queue, when it arrives
* @throws InterruptedException if interrupted
*/
public CuratorEvent take() throws InterruptedException {
return events.take();
}
}
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/client/impl/TestMicroZookeeperService.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.client.impl;
import org.apache.hadoop.service.ServiceOperations;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.registry.server.services.MicroZookeeperService;
import org.junit.After;
import org.junit.Assert;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestName;
import org.junit.rules.Timeout;
import java.io.IOException;
/**
* Simple tests to look at the micro ZK service itself
*/
public class TestMicroZookeeperService extends Assert {
private MicroZookeeperService zookeeper;
@Rule
public final Timeout testTimeout = new Timeout(10000);
@Rule
public TestName methodName = new TestName();
@After
public void destroyZKServer() throws IOException {
ServiceOperations.stop(zookeeper);
}
@Test
public void testTempDirSupport() throws Throwable {
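// no data directory is configured, so the service is expected to create and use its own temp dir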
YarnConfiguration conf = new YarnConfiguration();
zookeeper = new MicroZookeeperService("t1");
zookeeper.init(conf);
zookeeper.start();
zookeeper.stop();
}
}
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/client/impl/TestCuratorService.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.client.impl;
import org.apache.curator.framework.api.CuratorEvent;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
import org.apache.hadoop.fs.PathNotFoundException;
import org.apache.hadoop.service.ServiceOperations;
import org.apache.hadoop.registry.AbstractZKRegistryTest;
import org.apache.hadoop.registry.client.impl.zk.CuratorService;
import org.apache.hadoop.registry.client.impl.zk.RegistrySecurity;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.data.ACL;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.List;
/**
* Test the curator service
*/
public class TestCuratorService extends AbstractZKRegistryTest {
private static final Logger LOG =
LoggerFactory.getLogger(TestCuratorService.class);
protected CuratorService curatorService;
public static final String MISSING = "/missing";
private List<ACL> rootACL;
@Before
public void startCurator() throws IOException {
createCuratorService();
}
@After
public void stopCurator() {
ServiceOperations.stop(curatorService);
}
/**
* Create and start the curator service, ensuring the registry root exists
*/
protected void createCuratorService() throws IOException {
curatorService = new CuratorService("curatorService");
curatorService.init(createRegistryConfiguration());
curatorService.start();
rootACL = RegistrySecurity.WorldReadWriteACL;
curatorService.maybeCreate("", CreateMode.PERSISTENT, rootACL, true);
}
@Test
public void testLs() throws Throwable {
curatorService.zkList("/");
}
@Test(expected = PathNotFoundException.class)
public void testLsNotFound() throws Throwable {
List<String> ls = curatorService.zkList(MISSING);
}
@Test
public void testExists() throws Throwable {
assertTrue(curatorService.zkPathExists("/"));
}
@Test
public void testExistsMissing() throws Throwable {
assertFalse(curatorService.zkPathExists(MISSING));
}
@Test
public void testVerifyExists() throws Throwable {
pathMustExist("/");
}
@Test(expected = PathNotFoundException.class)
public void testVerifyExistsMissing() throws Throwable {
pathMustExist("/file-not-found");
}
@Test
public void testMkdirs() throws Throwable {
mkPath("/p1", CreateMode.PERSISTENT);
pathMustExist("/p1");
mkPath("/p1/p2", CreateMode.EPHEMERAL);
pathMustExist("/p1/p2");
}
private void mkPath(String path, CreateMode mode) throws IOException {
curatorService.zkMkPath(path, mode, false,
RegistrySecurity.WorldReadWriteACL);
}
public void pathMustExist(String path) throws IOException {
curatorService.zkPathMustExist(path);
}
@Test(expected = PathNotFoundException.class)
public void testMkdirChild() throws Throwable {
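// the parent path does not exist and createParents is false, so the mkdir must fail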
mkPath("/testMkdirChild/child", CreateMode.PERSISTENT);
}
@Test
public void testMaybeCreate() throws Throwable {
assertTrue(curatorService.maybeCreate("/p3", CreateMode.PERSISTENT,
RegistrySecurity.WorldReadWriteACL, false));
assertFalse(curatorService.maybeCreate("/p3", CreateMode.PERSISTENT,
RegistrySecurity.WorldReadWriteACL, false));
}
@Test
public void testRM() throws Throwable {
mkPath("/rm", CreateMode.PERSISTENT);
curatorService.zkDelete("/rm", false, null);
verifyNotExists("/rm");
curatorService.zkDelete("/rm", false, null);
}
@Test
public void testRMNonRf() throws Throwable {
mkPath("/rm", CreateMode.PERSISTENT);
mkPath("/rm/child", CreateMode.PERSISTENT);
try {
curatorService.zkDelete("/rm", false, null);
fail("expected a failure");
} catch (PathIsNotEmptyDirectoryException expected) {
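// expected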
}
}
@Test
public void testRMRf() throws Throwable {
mkPath("/rm", CreateMode.PERSISTENT);
mkPath("/rm/child", CreateMode.PERSISTENT);
curatorService.zkDelete("/rm", true, null);
verifyNotExists("/rm");
curatorService.zkDelete("/rm", true, null);
}
@Test
public void testBackgroundDelete() throws Throwable {
mkPath("/rm", CreateMode.PERSISTENT);
mkPath("/rm/child", CreateMode.PERSISTENT);
CuratorEventCatcher events = new CuratorEventCatcher();
curatorService.zkDelete("/rm", true, events);
CuratorEvent taken = events.take();
LOG.info("took {}", taken);
assertEquals(1, events.getCount());
}
@Test
public void testCreate() throws Throwable {
curatorService.zkCreate("/testcreate",
CreateMode.PERSISTENT, getTestBuffer(),
rootACL
);
pathMustExist("/testcreate");
}
@Test
public void testCreateTwice() throws Throwable {
byte[] buffer = getTestBuffer();
curatorService.zkCreate("/testcreatetwice",
CreateMode.PERSISTENT, buffer,
rootACL);
try {
curatorService.zkCreate("/testcreatetwice",
CreateMode.PERSISTENT, buffer,
rootACL);
fail("expected a FileAlreadyExistsException");
} catch (FileAlreadyExistsException e) {
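// expected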
}
}
@Test
public void testCreateUpdate() throws Throwable {
byte[] buffer = getTestBuffer();
curatorService.zkCreate("/testcreateupdate",
CreateMode.PERSISTENT, buffer,
rootACL
);
curatorService.zkUpdate("/testcreateupdate", buffer);
}
@Test(expected = PathNotFoundException.class)
public void testUpdateMissing() throws Throwable {
curatorService.zkUpdate("/testupdatemissing", getTestBuffer());
}
@Test
public void testUpdateDirectory() throws Throwable {
mkPath("/testupdatedirectory", CreateMode.PERSISTENT);
curatorService.zkUpdate("/testupdatedirectory", getTestBuffer());
}
@Test
public void testUpdateDirectoryWithChild() throws Throwable {
mkPath("/testupdatedirectorywithchild", CreateMode.PERSISTENT);
mkPath("/testupdatedirectorywithchild/child", CreateMode.PERSISTENT);
curatorService.zkUpdate("/testupdatedirectorywithchild", getTestBuffer());
}
@Test
public void testUseZKServiceForBinding() throws Throwable {
CuratorService cs2 = new CuratorService("curator", zookeeper);
cs2.init(new Configuration());
cs2.start();
}
protected byte[] getTestBuffer() {
byte[] buffer = new byte[1];
buffer[0] = '0';
return buffer;
}
public void verifyNotExists(String path) throws IOException {
if (curatorService.zkPathExists(path)) {
fail("Path should not exist: " + path);
}
}
}
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/integration/TestRegistryRMOperations.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.integration;
import org.apache.curator.framework.api.BackgroundCallback;
import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
import org.apache.hadoop.registry.AbstractRegistryTest;
import org.apache.hadoop.registry.client.api.BindFlags;
import org.apache.hadoop.registry.client.api.RegistryConstants;
import org.apache.hadoop.registry.client.binding.RegistryUtils;
import org.apache.hadoop.registry.client.binding.RegistryPathUtils;
import org.apache.hadoop.registry.client.impl.zk.ZKPathDumper;
import org.apache.hadoop.registry.client.impl.CuratorEventCatcher;
import org.apache.hadoop.registry.client.types.yarn.PersistencePolicies;
import org.apache.hadoop.registry.client.types.RegistryPathStatus;
import org.apache.hadoop.registry.client.types.ServiceRecord;
import org.apache.hadoop.registry.client.types.yarn.YarnRegistryAttributes;
import org.apache.hadoop.registry.server.services.DeleteCompletionCallback;
import org.apache.hadoop.registry.server.services.RegistryAdminService;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.net.URI;
import java.util.Collection;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import static org.apache.hadoop.registry.client.binding.RegistryTypeUtils.inetAddrEndpoint;
import static org.apache.hadoop.registry.client.binding.RegistryTypeUtils.restEndpoint;
public class TestRegistryRMOperations extends AbstractRegistryTest {
protected static final Logger LOG =
LoggerFactory.getLogger(TestRegistryRMOperations.class);
/**
* trigger a purge operation
* @param path path
* @param id yarn ID
* @param policyMatch policy to match ID on
* @param purgePolicy policy when there are children under a match
* @return the number purged
* @throws IOException
*/
public int purge(String path,
String id,
String policyMatch,
RegistryAdminService.PurgePolicy purgePolicy) throws
IOException,
ExecutionException,
InterruptedException {
return purge(path, id, policyMatch, purgePolicy, null);
}
/**
* trigger a purge operation
* @param path path
* @param id yarn ID
* @param policyMatch policy to match ID on
* @param purgePolicy policy when there are children under a match
* @param callback optional callback
* @return the number purged
* @throws IOException
*/
public int purge(String path,
String id,
String policyMatch,
RegistryAdminService.PurgePolicy purgePolicy,
BackgroundCallback callback) throws
IOException,
ExecutionException,
InterruptedException {
Future<Integer> future = registry.purgeRecordsAsync(path,
id, policyMatch, purgePolicy, callback);
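// wait for the async purge; unwrap any IOException so callers see the original failure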
try {
return future.get();
} catch (ExecutionException e) {
if (e.getCause() instanceof IOException) {
throw (IOException) e.getCause();
} else {
throw e;
}
}
}
@Test
public void testPurgeEntryCuratorCallback() throws Throwable {
String path = "/users/example/hbase/hbase1/";
ServiceRecord written = buildExampleServiceEntry(
PersistencePolicies.APPLICATION_ATTEMPT);
written.set(YarnRegistryAttributes.YARN_ID,
"testAsyncPurgeEntry_attempt_001");
operations.mknode(RegistryPathUtils.parentOf(path), true);
operations.bind(path, written, 0);
ZKPathDumper dump = registry.dumpPath(false);
CuratorEventCatcher events = new CuratorEventCatcher();
LOG.info("Initial state {}", dump);
// container query: the record persists at APPLICATION_ATTEMPT level, so a CONTAINER purge must match nothing
String id = written.get(YarnRegistryAttributes.YARN_ID, "");
int opcount = purge("/",
id,
PersistencePolicies.CONTAINER,
RegistryAdminService.PurgePolicy.PurgeAll,
events);
assertPathExists(path);
assertEquals(0, opcount);
assertEquals("Event counter", 0, events.getCount());
// now the application attempt
opcount = purge("/",
id,
PersistencePolicies.APPLICATION_ATTEMPT,
RegistryAdminService.PurgePolicy.PurgeAll,
events);
LOG.info("Final state {}", dump);
assertPathNotFound(path);
assertEquals("wrong no of delete operations in " + dump, 1, opcount);
// and validate the callback event
assertEquals("Event counter", 1, events.getCount());
}
@Test
public void testAsyncPurgeEntry() throws Throwable {
String path = "/users/example/hbase/hbase1/";
ServiceRecord written = buildExampleServiceEntry(
PersistencePolicies.APPLICATION_ATTEMPT);
written.set(YarnRegistryAttributes.YARN_ID,
"testAsyncPurgeEntry_attempt_001");
operations.mknode(RegistryPathUtils.parentOf(path), true);
operations.bind(path, written, 0);
ZKPathDumper dump = registry.dumpPath(false);
LOG.info("Initial state {}", dump);
DeleteCompletionCallback deletions = new DeleteCompletionCallback();
int opcount = purge("/",
written.get(YarnRegistryAttributes.YARN_ID, ""),
PersistencePolicies.CONTAINER,
RegistryAdminService.PurgePolicy.PurgeAll,
deletions);
assertPathExists(path);
dump = registry.dumpPath(false);
assertEquals("wrong no of delete operations in " + dump, 0,
deletions.getEventCount());
assertEquals("wrong no of delete operations in " + dump, 0, opcount);
// now app attempt
deletions = new DeleteCompletionCallback();
opcount = purge("/",
written.get(YarnRegistryAttributes.YARN_ID, ""),
PersistencePolicies.APPLICATION_ATTEMPT,
RegistryAdminService.PurgePolicy.PurgeAll,
deletions);
dump = registry.dumpPath(false);
LOG.info("Final state {}", dump);
assertPathNotFound(path);
assertEquals("wrong no of delete operations in " + dump, 1,
deletions.getEventCount());
assertEquals("wrong no of delete operations in " + dump, 1, opcount);
// and validate the callback event
}
@Test
public void testPutGetContainerPersistenceServiceEntry() throws Throwable {
String path = ENTRY_PATH;
ServiceRecord written = buildExampleServiceEntry(
PersistencePolicies.CONTAINER);
operations.mknode(RegistryPathUtils.parentOf(path), true);
operations.bind(path, written, BindFlags.CREATE);
ServiceRecord resolved = operations.resolve(path);
validateEntry(resolved);
assertMatches(written, resolved);
}
/**
* Create a complex example app
* @throws Throwable
*/
@Test
public void testCreateComplexApplication() throws Throwable {
String appId = "application_1408631738011_0001";
String cid = "container_1408631738011_0001_01_";
String cid1 = cid + "000001";
String cid2 = cid + "000002";
String appPath = USERPATH + "tomcat";
ServiceRecord webapp = createRecord(appId,
PersistencePolicies.APPLICATION, "tomcat-based web application",
null);
webapp.addExternalEndpoint(restEndpoint("www",
new URI("http", "//loadbalancer/", null)));
ServiceRecord comp1 = createRecord(cid1, PersistencePolicies.CONTAINER,
null,
null);
comp1.addExternalEndpoint(restEndpoint("www",
new URI("http", "//rack4server3:43572", null)));
comp1.addInternalEndpoint(
inetAddrEndpoint("jmx", "JMX", "rack4server3", 43573));
// Component 2 has a container lifespan
ServiceRecord comp2 = createRecord(cid2, PersistencePolicies.CONTAINER,
null,
null);
comp2.addExternalEndpoint(restEndpoint("www",
new URI("http", "//rack1server28:35881", null)));
comp2.addInternalEndpoint(
inetAddrEndpoint("jmx", "JMX", "rack1server28", 35882));
operations.mknode(USERPATH, false);
operations.bind(appPath, webapp, BindFlags.OVERWRITE);
String componentsPath = appPath + RegistryConstants.SUBPATH_COMPONENTS;
operations.mknode(componentsPath, false);
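// YARN container IDs contain underscores, so encode them into registry-safe names first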
String dns1 = RegistryPathUtils.encodeYarnID(cid1);
String dns1path = componentsPath + dns1;
operations.bind(dns1path, comp1, BindFlags.CREATE);
String dns2 = RegistryPathUtils.encodeYarnID(cid2);
String dns2path = componentsPath + dns2;
operations.bind(dns2path, comp2, BindFlags.CREATE);
ZKPathDumper pathDumper = registry.dumpPath(false);
LOG.info(pathDumper.toString());
logRecord("tomcat", webapp);
logRecord(dns1, comp1);
logRecord(dns2, comp2);
ServiceRecord dns1resolved = operations.resolve(dns1path);
assertEquals("Persistence policies on resolved entry",
PersistencePolicies.CONTAINER,
dns1resolved.get(YarnRegistryAttributes.YARN_PERSISTENCE, ""));
Map<String, RegistryPathStatus> children =
RegistryUtils.statChildren(operations, componentsPath);
assertEquals(2, children.size());
Collection<RegistryPathStatus>
componentStats = children.values();
Map<String, ServiceRecord> records =
RegistryUtils.extractServiceRecords(operations,
componentsPath, componentStats);
assertEquals(2, records.size());
ServiceRecord retrieved1 = records.get(dns1path);
logRecord(retrieved1.get(YarnRegistryAttributes.YARN_ID, ""), retrieved1);
assertMatches(dns1resolved, retrieved1);
assertEquals(PersistencePolicies.CONTAINER,
retrieved1.get(YarnRegistryAttributes.YARN_PERSISTENCE, ""));
// create a plain (non-record) subdirectory under components/
operations.mknode(componentsPath + "subdir", false);
// this shows up in the listing of child entries
Map<String, RegistryPathStatus> childrenUpdated =
RegistryUtils.statChildren(operations, componentsPath);
assertEquals(3, childrenUpdated.size());
// the non-record child is not picked up in the record listing
Map<String, ServiceRecord> recordsUpdated =
RegistryUtils.extractServiceRecords(operations,
componentsPath,
childrenUpdated);
assertEquals(2, recordsUpdated.size());
// now do some deletions.
// synchronous delete container ID 2
// fail if the app policy is chosen
assertEquals(0, purge("/", cid2, PersistencePolicies.APPLICATION,
RegistryAdminService.PurgePolicy.FailOnChildren));
// succeed for container
assertEquals(1, purge("/", cid2, PersistencePolicies.CONTAINER,
RegistryAdminService.PurgePolicy.FailOnChildren));
assertPathNotFound(dns2path);
assertPathExists(dns1path);
// expect SkipOnChildren to skip the app entry because children exist
assertEquals(0,
purge("/", appId, PersistencePolicies.APPLICATION,
RegistryAdminService.PurgePolicy.SkipOnChildren));
assertPathExists(appPath);
assertPathExists(dns1path);
// attempt to delete app with policy of fail on children
try {
int p = purge("/",
appId,
PersistencePolicies.APPLICATION,
RegistryAdminService.PurgePolicy.FailOnChildren);
fail("expected a failure, got a purge count of " + p);
} catch (PathIsNotEmptyDirectoryException expected) {
// expected
}
assertPathExists(appPath);
assertPathExists(dns1path);
// now trigger recursive delete
assertEquals(1,
purge("/", appId, PersistencePolicies.APPLICATION,
RegistryAdminService.PurgePolicy.PurgeAll));
assertPathNotFound(appPath);
assertPathNotFound(dns1path);
}
@Test
public void testChildDeletion() throws Throwable {
ServiceRecord app = createRecord("app1",
PersistencePolicies.APPLICATION, "app",
null);
ServiceRecord container = createRecord("container1",
PersistencePolicies.CONTAINER, "container",
null);
operations.bind("/app", app, BindFlags.OVERWRITE);
operations.bind("/app/container", container, BindFlags.OVERWRITE);
try {
int p = purge("/",
"app1",
PersistencePolicies.APPLICATION,
RegistryAdminService.PurgePolicy.FailOnChildren);
fail("expected a failure, got a purge count of " + p);
} catch (PathIsNotEmptyDirectoryException expected) {
// expected
}
}
}
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/integration/TestYarnPolicySelector.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.integration;
import org.apache.hadoop.registry.RegistryTestHelper;
import org.apache.hadoop.registry.client.types.yarn.PersistencePolicies;
import org.apache.hadoop.registry.client.types.RegistryPathStatus;
import org.apache.hadoop.registry.client.types.ServiceRecord;
import org.apache.hadoop.registry.server.integration.SelectByYarnPersistence;
import org.apache.hadoop.registry.server.services.RegistryAdminService;
import org.junit.Test;
public class TestYarnPolicySelector extends RegistryTestHelper {
private ServiceRecord record = createRecord("1",
PersistencePolicies.APPLICATION, "one",
null);
private RegistryPathStatus status = new RegistryPathStatus("/", 0, 0, 1);
public void assertSelected(boolean outcome,
RegistryAdminService.NodeSelector selector) {
boolean select = selector.shouldSelect("/", status, record);
assertEquals(selector.toString(), outcome, select);
}
@Test
public void testByContainer() throws Throwable {
assertSelected(false,
new SelectByYarnPersistence("1",
PersistencePolicies.CONTAINER));
}
@Test
public void testByApp() throws Throwable {
assertSelected(true,
new SelectByYarnPersistence("1",
PersistencePolicies.APPLICATION));
}
@Test
public void testByAppName() throws Throwable {
assertSelected(false,
new SelectByYarnPersistence("2",
PersistencePolicies.APPLICATION));
}
}
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/AbstractSecureRegistryTest.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.secure;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.minikdc.MiniKdc;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.util.KerberosName;
import org.apache.hadoop.service.Service;
import org.apache.hadoop.service.ServiceOperations;
import org.apache.hadoop.registry.RegistryTestHelper;
import org.apache.hadoop.registry.client.impl.zk.RegistrySecurity;
import org.apache.hadoop.registry.client.impl.zk.ZookeeperConfigOptions;
import org.apache.hadoop.registry.server.services.AddingCompositeService;
import org.apache.hadoop.registry.server.services.MicroZookeeperService;
import org.apache.hadoop.registry.server.services.MicroZookeeperServiceKeys;
import org.apache.hadoop.util.Shell;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.rules.TestName;
import org.junit.rules.Timeout;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.security.auth.Subject;
import javax.security.auth.kerberos.KerberosPrincipal;
import javax.security.auth.login.LoginContext;
import javax.security.auth.login.LoginException;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.security.Principal;
import java.util.HashSet;
import java.util.Properties;
import java.util.Set;
/**
* Add kerberos tests. This is based on the (JUnit3) KerberosSecurityTestcase
* and its test case, <code>TestMiniKdc</code>
*/
public class AbstractSecureRegistryTest extends RegistryTestHelper {
public static final String REALM = "EXAMPLE.COM";
public static final String ZOOKEEPER = "zookeeper";
public static final String ZOOKEEPER_LOCALHOST = "zookeeper/localhost";
public static final String ZOOKEEPER_1270001 = "zookeeper/127.0.0.1";
public static final String ZOOKEEPER_REALM = "zookeeper@" + REALM;
public static final String ZOOKEEPER_CLIENT_CONTEXT = ZOOKEEPER;
public static final String ZOOKEEPER_SERVER_CONTEXT = "ZOOKEEPER_SERVER";
public static final String ZOOKEEPER_LOCALHOST_REALM =
ZOOKEEPER_LOCALHOST + "@" + REALM;
public static final String ALICE = "alice";
public static final String ALICE_CLIENT_CONTEXT = "alice";
public static final String ALICE_LOCALHOST = "alice/localhost";
public static final String BOB = "bob";
public static final String BOB_CLIENT_CONTEXT = "bob";
public static final String BOB_LOCALHOST = "bob/localhost";
private static final Logger LOG =
LoggerFactory.getLogger(AbstractSecureRegistryTest.class);
public static final Configuration CONF;
static {
CONF = new Configuration();
CONF.set("hadoop.security.authentication", "kerberos");
CONF.setBoolean("hadoop.security.authorization", true);
}
private static final AddingCompositeService classTeardown =
new AddingCompositeService("classTeardown");
// static initializer guarantees it is always started
// ahead of any @BeforeClass methods
static {
classTeardown.init(CONF);
classTeardown.start();
}
public static final String SUN_SECURITY_KRB5_DEBUG =
"sun.security.krb5.debug";
private final AddingCompositeService teardown =
new AddingCompositeService("teardown");
protected static MiniKdc kdc;
protected static File keytab_zk;
protected static File keytab_bob;
protected static File keytab_alice;
protected static File kdcWorkDir;
protected static Properties kdcConf;
protected static RegistrySecurity registrySecurity;
@Rule
public final Timeout testTimeout = new Timeout(900000);
@Rule
public TestName methodName = new TestName();
protected MicroZookeeperService secureZK;
protected static File jaasFile;
private LoginContext zookeeperLogin;
private static String zkServerPrincipal;
/**
* All class initialization for this test class
* @throws Exception
*/
@BeforeClass
public static void beforeSecureRegistryTestClass() throws Exception {
registrySecurity = new RegistrySecurity("registrySecurity");
registrySecurity.init(CONF);
setupKDCAndPrincipals();
RegistrySecurity.clearJaasSystemProperties();
RegistrySecurity.bindJVMtoJAASFile(jaasFile);
initHadoopSecurity();
}
@AfterClass
public static void afterSecureRegistryTestClass() throws
Exception {
describe(LOG, "teardown of class");
classTeardown.close();
teardownKDC();
}
/**
* give our thread a name
*/
@Before
public void nameThread() {
Thread.currentThread().setName("JUnit");
}
/**
* For unknown reasons, the before-class setting of the JVM properties was
* not being picked up. This method addresses that by setting them
* before every test case
*/
@Before
public void beforeSecureRegistryTest() {
}
@After
public void afterSecureRegistryTest() throws IOException {
describe(LOG, "teardown of instance");
teardown.close();
stopSecureZK();
}
protected static void addToClassTeardown(Service svc) {
classTeardown.addService(svc);
}
protected void addToTeardown(Service svc) {
teardown.addService(svc);
}
public static void teardownKDC() throws Exception {
if (kdc != null) {
kdc.stop();
kdc = null;
}
}
/**
* Sets up the KDC and a set of principals in the JAAS file
*
* @throws Exception
*/
public static void setupKDCAndPrincipals() throws Exception {
// set up the KDC
File target = new File(System.getProperty("test.dir", "target"));
kdcWorkDir = new File(target, "kdc");
// create the KDC work dir, tolerating a directory left over from a previous run
if (!kdcWorkDir.mkdirs()) {
assertTrue(kdcWorkDir.isDirectory());
}
kdcConf = MiniKdc.createConf();
kdcConf.setProperty(MiniKdc.DEBUG, "true");
kdc = new MiniKdc(kdcConf, kdcWorkDir);
kdc.start();
keytab_zk = createKeytab(ZOOKEEPER, "zookeeper.keytab");
keytab_alice = createKeytab(ALICE, "alice.keytab");
keytab_bob = createKeytab(BOB, "bob.keytab");
zkServerPrincipal = Shell.WINDOWS ? ZOOKEEPER_1270001 : ZOOKEEPER_LOCALHOST;
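// on Windows the KDC principals resolve via 127.0.0.1 rather than "localhost", presumably due to hostname lookup differences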
StringBuilder jaas = new StringBuilder(1024);
jaas.append(registrySecurity.createJAASEntry(ZOOKEEPER_CLIENT_CONTEXT,
ZOOKEEPER, keytab_zk));
jaas.append(registrySecurity.createJAASEntry(ZOOKEEPER_SERVER_CONTEXT,
zkServerPrincipal, keytab_zk));
jaas.append(registrySecurity.createJAASEntry(ALICE_CLIENT_CONTEXT,
ALICE_LOCALHOST , keytab_alice));
jaas.append(registrySecurity.createJAASEntry(BOB_CLIENT_CONTEXT,
BOB_LOCALHOST, keytab_bob));
jaasFile = new File(kdcWorkDir, "jaas.txt");
FileUtils.write(jaasFile, jaas.toString());
LOG.info("\n"+ jaas);
RegistrySecurity.bindJVMtoJAASFile(jaasFile);
}
// auth_to_local rule: strip the realm from any EXAMPLE.COM principal, then fall back to DEFAULT
protected static final String kerberosRule =
"RULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nDEFAULT";
/**
* Init hadoop security by setting up the UGI config
*/
public static void initHadoopSecurity() {
UserGroupInformation.setConfiguration(CONF);
KerberosName.setRules(kerberosRule);
}
/**
* Stop the secure ZK and log out the ZK account
*/
public synchronized void stopSecureZK() {
ServiceOperations.stop(secureZK);
secureZK = null;
logout(zookeeperLogin);
zookeeperLogin = null;
}
public static MiniKdc getKdc() {
return kdc;
}
public static File getKdcWorkDir() {
return kdcWorkDir;
}
public static Properties getKdcConf() {
return kdcConf;
}
/**
* Create a secure instance
* @param name instance name
* @return the instance
* @throws Exception
*/
protected static MicroZookeeperService createSecureZKInstance(String name)
throws Exception {
String context = ZOOKEEPER_SERVER_CONTEXT;
Configuration conf = new Configuration();
File testdir = new File(System.getProperty("test.dir", "target"));
File workDir = new File(testdir, name);
if (!workDir.mkdirs()) {
assertTrue(workDir.isDirectory());
}
System.setProperty(
ZookeeperConfigOptions.PROP_ZK_SERVER_MAINTAIN_CONNECTION_DESPITE_SASL_FAILURE,
"false");
RegistrySecurity.validateContext(context);
conf.set(MicroZookeeperServiceKeys.KEY_REGISTRY_ZKSERVICE_JAAS_CONTEXT,
context);
MicroZookeeperService secureZK = new MicroZookeeperService(name);
secureZK.init(conf);
LOG.info(secureZK.getDiagnostics());
return secureZK;
}
/**
* Create the keytab for the given principal; includes the
* raw principal, $principal/localhost and $principal/127.0.0.1
* @param principal principal short name
* @param filename filename of keytab
* @return file of keytab
* @throws Exception
*/
public static File createKeytab(String principal,
String filename) throws Exception {
assertNotEmpty("empty principal", principal);
assertNotEmpty("empty host", filename);
assertNotNull("Null KDC", kdc);
File keytab = new File(kdcWorkDir, filename);
kdc.createPrincipal(keytab,
principal,
principal + "/localhost",
principal + "/127.0.0.1");
return keytab;
}
public static String getPrincipalAndRealm(String principal) {
return principal + "@" + getRealm();
}
protected static String getRealm() {
return kdc.getRealm();
}
/**
* Log in, defaulting to the client context
* @param principal principal
* @param context context
* @param keytab keytab
* @return the logged in context
* @throws LoginException failure to log in
* @throws FileNotFoundException no keytab
*/
protected LoginContext login(String principal,
String context, File keytab) throws LoginException,
FileNotFoundException {
LOG.info("Logging in as {} in context {} with keytab {}",
principal, context, keytab);
if (!keytab.exists()) {
throw new FileNotFoundException(keytab.getAbsolutePath());
}
Set<Principal> principals = new HashSet<Principal>();
principals.add(new KerberosPrincipal(principal));
Subject subject = new Subject(false, principals, new HashSet<Object>(),
new HashSet<Object>());
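// a writable Subject primed with the principal; the keytab supplies the credentials at login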
LoginContext login;
login = new LoginContext(context, subject, null,
KerberosConfiguration.createClientConfig(principal, keytab));
login.login();
return login;
}
/**
* Start the secure ZK instance using the test method name as the path.
* As the entry is saved to the {@link #secureZK} field, it
* is automatically stopped after the test case.
* @throws Exception on any failure
*/
protected synchronized void startSecureZK() throws Exception {
assertNull("Zookeeper is already running", secureZK);
zookeeperLogin = login(zkServerPrincipal,
ZOOKEEPER_SERVER_CONTEXT,
keytab_zk);
secureZK = createSecureZKInstance("test-" + methodName.getMethodName());
secureZK.start();
}
}
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureRMRegistryOperations.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.secure;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.PathPermissionException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.service.ServiceStateException;
import org.apache.hadoop.registry.client.api.RegistryConstants;
import org.apache.hadoop.registry.client.api.RegistryOperations;
import org.apache.hadoop.registry.client.api.RegistryOperationsFactory;
import org.apache.hadoop.registry.client.exceptions.NoPathPermissionsException;
import org.apache.hadoop.registry.client.impl.zk.ZKPathDumper;
import org.apache.hadoop.registry.client.impl.RegistryOperationsClient;
import org.apache.hadoop.registry.client.impl.zk.RegistrySecurity;
import org.apache.hadoop.registry.client.impl.zk.ZookeeperConfigOptions;
import org.apache.hadoop.registry.server.integration.RMRegistryOperationsService;
import org.apache.hadoop.registry.server.services.RegistryAdminService;
import org.apache.zookeeper.client.ZooKeeperSaslClient;
import org.apache.zookeeper.data.ACL;
import org.apache.zookeeper.data.Id;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.security.auth.login.LoginException;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import java.util.List;
import static org.apache.hadoop.registry.client.api.RegistryConstants.*;
/**
* Verify that the {@link RMRegistryOperationsService} works securely
*/
public class TestSecureRMRegistryOperations extends AbstractSecureRegistryTest {
private static final Logger LOG =
LoggerFactory.getLogger(TestSecureRMRegistryOperations.class);
private Configuration secureConf;
private Configuration zkClientConf;
private UserGroupInformation zookeeperUGI;
@Before
public void setupTestSecureRMRegistryOperations() throws Exception {
startSecureZK();
secureConf = new Configuration();
secureConf.setBoolean(KEY_REGISTRY_SECURE, true);
// create client conf containing the ZK quorum
zkClientConf = new Configuration(secureZK.getConfig());
zkClientConf.setBoolean(KEY_REGISTRY_SECURE, true);
assertNotEmpty(zkClientConf.get(RegistryConstants.KEY_REGISTRY_ZK_QUORUM));
// ZK is in charge: the zookeeper principal is the registry's system account
secureConf.set(KEY_REGISTRY_SYSTEM_ACCOUNTS, "sasl:zookeeper@");
zookeeperUGI = loginUGI(ZOOKEEPER, keytab_zk);
}
@After
public void teardownTestSecureRMRegistryOperations() {
}
/**
* Create the RM registry operations as the current user
* @return the service
* @throws LoginException
* @throws FileNotFoundException
*/
public RMRegistryOperationsService startRMRegistryOperations() throws
LoginException, IOException, InterruptedException {
// kerberos
secureConf.set(KEY_REGISTRY_CLIENT_AUTH,
REGISTRY_CLIENT_AUTH_KERBEROS);
secureConf.set(KEY_REGISTRY_CLIENT_JAAS_CONTEXT, ZOOKEEPER_CLIENT_CONTEXT);
RMRegistryOperationsService registryOperations = zookeeperUGI.doAs(
new PrivilegedExceptionAction<RMRegistryOperationsService>() {
@Override
public RMRegistryOperationsService run() throws Exception {
RMRegistryOperationsService operations
= new RMRegistryOperationsService("rmregistry", secureZK);
addToTeardown(operations);
operations.init(secureConf);
LOG.info(operations.bindingDiagnosticDetails());
operations.start();
return operations;
}
});
return registryOperations;
}
/**
* test that ZK can write as itself
* @throws Throwable
*/
@Test
public void testZookeeperCanWriteUnderSystem() throws Throwable {
RMRegistryOperationsService rmRegistryOperations =
startRMRegistryOperations();
RegistryOperations operations = rmRegistryOperations;
operations.mknode(PATH_SYSTEM_SERVICES + "hdfs",
false);
ZKPathDumper pathDumper = rmRegistryOperations.dumpPath(true);
LOG.info(pathDumper.toString());
}
@Test
public void testAnonReadAccess() throws Throwable {
RMRegistryOperationsService rmRegistryOperations =
startRMRegistryOperations();
describe(LOG, "testAnonReadAccess");
RegistryOperations operations =
RegistryOperationsFactory.createAnonymousInstance(zkClientConf);
addToTeardown(operations);
operations.start();
assertFalse("RegistrySecurity.isClientSASLEnabled()==true",
RegistrySecurity.isClientSASLEnabled());
operations.list(PATH_SYSTEM_SERVICES);
}
@Test
public void testAnonNoWriteAccess() throws Throwable {
RMRegistryOperationsService rmRegistryOperations =
startRMRegistryOperations();
describe(LOG, "testAnonNoWriteAccess");
RegistryOperations operations =
RegistryOperationsFactory.createAnonymousInstance(zkClientConf);
addToTeardown(operations);
operations.start();
String servicePath = PATH_SYSTEM_SERVICES + "hdfs";
expectMkNodeFailure(operations, servicePath);
}
@Test
public void testAnonNoWriteAccessOffRoot() throws Throwable {
RMRegistryOperationsService rmRegistryOperations =
startRMRegistryOperations();
describe(LOG, "testAnonNoWriteAccessOffRoot");
RegistryOperations operations =
RegistryOperationsFactory.createAnonymousInstance(zkClientConf);
addToTeardown(operations);
operations.start();
assertFalse("mknode(/)", operations.mknode("/", false));
expectMkNodeFailure(operations, "/sub");
expectDeleteFailure(operations, PATH_SYSTEM_SERVICES, true);
}
/**
* Expect a mknode operation to fail
* @param operations operations instance
* @param path path
* @throws IOException An IO failure other than those permitted
*/
public void expectMkNodeFailure(RegistryOperations operations,
String path) throws IOException {
try {
operations.mknode(path, false);
fail("should have failed to create a node under " + path);
} catch (PathPermissionException expected) {
// expected
} catch (NoPathPermissionsException expected) {
// expected
}
}
/**
* Expect a delete operation to fail
* @param operations operations instance
* @param path path
* @param recursive
* @throws IOException An IO failure other than those permitted
*/
public void expectDeleteFailure(RegistryOperations operations,
String path, boolean recursive) throws IOException {
try {
operations.delete(path, recursive);
fail("should have failed to delete the node " + path);
} catch (PathPermissionException expected) {
// expected
} catch (NoPathPermissionsException expected) {
// expected
}
}
@Test
public void testAlicePathRestrictedAnonAccess() throws Throwable {
RMRegistryOperationsService rmRegistryOperations =
startRMRegistryOperations();
String aliceHome = rmRegistryOperations.initUserRegistry(ALICE);
describe(LOG, "Creating anonymous accessor");
RegistryOperations anonOperations =
RegistryOperationsFactory.createAnonymousInstance(zkClientConf);
addToTeardown(anonOperations);
anonOperations.start();
anonOperations.list(aliceHome);
expectMkNodeFailure(anonOperations, aliceHome + "/anon");
expectDeleteFailure(anonOperations, aliceHome, true);
}
@Test
public void testUserZookeeperHomePathAccess() throws Throwable {
RMRegistryOperationsService rmRegistryOperations =
startRMRegistryOperations();
final String home = rmRegistryOperations.initUserRegistry(ZOOKEEPER);
describe(LOG, "Creating ZK client");
RegistryOperations operations = zookeeperUGI.doAs(
new PrivilegedExceptionAction<RegistryOperations>() {
@Override
public RegistryOperations run() throws Exception {
RegistryOperations operations =
RegistryOperationsFactory.createKerberosInstance(zkClientConf,
ZOOKEEPER_CLIENT_CONTEXT);
addToTeardown(operations);
operations.start();
return operations;
}
});
operations.list(home);
String path = home + "/subpath";
operations.mknode(path, false);
operations.delete(path, true);
}
@Test
public void testUserHomedirsPermissionsRestricted() throws Throwable {
// test that the /users/$user permissions are restricted
RMRegistryOperationsService rmRegistryOperations =
startRMRegistryOperations();
// create Alice's dir, so it should have an ACL for Alice
final String home = rmRegistryOperations.initUserRegistry(ALICE);
List<ACL> acls = rmRegistryOperations.zkGetACLS(home);
ACL aliceACL = null;
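// scan the home directory ACLs for the sasl entry that belongs to alice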
for (ACL acl : acls) {
LOG.info(RegistrySecurity.aclToString(acl));
Id id = acl.getId();
if (id.getScheme().equals(ZookeeperConfigOptions.SCHEME_SASL)
&& id.getId().startsWith(ALICE)) {
aliceACL = acl;
break;
}
}
assertNotNull(aliceACL);
assertEquals(RegistryAdminService.USER_HOMEDIR_ACL_PERMISSIONS,
aliceACL.getPerms());
}
@Test
public void testDigestAccess() throws Throwable {
RMRegistryOperationsService registryAdmin =
startRMRegistryOperations();
String id = "username";
String pass = "password";
registryAdmin.addWriteAccessor(id, pass);
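// the new accessor should surface as a digest-scheme entry in the client ACLs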
List<ACL> clientAcls = registryAdmin.getClientAcls();
LOG.info("Client ACLS=\n{}", RegistrySecurity.aclsToString(clientAcls));
String base = "/digested";
registryAdmin.mknode(base, false);
List<ACL> baseACLs = registryAdmin.zkGetACLS(base);
String aclset = RegistrySecurity.aclsToString(baseACLs);
LOG.info("Base ACLs=\n{}", aclset);
ACL found = null;
for (ACL acl : baseACLs) {
if (ZookeeperConfigOptions.SCHEME_DIGEST.equals(acl.getId().getScheme())) {
found = acl;
break;
}
}
assertNotNull("Did not find digest entry in ACLs " + aclset, found);
zkClientConf.set(KEY_REGISTRY_USER_ACCOUNTS,
"sasl:[email protected], sasl:other");
RegistryOperations operations =
RegistryOperationsFactory.createAuthenticatedInstance(zkClientConf,
id,
pass);
addToTeardown(operations);
operations.start();
RegistryOperationsClient operationsClient =
(RegistryOperationsClient) operations;
List<ACL> digestClientACLs = operationsClient.getClientAcls();
LOG.info("digest client ACLs=\n{}",
RegistrySecurity.aclsToString(digestClientACLs));
operations.stat(base);
operations.mknode(base + "/subdir", false);
ZKPathDumper pathDumper = registryAdmin.dumpPath(true);
LOG.info(pathDumper.toString());
}
@Test(expected = IllegalArgumentException.class)
public void testNoDigestAuthMissingId() throws Throwable {
RegistryOperationsFactory.createAuthenticatedInstance(zkClientConf,
"",
"pass");
}
@Test(expected = ServiceStateException.class)
public void testNoDigestAuthMissingId2() throws Throwable {
zkClientConf.set(KEY_REGISTRY_CLIENT_AUTH, REGISTRY_CLIENT_AUTH_DIGEST);
zkClientConf.set(KEY_REGISTRY_CLIENT_AUTHENTICATION_ID, "");
zkClientConf.set(KEY_REGISTRY_CLIENT_AUTHENTICATION_PASSWORD, "pass");
RegistryOperationsFactory.createInstance("DigestRegistryOperations",
zkClientConf);
}
@Test(expected = IllegalArgumentException.class)
public void testNoDigestAuthMissingPass() throws Throwable {
RegistryOperationsFactory.createAuthenticatedInstance(zkClientConf,
"id",
"");
}
@Test(expected = ServiceStateException.class)
public void testNoDigestAuthMissingPass2() throws Throwable {
zkClientConf.set(KEY_REGISTRY_CLIENT_AUTH, REGISTRY_CLIENT_AUTH_DIGEST);
zkClientConf.set(KEY_REGISTRY_CLIENT_AUTHENTICATION_ID, "id");
zkClientConf.set(KEY_REGISTRY_CLIENT_AUTHENTICATION_PASSWORD, "");
RegistryOperationsFactory.createInstance("DigestRegistryOperations",
zkClientConf);
}
}
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureLogins.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.secure;
import com.sun.security.auth.module.Krb5LoginModule;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.security.HadoopKerberosName;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.util.KerberosName;
import org.apache.hadoop.security.authentication.util.KerberosUtil;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.registry.client.impl.zk.RegistrySecurity;
import org.apache.hadoop.registry.client.impl.zk.ZookeeperConfigOptions;
import org.apache.zookeeper.Environment;
import org.apache.zookeeper.data.ACL;
import org.junit.Assume;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.security.auth.Subject;
import javax.security.auth.kerberos.KerberosPrincipal;
import javax.security.auth.login.LoginContext;
import javax.security.auth.login.LoginException;
import java.io.File;
import java.io.IOException;
import java.security.Principal;
import java.security.PrivilegedExceptionAction;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
/**
* Verify that logins work
*/
public class TestSecureLogins extends AbstractSecureRegistryTest {
private static final Logger LOG =
LoggerFactory.getLogger(TestSecureLogins.class);
@Test
public void testHasRealm() throws Throwable {
assertNotNull(getRealm());
LOG.info("ZK principal = {}", getPrincipalAndRealm(ZOOKEEPER_LOCALHOST));
}
@Test
public void testJaasFileSetup() throws Throwable {
// JVM setup of the JAAS file property has been inconsistent, so verify it here
assertNotNull("jaasFile", jaasFile);
String confFilename = System.getProperty(Environment.JAAS_CONF_KEY);
assertEquals(jaasFile.getAbsolutePath(), confFilename);
}
@Test
public void testJaasFileBinding() throws Throwable {
// JVM setup of the JAAS file property has been inconsistent, so rebind and verify it here
assertNotNull("jaasFile", jaasFile);
RegistrySecurity.bindJVMtoJAASFile(jaasFile);
String confFilename = System.getProperty(Environment.JAAS_CONF_KEY);
assertEquals(jaasFile.getAbsolutePath(), confFilename);
}
@Test
public void testClientLogin() throws Throwable {
LoginContext client = login(ALICE_LOCALHOST,
ALICE_CLIENT_CONTEXT,
keytab_alice);
try {
logLoginDetails(ALICE_LOCALHOST, client);
String confFilename = System.getProperty(Environment.JAAS_CONF_KEY);
assertNotNull("Unset: "+ Environment.JAAS_CONF_KEY, confFilename);
String config = FileUtils.readFileToString(new File(confFilename));
LOG.info("{}=\n{}", confFilename, config);
RegistrySecurity.setZKSaslClientProperties(ALICE, ALICE_CLIENT_CONTEXT);
} finally {
client.logout();
}
}
@Test
public void testZKServerContextLogin() throws Throwable {
LoginContext client = login(ZOOKEEPER_LOCALHOST,
ZOOKEEPER_SERVER_CONTEXT,
keytab_zk);
logLoginDetails(ZOOKEEPER_LOCALHOST, client);
client.logout();
}
@Test
public void testServerLogin() throws Throwable {
LoginContext loginContext = createLoginContextZookeeperLocalhost();
loginContext.login();
loginContext.logout();
}
public LoginContext createLoginContextZookeeperLocalhost() throws
LoginException {
String principalAndRealm = getPrincipalAndRealm(ZOOKEEPER_LOCALHOST);
Set<Principal> principals = new HashSet<Principal>();
principals.add(new KerberosPrincipal(ZOOKEEPER_LOCALHOST));
Subject subject = new Subject(false, principals, new HashSet<Object>(),
new HashSet<Object>());
return new LoginContext("", subject, null,
KerberosConfiguration.createServerConfig(ZOOKEEPER_LOCALHOST, keytab_zk));
}
@Test
public void testKerberosAuth() throws Throwable {
File krb5conf = getKdc().getKrb5conf();
String krbConfig = FileUtils.readFileToString(krb5conf);
LOG.info("krb5.conf at {}:\n{}", krb5conf, krbConfig);
Subject subject = new Subject();
final Krb5LoginModule krb5LoginModule = new Krb5LoginModule();
final Map<String, String> options = new HashMap<String, String>();
options.put("keyTab", keytab_alice.getAbsolutePath());
options.put("principal", ALICE_LOCALHOST);
options.put("debug", "true");
options.put("doNotPrompt", "true");
options.put("isInitiator", "true");
options.put("refreshKrb5Config", "true");
options.put("renewTGT", "true");
options.put("storeKey", "true");
options.put("useKeyTab", "true");
options.put("useTicketCache", "true");
krb5LoginModule.initialize(subject, null,
new HashMap<String, String>(),
options);
boolean loginOk = krb5LoginModule.login();
assertTrue("Failed to login", loginOk);
boolean commitOk = krb5LoginModule.commit();
assertTrue("Failed to Commit", commitOk);
}
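  // Illustrative sketch, not part of the original suite: the JAAS options
  // assembled by hand in testKerberosAuth above can equally be obtained
  // from the KerberosConfiguration helper in this package. Nothing below
  // is invoked by the other tests.
  private javax.security.auth.login.Configuration aliceClientJaasConfig() {
    return KerberosConfiguration.createClientConfig(ALICE_LOCALHOST,
        keytab_alice);
  }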
@Test
public void testDefaultRealmValid() throws Throwable {
String defaultRealm = KerberosUtil.getDefaultRealm();
assertNotEmpty("No default Kerberos Realm",
defaultRealm);
LOG.info("Default Realm '{}'", defaultRealm);
}
@Test
public void testKerberosRulesValid() throws Throwable {
assertTrue("!KerberosName.hasRulesBeenSet()",
KerberosName.hasRulesBeenSet());
String rules = KerberosName.getRules();
assertEquals(kerberosRule, rules);
LOG.info(rules);
}
@Test
public void testValidKerberosName() throws Throwable {
new HadoopKerberosName(ZOOKEEPER).getShortName();
new HadoopKerberosName(ZOOKEEPER_LOCALHOST).getShortName();
new HadoopKerberosName(ZOOKEEPER_REALM).getShortName();
// standard rules don't pick this up
// new HadoopKerberosName(ZOOKEEPER_LOCALHOST_REALM).getShortName();
}
@Test
public void testUGILogin() throws Throwable {
UserGroupInformation ugi = loginUGI(ZOOKEEPER, keytab_zk);
RegistrySecurity.UgiInfo ugiInfo =
new RegistrySecurity.UgiInfo(ugi);
LOG.info("logged in as: {}", ugiInfo);
assertTrue("security is not enabled: " + ugiInfo,
UserGroupInformation.isSecurityEnabled());
assertTrue("login is keytab based: " + ugiInfo,
ugi.isFromKeytab());
// now we are here, build a SASL ACL
ACL acl = ugi.doAs(new PrivilegedExceptionAction<ACL>() {
@Override
public ACL run() throws Exception {
return registrySecurity.createSaslACLFromCurrentUser(0);
}
});
assertEquals(ZOOKEEPER_REALM, acl.getId().getId());
assertEquals(ZookeeperConfigOptions.SCHEME_SASL, acl.getId().getScheme());
registrySecurity.addSystemACL(acl);
}
}
| 7,522 | 33.990698 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestRegistrySecurityHelper.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.secure;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.registry.client.api.RegistryConstants;
import org.apache.hadoop.registry.client.impl.zk.RegistrySecurity;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.data.ACL;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.List;
import static org.apache.hadoop.registry.client.api.RegistryConstants.*;
/**
* Test for registry security operations
*/
public class TestRegistrySecurityHelper extends Assert {
private static final Logger LOG =
LoggerFactory.getLogger(TestRegistrySecurityHelper.class);
public static final String YARN_EXAMPLE_COM = "[email protected]";
public static final String SASL_YARN_EXAMPLE_COM =
"sasl:" + YARN_EXAMPLE_COM;
public static final String MAPRED_EXAMPLE_COM = "[email protected]";
public static final String SASL_MAPRED_EXAMPLE_COM =
"sasl:" + MAPRED_EXAMPLE_COM;
public static final String SASL_MAPRED_APACHE = "sasl:mapred@APACHE";
public static final String DIGEST_F0AF = "digest:f0afbeeb00baa";
public static final String SASL_YARN_SHORT = "sasl:yarn@";
public static final String SASL_MAPRED_SHORT = "sasl:mapred@";
public static final String REALM_EXAMPLE_COM = "example.com";
private static RegistrySecurity registrySecurity;
@BeforeClass
public static void setupTestRegistrySecurityHelper() throws IOException {
Configuration conf = new Configuration();
conf.setBoolean(KEY_REGISTRY_SECURE, true);
conf.set(KEY_REGISTRY_KERBEROS_REALM, "KERBEROS");
registrySecurity = new RegistrySecurity("");
// init the ACLs OUTSIDE A KERBEROS CLUSTER
registrySecurity.init(conf);
}
@Test
public void testACLSplitRealmed() throws Throwable {
List<String> pairs =
registrySecurity.splitAclPairs(
SASL_YARN_EXAMPLE_COM +
", " +
SASL_MAPRED_EXAMPLE_COM,
"");
assertEquals(SASL_YARN_EXAMPLE_COM, pairs.get(0));
assertEquals(SASL_MAPRED_EXAMPLE_COM, pairs.get(1));
}
@Test
public void testBuildAclsRealmed() throws Throwable {
List<ACL> acls = registrySecurity.buildACLs(
SASL_YARN_EXAMPLE_COM +
", " +
SASL_MAPRED_EXAMPLE_COM,
"",
ZooDefs.Perms.ALL);
assertEquals(YARN_EXAMPLE_COM, acls.get(0).getId().getId());
assertEquals(MAPRED_EXAMPLE_COM, acls.get(1).getId().getId());
}
@Test
public void testACLDefaultRealm() throws Throwable {
List<String> pairs =
registrySecurity.splitAclPairs(
SASL_YARN_SHORT +
", " +
SASL_MAPRED_SHORT,
REALM_EXAMPLE_COM);
assertEquals(SASL_YARN_EXAMPLE_COM, pairs.get(0));
assertEquals(SASL_MAPRED_EXAMPLE_COM, pairs.get(1));
}
@Test
public void testBuildAclsDefaultRealm() throws Throwable {
List<ACL> acls = registrySecurity.buildACLs(
SASL_YARN_SHORT +
", " +
SASL_MAPRED_SHORT,
REALM_EXAMPLE_COM, ZooDefs.Perms.ALL);
assertEquals(YARN_EXAMPLE_COM, acls.get(0).getId().getId());
assertEquals(MAPRED_EXAMPLE_COM, acls.get(1).getId().getId());
}
@Test
public void testACLSplitNullRealm() throws Throwable {
List<String> pairs =
registrySecurity.splitAclPairs(
SASL_YARN_SHORT +
", " +
SASL_MAPRED_SHORT,
"");
assertEquals(SASL_YARN_SHORT, pairs.get(0));
assertEquals(SASL_MAPRED_SHORT, pairs.get(1));
}
@Test(expected = IllegalArgumentException.class)
public void testBuildAclsNullRealm() throws Throwable {
registrySecurity.buildACLs(
SASL_YARN_SHORT +
", " +
SASL_MAPRED_SHORT,
"", ZooDefs.Perms.ALL);
fail("");
}
@Test
public void testACLDefaultRealmOnlySASL() throws Throwable {
List<String> pairs =
registrySecurity.splitAclPairs(
SASL_YARN_SHORT +
", " +
DIGEST_F0AF,
REALM_EXAMPLE_COM);
assertEquals(SASL_YARN_EXAMPLE_COM, pairs.get(0));
assertEquals(DIGEST_F0AF, pairs.get(1));
}
@Test
public void testACLSplitMixed() throws Throwable {
List<String> pairs =
registrySecurity.splitAclPairs(
SASL_YARN_SHORT +
", " +
SASL_MAPRED_APACHE +
", ,," +
DIGEST_F0AF,
REALM_EXAMPLE_COM);
assertEquals(SASL_YARN_EXAMPLE_COM, pairs.get(0));
assertEquals(SASL_MAPRED_APACHE, pairs.get(1));
assertEquals(DIGEST_F0AF, pairs.get(2));
}
@Test
public void testDefaultAClsValid() throws Throwable {
registrySecurity.buildACLs(
RegistryConstants.DEFAULT_REGISTRY_SYSTEM_ACCOUNTS,
REALM_EXAMPLE_COM, ZooDefs.Perms.ALL);
}
@Test
public void testDefaultRealm() throws Throwable {
String realm = RegistrySecurity.getDefaultRealmInJVM();
LOG.info("Realm {}", realm);
}
@Test
public void testUGIProperties() throws Throwable {
UserGroupInformation user = UserGroupInformation.getCurrentUser();
ACL acl = registrySecurity.createACLForUser(user, ZooDefs.Perms.ALL);
assertFalse(RegistrySecurity.ALL_READWRITE_ACCESS.equals(acl));
LOG.info("User {} has ACL {}", user, acl);
}
@Test
public void testSecurityImpliesKerberos() throws Throwable {
Configuration conf = new Configuration();
conf.setBoolean("hadoop.security.authentication", true);
conf.setBoolean(KEY_REGISTRY_SECURE, true);
conf.set(KEY_REGISTRY_KERBEROS_REALM, "KERBEROS");
RegistrySecurity security = new RegistrySecurity("registry security");
try {
      security.init(conf);
      fail("Expected an exception referencing " + RegistrySecurity.E_NO_KERBEROS);
    } catch (Exception e) {
assertTrue(
"did not find "+ RegistrySecurity.E_NO_KERBEROS + " in " + e,
e.toString().contains(RegistrySecurity.E_NO_KERBEROS));
}
}
}
| 6,828 | 31.212264 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureRegistry.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.secure;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.ServiceOperations;
import org.apache.hadoop.registry.client.impl.zk.ZKPathDumper;
import org.apache.hadoop.registry.client.impl.zk.CuratorService;
import org.apache.hadoop.registry.client.impl.zk.RegistrySecurity;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.Login;
import org.apache.zookeeper.server.ZooKeeperSaslServer;
import org.apache.zookeeper.server.auth.SaslServerCallbackHandler;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.security.auth.login.AppConfigurationEntry;
import javax.security.auth.login.LoginContext;
import static org.apache.hadoop.registry.client.api.RegistryConstants.*;
/**
* Verify that the Mini ZK service can be started up securely
*/
public class TestSecureRegistry extends AbstractSecureRegistryTest {
private static final Logger LOG =
LoggerFactory.getLogger(TestSecureRegistry.class);
@Before
public void beforeTestSecureZKService() throws Throwable {
enableKerberosDebugging();
}
@After
public void afterTestSecureZKService() throws Throwable {
disableKerberosDebugging();
RegistrySecurity.clearZKSaslClientProperties();
}
/**
   * This is a cut-and-paste of some ZK internal code that was
   * failing on Windows and swallowing its exceptions.
*/
@Test
public void testLowlevelZKSaslLogin() throws Throwable {
RegistrySecurity.bindZKToServerJAASContext(ZOOKEEPER_SERVER_CONTEXT);
String serverSection =
System.getProperty(ZooKeeperSaslServer.LOGIN_CONTEXT_NAME_KEY,
ZooKeeperSaslServer.DEFAULT_LOGIN_CONTEXT_NAME);
assertEquals(ZOOKEEPER_SERVER_CONTEXT, serverSection);
    AppConfigurationEntry[] entries =
        javax.security.auth.login.Configuration.getConfiguration()
            .getAppConfigurationEntry(serverSection);
assertNotNull("null entries", entries);
SaslServerCallbackHandler saslServerCallbackHandler =
new SaslServerCallbackHandler(
javax.security.auth.login.Configuration.getConfiguration());
Login login = new Login(serverSection, saslServerCallbackHandler);
try {
login.startThreadIfNeeded();
} finally {
login.shutdown();
}
}
@Test
public void testCreateSecureZK() throws Throwable {
startSecureZK();
secureZK.stop();
}
@Test
public void testInsecureClientToZK() throws Throwable {
startSecureZK();
userZookeeperToCreateRoot();
RegistrySecurity.clearZKSaslClientProperties();
CuratorService curatorService =
startCuratorServiceInstance("insecure client", false);
curatorService.zkList("/");
curatorService.zkMkPath("", CreateMode.PERSISTENT, false,
RegistrySecurity.WorldReadWriteACL);
}
/**
   * Test that ZK can write as itself.
* @throws Throwable
*/
@Test
public void testZookeeperCanWrite() throws Throwable {
System.setProperty("curator-log-events", "true");
startSecureZK();
CuratorService curator = null;
LoginContext login = login(ZOOKEEPER_LOCALHOST,
ZOOKEEPER_CLIENT_CONTEXT,
keytab_zk);
try {
logLoginDetails(ZOOKEEPER, login);
RegistrySecurity.setZKSaslClientProperties(ZOOKEEPER,
ZOOKEEPER_CLIENT_CONTEXT);
curator = startCuratorServiceInstance("ZK", true);
LOG.info(curator.toString());
addToTeardown(curator);
curator.zkMkPath("/", CreateMode.PERSISTENT, false,
RegistrySecurity.WorldReadWriteACL);
curator.zkList("/");
curator.zkMkPath("/zookeeper", CreateMode.PERSISTENT, false,
RegistrySecurity.WorldReadWriteACL);
} finally {
logout(login);
ServiceOperations.stop(curator);
}
}
/**
* Start a curator service instance
* @param name name
* @param secure flag to indicate the cluster is secure
   * @return an initialized and started curator service
*/
protected CuratorService startCuratorServiceInstance(String name,
boolean secure) {
Configuration clientConf = new Configuration();
clientConf.set(KEY_REGISTRY_ZK_ROOT, "/");
clientConf.setBoolean(KEY_REGISTRY_SECURE, secure);
describe(LOG, "Starting Curator service");
CuratorService curatorService = new CuratorService(name, secureZK);
curatorService.init(clientConf);
curatorService.start();
LOG.info("Curator Binding {}",
curatorService.bindingDiagnosticDetails());
return curatorService;
}
/**
   * Have the ZK user create the root dir.
   * This logs out the ZK user afterwards and stops its curator instance,
   * to avoid contamination.
* @throws Throwable
*/
public void userZookeeperToCreateRoot() throws Throwable {
System.setProperty("curator-log-events", "true");
CuratorService curator = null;
LoginContext login = login(ZOOKEEPER_LOCALHOST,
ZOOKEEPER_CLIENT_CONTEXT,
keytab_zk);
try {
logLoginDetails(ZOOKEEPER, login);
RegistrySecurity.setZKSaslClientProperties(ZOOKEEPER,
ZOOKEEPER_CLIENT_CONTEXT);
curator = startCuratorServiceInstance("ZK", true);
LOG.info(curator.toString());
addToTeardown(curator);
curator.zkMkPath("/", CreateMode.PERSISTENT, false,
RegistrySecurity.WorldReadWriteACL);
ZKPathDumper pathDumper = curator.dumpPath(true);
LOG.info(pathDumper.toString());
} finally {
logout(login);
ServiceOperations.stop(curator);
}
}
}
| 6,537 | 33.052083 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/KerberosConfiguration.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.secure;
import org.apache.hadoop.security.authentication.util.KerberosUtil;
import javax.security.auth.login.AppConfigurationEntry;
import java.io.File;
import java.util.HashMap;
import java.util.Map;
class KerberosConfiguration extends javax.security.auth.login.Configuration {
private String principal;
private String keytab;
private boolean isInitiator;
KerberosConfiguration(String principal, File keytab,
boolean client) {
this.principal = principal;
this.keytab = keytab.getAbsolutePath();
this.isInitiator = client;
}
public static javax.security.auth.login.Configuration createClientConfig(
String principal,
File keytab) {
return new KerberosConfiguration(principal, keytab, true);
}
public static javax.security.auth.login.Configuration createServerConfig(
String principal,
File keytab) {
return new KerberosConfiguration(principal, keytab, false);
}
@Override
public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
Map<String, String> options = new HashMap<String, String>();
options.put("keyTab", keytab);
options.put("principal", principal);
options.put("useKeyTab", "true");
options.put("storeKey", "true");
options.put("doNotPrompt", "true");
options.put("useTicketCache", "true");
options.put("renewTGT", "true");
options.put("refreshKrb5Config", "true");
options.put("isInitiator", Boolean.toString(isInitiator));
String ticketCache = System.getenv("KRB5CCNAME");
if (ticketCache != null) {
options.put("ticketCache", ticketCache);
}
options.put("debug", "true");
return new AppConfigurationEntry[]{
new AppConfigurationEntry(KerberosUtil.getKrb5LoginModuleName(),
AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
options)
};
}
@Override
public String toString() {
return "KerberosConfiguration with principal " + principal;
}
}
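// Usage sketch (added for illustration; not in the original file). A
// LoginContext can consume this configuration directly; the principal
// and keytab below are placeholders:
//
//   LoginContext lc = new LoginContext("", null, null,
//       KerberosConfiguration.createClientConfig(
//           "alice/localhost@EXAMPLE.COM", new File("alice.keytab")));
//   lc.login();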
| 2,806 | 33.231707 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/operations/TestRegistryOperations.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.operations;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
import org.apache.hadoop.fs.PathNotFoundException;
import org.apache.hadoop.registry.AbstractRegistryTest;
import org.apache.hadoop.registry.client.api.BindFlags;
import org.apache.hadoop.registry.client.binding.RegistryTypeUtils;
import org.apache.hadoop.registry.client.binding.RegistryUtils;
import org.apache.hadoop.registry.client.binding.RegistryPathUtils;
import org.apache.hadoop.registry.client.exceptions.NoRecordException;
import org.apache.hadoop.registry.client.types.yarn.PersistencePolicies;
import org.apache.hadoop.registry.client.types.RegistryPathStatus;
import org.apache.hadoop.registry.client.types.ServiceRecord;
import org.apache.hadoop.registry.client.types.yarn.YarnRegistryAttributes;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class TestRegistryOperations extends AbstractRegistryTest {
protected static final Logger LOG =
LoggerFactory.getLogger(TestRegistryOperations.class);
@Test
public void testPutGetServiceEntry() throws Throwable {
ServiceRecord written = putExampleServiceEntry(ENTRY_PATH, 0,
PersistencePolicies.APPLICATION);
ServiceRecord resolved = operations.resolve(ENTRY_PATH);
validateEntry(resolved);
assertMatches(written, resolved);
}
@Test
public void testDeleteServiceEntry() throws Throwable {
putExampleServiceEntry(ENTRY_PATH, 0);
operations.delete(ENTRY_PATH, false);
}
@Test
public void testDeleteNonexistentEntry() throws Throwable {
operations.delete(ENTRY_PATH, false);
operations.delete(ENTRY_PATH, true);
}
@Test
public void testStat() throws Throwable {
putExampleServiceEntry(ENTRY_PATH, 0);
RegistryPathStatus stat = operations.stat(ENTRY_PATH);
assertTrue(stat.size > 0);
assertTrue(stat.time > 0);
assertEquals(NAME, stat.path);
}
@Test
public void testLsParent() throws Throwable {
ServiceRecord written = putExampleServiceEntry(ENTRY_PATH, 0);
RegistryPathStatus stat = operations.stat(ENTRY_PATH);
List<String> children = operations.list(PARENT_PATH);
assertEquals(1, children.size());
assertEquals(NAME, children.get(0));
Map<String, RegistryPathStatus> childStats =
RegistryUtils.statChildren(operations, PARENT_PATH);
assertEquals(1, childStats.size());
assertEquals(stat, childStats.get(NAME));
Map<String, ServiceRecord> records =
RegistryUtils.extractServiceRecords(operations,
PARENT_PATH,
childStats.values());
assertEquals(1, records.size());
ServiceRecord record = records.get(ENTRY_PATH);
RegistryTypeUtils.validateServiceRecord(ENTRY_PATH, record);
assertMatches(written, record);
}
@Test
public void testDeleteNonEmpty() throws Throwable {
putExampleServiceEntry(ENTRY_PATH, 0);
try {
operations.delete(PARENT_PATH, false);
fail("Expected a failure");
} catch (PathIsNotEmptyDirectoryException expected) {
// expected; ignore
}
operations.delete(PARENT_PATH, true);
}
@Test(expected = PathNotFoundException.class)
public void testStatEmptyPath() throws Throwable {
operations.stat(ENTRY_PATH);
}
@Test(expected = PathNotFoundException.class)
public void testLsEmptyPath() throws Throwable {
operations.list(PARENT_PATH);
}
@Test(expected = PathNotFoundException.class)
public void testResolveEmptyPath() throws Throwable {
operations.resolve(ENTRY_PATH);
}
@Test
public void testMkdirNoParent() throws Throwable {
String path = ENTRY_PATH + "/missing";
try {
operations.mknode(path, false);
RegistryPathStatus stat = operations.stat(path);
fail("Got a status " + stat);
} catch (PathNotFoundException expected) {
// expected
}
}
@Test
public void testDoubleMkdir() throws Throwable {
operations.mknode(USERPATH, false);
String path = USERPATH + "newentry";
assertTrue(operations.mknode(path, false));
operations.stat(path);
assertFalse(operations.mknode(path, false));
}
@Test
public void testPutNoParent() throws Throwable {
ServiceRecord record = new ServiceRecord();
record.set(YarnRegistryAttributes.YARN_ID, "testPutNoParent");
String path = "/path/without/parent";
try {
operations.bind(path, record, 0);
// didn't get a failure
// trouble
RegistryPathStatus stat = operations.stat(path);
fail("Got a status " + stat);
} catch (PathNotFoundException expected) {
// expected
}
}
@Test
public void testPutMinimalRecord() throws Throwable {
String path = "/path/with/minimal";
operations.mknode(path, true);
ServiceRecord record = new ServiceRecord();
operations.bind(path, record, BindFlags.OVERWRITE);
ServiceRecord resolve = operations.resolve(path);
assertMatches(record, resolve);
}
@Test(expected = PathNotFoundException.class)
public void testPutNoParent2() throws Throwable {
ServiceRecord record = new ServiceRecord();
record.set(YarnRegistryAttributes.YARN_ID, "testPutNoParent");
String path = "/path/without/parent";
operations.bind(path, record, 0);
}
@Test
public void testStatDirectory() throws Throwable {
String empty = "/empty";
operations.mknode(empty, false);
operations.stat(empty);
}
@Test
public void testStatRootPath() throws Throwable {
operations.mknode("/", false);
operations.stat("/");
operations.list("/");
operations.list("/");
}
@Test
public void testStatOneLevelDown() throws Throwable {
operations.mknode("/subdir", true);
operations.stat("/subdir");
}
@Test
public void testLsRootPath() throws Throwable {
String empty = "/";
operations.mknode(empty, false);
operations.stat(empty);
}
@Test
public void testResolvePathThatHasNoEntry() throws Throwable {
String empty = "/empty2";
operations.mknode(empty, false);
try {
ServiceRecord record = operations.resolve(empty);
fail("expected an exception, got " + record);
} catch (NoRecordException expected) {
// expected
}
}
@Test
public void testOverwrite() throws Throwable {
ServiceRecord written = putExampleServiceEntry(ENTRY_PATH, 0);
ServiceRecord resolved1 = operations.resolve(ENTRY_PATH);
resolved1.description = "resolved1";
try {
operations.bind(ENTRY_PATH, resolved1, 0);
fail("overwrite succeeded when it should have failed");
} catch (FileAlreadyExistsException expected) {
// expected
}
    // verify there's no change
ServiceRecord resolved2 = operations.resolve(ENTRY_PATH);
assertMatches(written, resolved2);
operations.bind(ENTRY_PATH, resolved1, BindFlags.OVERWRITE);
ServiceRecord resolved3 = operations.resolve(ENTRY_PATH);
assertMatches(resolved1, resolved3);
}
@Test
public void testPutGetContainerPersistenceServiceEntry() throws Throwable {
String path = ENTRY_PATH;
ServiceRecord written = buildExampleServiceEntry(
PersistencePolicies.CONTAINER);
operations.mknode(RegistryPathUtils.parentOf(path), true);
operations.bind(path, written, BindFlags.CREATE);
ServiceRecord resolved = operations.resolve(path);
validateEntry(resolved);
assertMatches(written, resolved);
}
@Test
public void testAddingWriteAccessIsNoOpEntry() throws Throwable {
assertFalse(operations.addWriteAccessor("id","pass"));
operations.clearWriteAccessors();
}
@Test
public void testListListFully() throws Throwable {
ServiceRecord r1 = new ServiceRecord();
ServiceRecord r2 = createRecord("i",
PersistencePolicies.PERMANENT, "r2");
    String path = USERPATH + SC_HADOOP + "/listing";
operations.mknode(path, true);
String r1path = path + "/r1";
operations.bind(r1path, r1, 0);
String r2path = path + "/r2";
operations.bind(r2path, r2, 0);
RegistryPathStatus r1stat = operations.stat(r1path);
assertEquals("r1", r1stat.path);
RegistryPathStatus r2stat = operations.stat(r2path);
assertEquals("r2", r2stat.path);
assertNotEquals(r1stat, r2stat);
// listings now
List<String> list = operations.list(path);
assertEquals("Wrong no. of children", 2, list.size());
// there's no order here, so create one
Map<String, String> names = new HashMap<String, String>();
String entries = "";
for (String child : list) {
names.put(child, child);
entries += child + " ";
}
assertTrue("No 'r1' in " + entries,
names.containsKey("r1"));
assertTrue("No 'r2' in " + entries,
names.containsKey("r2"));
Map<String, RegistryPathStatus> stats =
RegistryUtils.statChildren(operations, path);
assertEquals("Wrong no. of children", 2, stats.size());
assertEquals(r1stat, stats.get("r1"));
assertEquals(r2stat, stats.get("r2"));
}
@Test
public void testComplexUsernames() throws Throwable {
operations.mknode("/users/user with spaces", true);
operations.mknode("/users/user-with_underscores", true);
operations.mknode("/users/000000", true);
operations.mknode("/users/-storm", true);
operations.mknode("/users/windows\\ user", true);
String home = RegistryUtils.homePathForUser("\u0413PA\u0414_3");
operations.mknode(home, true);
operations.mknode(
RegistryUtils.servicePath(home, "service.class", "service 4_5"),
true);
operations.mknode(
RegistryUtils.homePathForUser("[email protected]"),
true);
operations.mknode(
RegistryUtils.homePathForUser("hbase/[email protected]"),
true);
home = RegistryUtils.homePathForUser("ADMINISTRATOR/127.0.0.1");
assertTrue("No 'administrator' in " + home, home.contains("administrator"));
operations.mknode(
home,
true);
}
}
| 10,913 | 31.873494 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/cli/RegistryCli.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.cli;
import static org.apache.hadoop.registry.client.binding.RegistryTypeUtils.*;
import java.io.Closeable;
import java.io.IOException;
import java.io.PrintStream;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.List;
import java.util.Map;
import com.google.common.base.Preconditions;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.PathNotFoundException;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.service.ServiceOperations;
import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.registry.client.api.BindFlags;
import org.apache.hadoop.registry.client.api.RegistryOperations;
import org.apache.hadoop.registry.client.api.RegistryOperationsFactory;
import org.apache.hadoop.registry.client.exceptions.AuthenticationFailedException;
import org.apache.hadoop.registry.client.exceptions.InvalidPathnameException;
import org.apache.hadoop.registry.client.exceptions.InvalidRecordException;
import org.apache.hadoop.registry.client.exceptions.NoPathPermissionsException;
import org.apache.hadoop.registry.client.exceptions.NoRecordException;
import org.apache.hadoop.registry.client.types.Endpoint;
import org.apache.hadoop.registry.client.types.ProtocolTypes;
import org.apache.hadoop.registry.client.types.ServiceRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Command line for registry operations.
*/
public class RegistryCli extends Configured implements Tool, Closeable {
private static final Logger LOG =
LoggerFactory.getLogger(RegistryCli.class);
protected final PrintStream sysout;
protected final PrintStream syserr;
private RegistryOperations registry;
private static final String LS_USAGE = "ls pathName";
private static final String RESOLVE_USAGE = "resolve pathName";
private static final String BIND_USAGE =
"bind -inet -api apiName -p portNumber -h hostName pathName" + "\n"
+ "bind -webui uriString -api apiName pathName" + "\n"
+ "bind -rest uriString -api apiName pathName";
private static final String MKNODE_USAGE = "mknode directoryName";
private static final String RM_USAGE = "rm pathName";
private static final String USAGE =
"\n" + LS_USAGE + "\n" + RESOLVE_USAGE + "\n" + BIND_USAGE + "\n" +
MKNODE_USAGE + "\n" + RM_USAGE;
public RegistryCli(PrintStream sysout, PrintStream syserr) {
Configuration conf = new Configuration();
super.setConf(conf);
registry = RegistryOperationsFactory.createInstance(conf);
registry.start();
this.sysout = sysout;
this.syserr = syserr;
}
public RegistryCli(RegistryOperations reg,
Configuration conf,
PrintStream sysout,
PrintStream syserr) {
super(conf);
Preconditions.checkArgument(reg != null, "Null registry");
registry = reg;
this.sysout = sysout;
this.syserr = syserr;
}
@SuppressWarnings("UseOfSystemOutOrSystemErr")
public static void main(String[] args) throws Exception {
int res = -1;
try (RegistryCli cli = new RegistryCli(System.out, System.err)) {
res = ToolRunner.run(cli, args);
} catch (Exception e) {
ExitUtil.terminate(res, e);
}
ExitUtil.terminate(res);
}
/**
* Close the object by stopping the registry.
* <p>
* <i>Important:</i>
* <p>
* After this call is made, no operations may be made of this
* object, <i>or of a YARN registry instance used when constructing
* this object. </i>
* @throws IOException
*/
@Override
public void close() throws IOException {
ServiceOperations.stopQuietly(registry);
registry = null;
}
private int usageError(String err, String usage) {
syserr.println("Error: " + err);
syserr.println("Usage: " + usage);
return -1;
}
private boolean validatePath(String path) {
if (!path.startsWith("/")) {
syserr.println("Path must start with /; given path was: " + path);
return false;
}
return true;
}
@Override
public int run(String[] args) throws Exception {
Preconditions.checkArgument(getConf() != null, "null configuration");
if (args.length > 0) {
switch (args[0]) {
case "ls":
return ls(args);
case "resolve":
return resolve(args);
case "bind":
return bind(args);
case "mknode":
return mknode(args);
case "rm":
return rm(args);
default:
return usageError("Invalid command: " + args[0], USAGE);
}
}
return usageError("No command arg passed.", USAGE);
}
@SuppressWarnings("unchecked")
public int ls(String[] args) {
Options lsOption = new Options();
CommandLineParser parser = new GnuParser();
try {
CommandLine line = parser.parse(lsOption, args);
List<String> argsList = line.getArgList();
if (argsList.size() != 2) {
return usageError("ls requires exactly one path argument", LS_USAGE);
}
if (!validatePath(argsList.get(1))) {
return -1;
}
try {
List<String> children = registry.list(argsList.get(1));
for (String child : children) {
sysout.println(child);
}
return 0;
} catch (Exception e) {
syserr.println(analyzeException("ls", e, argsList));
}
return -1;
} catch (ParseException exp) {
return usageError("Invalid syntax " + exp, LS_USAGE);
}
}
@SuppressWarnings("unchecked")
public int resolve(String[] args) {
Options resolveOption = new Options();
CommandLineParser parser = new GnuParser();
try {
CommandLine line = parser.parse(resolveOption, args);
List<String> argsList = line.getArgList();
if (argsList.size() != 2) {
return usageError("resolve requires exactly one path argument",
RESOLVE_USAGE);
}
if (!validatePath(argsList.get(1))) {
return -1;
}
try {
ServiceRecord record = registry.resolve(argsList.get(1));
for (Endpoint endpoint : record.external) {
sysout.println(" Endpoint(ProtocolType="
+ endpoint.protocolType + ", Api="
+ endpoint.api + ");"
+ " Addresses(AddressType="
+ endpoint.addressType + ") are: ");
for (Map<String, String> address : endpoint.addresses) {
sysout.println("[ ");
for (Map.Entry<String, String> entry : address.entrySet()) {
sysout.print("\t" + entry.getKey()
+ ":" + entry.getValue());
}
sysout.println("\n]");
}
sysout.println();
}
return 0;
} catch (Exception e) {
syserr.println(analyzeException("resolve", e, argsList));
}
return -1;
} catch (ParseException exp) {
return usageError("Invalid syntax " + exp, RESOLVE_USAGE);
}
}
public int bind(String[] args) {
Option rest = OptionBuilder.withArgName("rest")
.hasArg()
.withDescription("rest Option")
.create("rest");
Option webui = OptionBuilder.withArgName("webui")
.hasArg()
.withDescription("webui Option")
.create("webui");
Option inet = OptionBuilder.withArgName("inet")
.withDescription("inet Option")
.create("inet");
Option port = OptionBuilder.withArgName("port")
.hasArg()
.withDescription("port to listen on [9999]")
.create("p");
Option host = OptionBuilder.withArgName("host")
.hasArg()
.withDescription("host name")
.create("h");
Option apiOpt = OptionBuilder.withArgName("api")
.hasArg()
.withDescription("api")
.create("api");
Options inetOption = new Options();
inetOption.addOption(inet);
inetOption.addOption(port);
inetOption.addOption(host);
inetOption.addOption(apiOpt);
Options webuiOpt = new Options();
webuiOpt.addOption(webui);
webuiOpt.addOption(apiOpt);
Options restOpt = new Options();
restOpt.addOption(rest);
restOpt.addOption(apiOpt);
CommandLineParser parser = new GnuParser();
ServiceRecord sr = new ServiceRecord();
CommandLine line;
if (args.length <= 1) {
return usageError("Invalid syntax ", BIND_USAGE);
}
if (args[1].equals("-inet")) {
int portNum;
String hostName;
String api;
try {
line = parser.parse(inetOption, args);
} catch (ParseException exp) {
return usageError("Invalid syntax " + exp.getMessage(), BIND_USAGE);
}
if (line.hasOption("inet") && line.hasOption("p") &&
line.hasOption("h") && line.hasOption("api")) {
try {
portNum = Integer.parseInt(line.getOptionValue("p"));
} catch (NumberFormatException exp) {
return usageError("Invalid Port - int required" + exp.getMessage(),
BIND_USAGE);
}
hostName = line.getOptionValue("h");
api = line.getOptionValue("api");
sr.addExternalEndpoint(
inetAddrEndpoint(api, ProtocolTypes.PROTOCOL_HADOOP_IPC, hostName,
portNum));
} else {
return usageError("Missing options: must have host, port and api",
BIND_USAGE);
}
} else if (args[1].equals("-webui")) {
try {
line = parser.parse(webuiOpt, args);
} catch (ParseException exp) {
return usageError("Invalid syntax " + exp.getMessage(), BIND_USAGE);
}
if (line.hasOption("webui") && line.hasOption("api")) {
URI theUri;
try {
theUri = new URI(line.getOptionValue("webui"));
} catch (URISyntaxException e) {
return usageError("Invalid URI: " + e.getMessage(), BIND_USAGE);
}
sr.addExternalEndpoint(webEndpoint(line.getOptionValue("api"), theUri));
} else {
return usageError("Missing options: must have value for uri and api",
BIND_USAGE);
}
} else if (args[1].equals("-rest")) {
try {
line = parser.parse(restOpt, args);
} catch (ParseException exp) {
return usageError("Invalid syntax " + exp.getMessage(), BIND_USAGE);
}
if (line.hasOption("rest") && line.hasOption("api")) {
URI theUri = null;
try {
theUri = new URI(line.getOptionValue("rest"));
} catch (URISyntaxException e) {
return usageError("Invalid URI: " + e.getMessage(), BIND_USAGE);
}
sr.addExternalEndpoint(
restEndpoint(line.getOptionValue("api"), theUri));
} else {
return usageError("Missing options: must have value for uri and api",
BIND_USAGE);
}
} else {
return usageError("Invalid syntax", BIND_USAGE);
}
@SuppressWarnings("unchecked")
List<String> argsList = line.getArgList();
if (argsList.size() != 2) {
return usageError("bind requires exactly one path argument", BIND_USAGE);
}
if (!validatePath(argsList.get(1))) {
return -1;
}
try {
registry.bind(argsList.get(1), sr, BindFlags.OVERWRITE);
return 0;
} catch (Exception e) {
syserr.println(analyzeException("bind", e, argsList));
}
return -1;
}
@SuppressWarnings("unchecked")
public int mknode(String[] args) {
Options mknodeOption = new Options();
CommandLineParser parser = new GnuParser();
try {
CommandLine line = parser.parse(mknodeOption, args);
List<String> argsList = line.getArgList();
if (argsList.size() != 2) {
return usageError("mknode requires exactly one path argument",
MKNODE_USAGE);
}
if (!validatePath(argsList.get(1))) {
return -1;
}
try {
registry.mknode(args[1], false);
return 0;
} catch (Exception e) {
syserr.println(analyzeException("mknode", e, argsList));
}
return -1;
} catch (ParseException exp) {
return usageError("Invalid syntax " + exp.toString(), MKNODE_USAGE);
}
}
@SuppressWarnings("unchecked")
public int rm(String[] args) {
Option recursive = OptionBuilder.withArgName("recursive")
.withDescription("delete recursively")
.create("r");
Options rmOption = new Options();
rmOption.addOption(recursive);
boolean recursiveOpt = false;
CommandLineParser parser = new GnuParser();
try {
CommandLine line = parser.parse(rmOption, args);
List<String> argsList = line.getArgList();
if (argsList.size() != 2) {
return usageError("RM requires exactly one path argument", RM_USAGE);
}
if (!validatePath(argsList.get(1))) {
return -1;
}
try {
if (line.hasOption("r")) {
recursiveOpt = true;
}
registry.delete(argsList.get(1), recursiveOpt);
return 0;
} catch (Exception e) {
syserr.println(analyzeException("rm", e, argsList));
}
return -1;
} catch (ParseException exp) {
return usageError("Invalid syntax " + exp.toString(), RM_USAGE);
}
}
/**
* Given an exception and a possibly empty argument list, generate
* a diagnostics string for use in error messages
* @param operation the operation that failed
* @param e exception
* @param argsList arguments list
* @return a string intended for the user
*/
String analyzeException(String operation,
Exception e,
List<String> argsList) {
    String pathArg = argsList.size() > 1 ? argsList.get(1) : "(none)";
if (LOG.isDebugEnabled()) {
LOG.debug("Operation {} on path {} failed with exception {}",
operation, pathArg, e, e);
}
if (e instanceof InvalidPathnameException) {
return "InvalidPath :" + pathArg + ": " + e;
}
if (e instanceof PathNotFoundException) {
return "Path not found: " + pathArg;
}
if (e instanceof NoRecordException) {
return "No service record at path " + pathArg;
}
if (e instanceof AuthenticationFailedException) {
return "Failed to authenticate to registry : " + e;
}
if (e instanceof NoPathPermissionsException) {
return "No Permission to path: " + pathArg + ": " + e;
}
if (e instanceof AccessControlException) {
return "No Permission to path: " + pathArg + ": " + e;
}
if (e instanceof InvalidRecordException) {
return "Unable to read record at: " + pathArg + ": " + e;
}
if (e instanceof IOException) {
return "IO Exception when accessing path :" + pathArg + ": " + e;
}
// something else went very wrong here
return "Exception " + e;
}
}
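// Programmatic usage sketch (illustration only; the path is a placeholder):
//
//   try (RegistryCli cli = new RegistryCli(System.out, System.err)) {
//     int rc = cli.run(new String[]{"ls", "/users"});
//   }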
| 16,498 | 32.130522 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.client.api;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Constants for the registry, including configuration keys and default
* values.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface RegistryConstants {
/**
* prefix for registry configuration options: {@value}.
* Why <code>hadoop.</code> and not YARN? It can
* live outside YARN
*/
String REGISTRY_PREFIX = "hadoop.registry.";
/**
* Prefix for zookeeper-specific options: {@value}
* <p>
* For clients using other protocols, these options are not supported.
*/
String ZK_PREFIX = REGISTRY_PREFIX + "zk.";
/**
* flag to indicate whether or not the registry should
* be enabled in the RM: {@value}
*/
String KEY_REGISTRY_ENABLED = REGISTRY_PREFIX + "rm.enabled";
/**
   * Default value for enabling the registry in the RM: {@value}
*/
boolean DEFAULT_REGISTRY_ENABLED = false;
/**
* Key to set if the registry is secure: {@value}.
* Turning it on changes the permissions policy from "open access"
* to restrictions on kerberos with the option of
* a user adding one or more auth key pairs down their
* own tree.
*/
String KEY_REGISTRY_SECURE = REGISTRY_PREFIX + "secure";
/**
* Default registry security policy: {@value}.
*/
boolean DEFAULT_REGISTRY_SECURE = false;
/**
* Root path in the ZK tree for the registry: {@value}
*/
String KEY_REGISTRY_ZK_ROOT = ZK_PREFIX + "root";
/**
* Default root of the yarn registry: {@value}
*/
String DEFAULT_ZK_REGISTRY_ROOT = "/registry";
/**
* Registry client authentication policy.
* <p>
* This is only used in secure clusters.
* <p>
* If the Factory methods of {@link RegistryOperationsFactory}
* are used, this key does not need to be set: it is set
* up based on the factory method used.
*/
String KEY_REGISTRY_CLIENT_AUTH =
REGISTRY_PREFIX + "client.auth";
/**
* Registry client uses Kerberos: authentication is automatic from
* logged in user
*/
String REGISTRY_CLIENT_AUTH_KERBEROS = "kerberos";
/**
* Username/password is the authentication mechanism.
* If set then both {@link #KEY_REGISTRY_CLIENT_AUTHENTICATION_ID}
* and {@link #KEY_REGISTRY_CLIENT_AUTHENTICATION_PASSWORD} must be set.
*/
String REGISTRY_CLIENT_AUTH_DIGEST = "digest";
/**
* No authentication; client is anonymous
*/
String REGISTRY_CLIENT_AUTH_ANONYMOUS = "";
/**
* Registry client authentication ID
* <p>
* This is only used in secure clusters with
* {@link #KEY_REGISTRY_CLIENT_AUTH} set to
* {@link #REGISTRY_CLIENT_AUTH_DIGEST}
*
*/
String KEY_REGISTRY_CLIENT_AUTHENTICATION_ID =
KEY_REGISTRY_CLIENT_AUTH + ".id";
/**
* Registry client authentication password.
* <p>
* This is only used in secure clusters with the client set to
   * use digest (not SASL or anonymous) authentication.
* <p>
* Specifically, {@link #KEY_REGISTRY_CLIENT_AUTH} set to
* {@link #REGISTRY_CLIENT_AUTH_DIGEST}
*
*/
String KEY_REGISTRY_CLIENT_AUTHENTICATION_PASSWORD =
KEY_REGISTRY_CLIENT_AUTH + ".password";
/**
* List of hostname:port pairs defining the
* zookeeper quorum binding for the registry {@value}
*/
String KEY_REGISTRY_ZK_QUORUM = ZK_PREFIX + "quorum";
/**
* The default zookeeper quorum binding for the registry: {@value}
*/
String DEFAULT_REGISTRY_ZK_QUORUM = "localhost:2181";
/**
* Zookeeper session timeout in milliseconds: {@value}
*/
String KEY_REGISTRY_ZK_SESSION_TIMEOUT =
ZK_PREFIX + "session.timeout.ms";
/**
* The default ZK session timeout: {@value}.
*/
int DEFAULT_ZK_SESSION_TIMEOUT = 60000;
/**
* Zookeeper connection timeout in milliseconds: {@value}.
*/
String KEY_REGISTRY_ZK_CONNECTION_TIMEOUT =
ZK_PREFIX + "connection.timeout.ms";
/**
* The default ZK connection timeout: {@value}.
*/
int DEFAULT_ZK_CONNECTION_TIMEOUT = 15000;
/**
* Zookeeper connection retry count before failing: {@value}.
*/
String KEY_REGISTRY_ZK_RETRY_TIMES = ZK_PREFIX + "retry.times";
/**
* The default # of times to retry a ZK connection: {@value}.
*/
int DEFAULT_ZK_RETRY_TIMES = 5;
/**
* Zookeeper connect interval in milliseconds: {@value}.
*/
String KEY_REGISTRY_ZK_RETRY_INTERVAL =
ZK_PREFIX + "retry.interval.ms";
/**
* The default interval between connection retries: {@value}.
*/
int DEFAULT_ZK_RETRY_INTERVAL = 1000;
/**
* Zookeeper retry limit in milliseconds, during
* exponential backoff: {@value}.
*
   * This places a hard limit on the total retry period, even
   * if the retry count and interval, combined with the
   * backoff policy, would otherwise allow a longer one.
*
*/
String KEY_REGISTRY_ZK_RETRY_CEILING =
ZK_PREFIX + "retry.ceiling.ms";
/**
* Default limit on retries: {@value}.
*/
int DEFAULT_ZK_RETRY_CEILING = 60000;
/**
* A comma separated list of Zookeeper ACL identifiers with
* system access to the registry in a secure cluster: {@value}.
*
* These are given full access to all entries.
*
* If there is an "@" at the end of an entry it
* instructs the registry client to append the kerberos realm as
* derived from the login and {@link #KEY_REGISTRY_KERBEROS_REALM}.
*/
String KEY_REGISTRY_SYSTEM_ACCOUNTS = REGISTRY_PREFIX + "system.accounts";
/**
* Default system accounts given global access to the registry: {@value}.
*/
String DEFAULT_REGISTRY_SYSTEM_ACCOUNTS =
"sasl:yarn@, sasl:mapred@, sasl:hdfs@, sasl:hadoop@";
/**
   * A comma separated list of Zookeeper ACL identifiers granted
   * access to a user's own registry entries in a secure cluster: {@value}.
   *
   * These are given access to the user's part of the registry.
   *
   * If there is an "@" at the end of an entry it
   * instructs the registry client to append the default kerberos realm.
*/
String KEY_REGISTRY_USER_ACCOUNTS = REGISTRY_PREFIX + "user.accounts";
/**
   * Default user accounts: {@value}.
*/
String DEFAULT_REGISTRY_USER_ACCOUNTS = "";
/**
* The kerberos realm: {@value}.
*
* This is used to set the realm of
* system principals which do not declare their realm,
* and any other accounts that need the value.
*
* If empty, the default realm of the running process
* is used.
*
* If neither are known and the realm is needed, then the registry
* service/client will fail.
*/
String KEY_REGISTRY_KERBEROS_REALM = REGISTRY_PREFIX + "kerberos.realm";
/**
* Key to define the JAAS context. Used in secure registries: {@value}.
*/
String KEY_REGISTRY_CLIENT_JAAS_CONTEXT = REGISTRY_PREFIX + "jaas.context";
/**
* default client-side registry JAAS context: {@value}
*/
String DEFAULT_REGISTRY_CLIENT_JAAS_CONTEXT = "Client";
/**
* path to users off the root: {@value}.
*/
String PATH_USERS = "/users/";
/**
   * path to system services off the root: {@value}.
*/
String PATH_SYSTEM_SERVICES = "/services/";
/**
   * path to services under a user's home path: {@value}.
*/
String PATH_USER_SERVICES = "/services/";
/**
* path under a service record to point to components of that service:
* {@value}.
*/
String SUBPATH_COMPONENTS = "/components/";
}
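// Configuration sketch (added for illustration; hostnames are
// placeholders): wiring a client to a three-node quorum with a longer
// session timeout, using the keys defined above.
//
//   Configuration conf = new Configuration();
//   conf.set(RegistryConstants.KEY_REGISTRY_ZK_QUORUM,
//       "zk1:2181,zk2:2181,zk3:2181");
//   conf.setInt(RegistryConstants.KEY_REGISTRY_ZK_SESSION_TIMEOUT, 90000);
//   conf.setBoolean(RegistryConstants.KEY_REGISTRY_SECURE, true);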
| 8,232 | 27.686411 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* YARN Registry Client API.
*
* This package contains the core API for the YARN registry.
*
* <ol>
* <li> Data types can be found in
* {@link org.apache.hadoop.registry.client.types}</li>
* <li> Exceptions are listed in
* {@link org.apache.hadoop.registry.client.exceptions}</li>
* <li> Classes to assist use of the registry are in
* {@link org.apache.hadoop.registry.client.binding}</li>
* </ol>
*
*
*/
package org.apache.hadoop.registry.client.api;
| 1,283 | 34.666667 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/BindFlags.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.client.api;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Combinable Flags to use when creating a service entry.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface BindFlags {
/**
   * Create the entry. This is just "0" and can be OR-ed with anything.
*/
int CREATE = 0;
/**
* The entry should be created even if an existing entry is there.
*/
int OVERWRITE = 1;
}
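// Usage sketch (illustration only): because CREATE is zero, the flags can
// be OR-ed together; the path and record below are placeholders.
//
//   operations.bind("/users/example/service", record,
//       BindFlags.CREATE | BindFlags.OVERWRITE);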
| 1,335 | 30.809524 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryOperationsFactory.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.client.api;
import com.google.common.base.Preconditions;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.ServiceStateException;
import org.apache.hadoop.registry.client.impl.RegistryOperationsClient;
import static org.apache.hadoop.registry.client.api.RegistryConstants.*;
/**
* A factory for registry operation service instances.
* <p>
* <i>Each created instance will be returned initialized.</i>
* <p>
* That is, the service will have had <code>Service.init(conf)</code> applied
* to it —possibly after the configuration has been modified to
* support the specific binding/security mechanism used
*/
public final class RegistryOperationsFactory {
private RegistryOperationsFactory() {
}
/**
* Create and initialize a registry operations instance.
   * Access rights will be determined from the configuration
* @param conf configuration
* @return a registry operations instance
* @throws ServiceStateException on any failure to initialize
*/
public static RegistryOperations createInstance(Configuration conf) {
return createInstance("RegistryOperations", conf);
}
/**
* Create and initialize a registry operations instance.
* Access rights will be determined from the configuration
* @param name name of the instance
* @param conf configuration
* @return a registry operations instance
* @throws ServiceStateException on any failure to initialize
*/
public static RegistryOperations createInstance(String name, Configuration conf) {
Preconditions.checkArgument(conf != null, "Null configuration");
RegistryOperationsClient operations =
new RegistryOperationsClient(name);
operations.init(conf);
return operations;
}
/**
* Create and initialize an anonymous read/write registry operations instance.
* In a secure cluster, this instance will only have read access to the
* registry.
* @param conf configuration
* @return an anonymous registry operations instance
*
* @throws ServiceStateException on any failure to initialize
*/
public static RegistryOperations createAnonymousInstance(Configuration conf) {
Preconditions.checkArgument(conf != null, "Null configuration");
conf.set(KEY_REGISTRY_CLIENT_AUTH, REGISTRY_CLIENT_AUTH_ANONYMOUS);
return createInstance("AnonymousRegistryOperations", conf);
}
/**
   * Create and initialize a secure, Kerberos-authenticated instance.
*
* The user identity will be inferred from the current user
*
* The authentication of this instance will expire when any kerberos
* tokens needed to authenticate with the registry infrastructure expire.
* @param conf configuration
* @param jaasContext the JAAS context of the account.
* @return a registry operations instance
* @throws ServiceStateException on any failure to initialize
*/
public static RegistryOperations createKerberosInstance(Configuration conf,
String jaasContext) {
Preconditions.checkArgument(conf != null, "Null configuration");
conf.set(KEY_REGISTRY_CLIENT_AUTH, REGISTRY_CLIENT_AUTH_KERBEROS);
conf.set(KEY_REGISTRY_CLIENT_JAAS_CONTEXT, jaasContext);
return createInstance("KerberosRegistryOperations", conf);
}
/**
* Create and initialize an operations instance authenticated with write
* access via an <code>id:password</code> pair.
*
   * The instance will have read access
   * across the registry, but write access only to those parts of the
   * registry for which it has been given the relevant permissions.
* @param conf configuration
* @param id user ID
* @param password password
* @return a registry operations instance
* @throws ServiceStateException on any failure to initialize
* @throws IllegalArgumentException if an argument is invalid
*/
public static RegistryOperations createAuthenticatedInstance(Configuration conf,
String id,
String password) {
Preconditions.checkArgument(!StringUtils.isEmpty(id), "empty Id");
Preconditions.checkArgument(!StringUtils.isEmpty(password), "empty Password");
Preconditions.checkArgument(conf != null, "Null configuration");
conf.set(KEY_REGISTRY_CLIENT_AUTH, REGISTRY_CLIENT_AUTH_DIGEST);
conf.set(KEY_REGISTRY_CLIENT_AUTHENTICATION_ID, id);
conf.set(KEY_REGISTRY_CLIENT_AUTHENTICATION_PASSWORD, password);
return createInstance("DigestRegistryOperations", conf);
}
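  /*
   * Usage sketch (illustrative, not part of the original class): build a
   * digest-authenticated client and start it before use. The id, password
   * and probe path below are placeholder assumptions.
   *
   *   Configuration conf = new Configuration();
   *   RegistryOperations registry =
   *       RegistryOperationsFactory.createAuthenticatedInstance(
   *           conf, "service-id", "service-password");
   *   registry.start();   // the factory initializes but does not start it
   *   try {
   *     registry.exists("/users/example");
   *   } finally {
   *     registry.stop();
   *   }
   */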
}
| 5,317 | 39.287879 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryOperations.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.client.api;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
import org.apache.hadoop.fs.PathNotFoundException;
import org.apache.hadoop.service.Service;
import org.apache.hadoop.registry.client.exceptions.InvalidPathnameException;
import org.apache.hadoop.registry.client.exceptions.InvalidRecordException;
import org.apache.hadoop.registry.client.exceptions.NoRecordException;
import org.apache.hadoop.registry.client.types.RegistryPathStatus;
import org.apache.hadoop.registry.client.types.ServiceRecord;
import java.io.IOException;
import java.util.List;
/**
* Registry Operations
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface RegistryOperations extends Service {
/**
* Create a path.
*
* It is not an error if the path exists already, be it empty or not.
*
* The createParents flag also requests creating the parents.
* As entries in the registry can hold data while still having
* child entries, it is not an error if any of the parent path
* elements have service records.
*
* @param path path to create
* @param createParents also create the parents.
* @throws PathNotFoundException parent path is not in the registry.
* @throws InvalidPathnameException path name is invalid.
* @throws IOException Any other IO Exception.
* @return true if the path was created, false if it existed.
*/
boolean mknode(String path, boolean createParents)
throws PathNotFoundException,
InvalidPathnameException,
IOException;
/**
* Bind a path in the registry to a service record
* @param path path to service record
   * @param record service record to create/update
* @param flags bind flags
* @throws PathNotFoundException the parent path does not exist
* @throws FileAlreadyExistsException path exists but create flags
* do not include "overwrite"
* @throws InvalidPathnameException path name is invalid.
* @throws IOException Any other IO Exception.
*/
void bind(String path, ServiceRecord record, int flags)
throws PathNotFoundException,
FileAlreadyExistsException,
InvalidPathnameException,
IOException;
/**
* Resolve the record at a path
* @param path path to an entry containing a {@link ServiceRecord}
* @return the record
* @throws PathNotFoundException path is not in the registry.
* @throws NoRecordException if there is not a service record
* @throws InvalidRecordException if there was a service record but it could
* not be parsed.
* @throws IOException Any other IO Exception
*/
ServiceRecord resolve(String path)
throws PathNotFoundException,
NoRecordException,
InvalidRecordException,
IOException;
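  /*
   * Typical bind/resolve round trip (an illustrative sketch only: the path
   * and record values are assumptions, and error handling is elided):
   *
   *   String path = "/users/example/services/org-example/instance";
   *   operations.mknode(path, true);
   *   ServiceRecord record = new ServiceRecord();
   *   operations.bind(path, record, BindFlags.OVERWRITE);
   *   ServiceRecord resolved = operations.resolve(path);
   */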
/**
* Get the status of a path
* @param path path to query
* @return the status of the path
* @throws PathNotFoundException path is not in the registry.
* @throws InvalidPathnameException the path is invalid.
* @throws IOException Any other IO Exception
*/
RegistryPathStatus stat(String path)
throws PathNotFoundException,
InvalidPathnameException,
IOException;
/**
* Probe for a path existing.
   * This is equivalent to {@link #stat(String)} with
   * any failure downgraded to a <code>false</code> return value.
   * @param path path to query
   * @return true if the path was found
   * @throws IOException any other IO exception
*/
boolean exists(String path) throws IOException;
/**
* List all entries under a registry path, returning the relative names
* of the entries.
* @param path path to query
* @return a possibly empty list of the short path names of
* child entries.
* @throws PathNotFoundException
* @throws InvalidPathnameException
* @throws IOException
*/
List<String> list(String path) throws
PathNotFoundException,
InvalidPathnameException,
IOException;
/**
* Delete a path.
*
* If the operation returns without an error then the entry has been
* deleted.
   * @param path path to delete
* @param recursive recursive flag
* @throws PathNotFoundException path is not in the registry.
* @throws InvalidPathnameException the path is invalid.
* @throws PathIsNotEmptyDirectoryException path has child entries, but
* recursive is false.
* @throws IOException Any other IO Exception
*
*/
void delete(String path, boolean recursive)
throws PathNotFoundException,
PathIsNotEmptyDirectoryException,
InvalidPathnameException,
IOException;
/**
* Add a new write access entry to be added to node permissions in all
* future write operations of a session connected to a secure registry.
*
* This does not grant the session any more rights: if it lacked any write
* access, it will still be unable to manipulate the registry.
*
* In an insecure cluster, this operation has no effect.
* @param id ID to use
* @param pass password
* @return true if the accessor was added: that is, the registry connection
* uses permissions to manage access
* @throws IOException on any failure to build the digest
*/
boolean addWriteAccessor(String id, String pass) throws IOException;
/**
* Clear all write accessors.
*
* At this point all standard permissions/ACLs are retained,
   * including any set on behalf of the user.
* Only accessors added via {@link #addWriteAccessor(String, String)}
* are removed.
*/
public void clearWriteAccessors();
}
| 6,500 | 34.52459 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Registry binding utility classes.
*/
package org.apache.hadoop.registry.client.binding;
| 903 | 38.304348 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryPathUtils.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.client.binding;
import com.google.common.base.Preconditions;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.PathNotFoundException;
import org.apache.hadoop.registry.client.exceptions.InvalidPathnameException;
import org.apache.hadoop.registry.client.impl.zk.RegistryInternalConstants;
import org.apache.zookeeper.common.PathUtils;
import java.net.IDN;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Pattern;
/**
* Basic operations on paths: manipulating them and creating and validating
* path elements.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class RegistryPathUtils {
/**
* Compiled down pattern to validate single entries in the path
*/
private static final Pattern PATH_ENTRY_VALIDATION_PATTERN =
Pattern.compile(RegistryInternalConstants.VALID_PATH_ENTRY_PATTERN);
/**
* Validate ZK path with the path itself included in
* the exception text
* @param path path to validate
* @return the path parameter
* @throws InvalidPathnameException if the pathname is invalid.
*/
public static String validateZKPath(String path) throws
InvalidPathnameException {
try {
PathUtils.validatePath(path);
} catch (IllegalArgumentException e) {
throw new InvalidPathnameException(path,
"Invalid Path \"" + path + "\" : " + e, e);
}
return path;
}
/**
* Validate ZK path as valid for a DNS hostname.
* @param path path to validate
* @return the path parameter
* @throws InvalidPathnameException if the pathname is invalid.
*/
public static String validateElementsAsDNS(String path) throws
InvalidPathnameException {
List<String> splitpath = split(path);
for (String fragment : splitpath) {
if (!PATH_ENTRY_VALIDATION_PATTERN.matcher(fragment).matches()) {
throw new InvalidPathnameException(path,
"Invalid Path element \"" + fragment + "\"");
}
}
return path;
}
/**
   * Create a full path from the registry root and the supplied subdir
   * @param base registry base path
   * @param path path of operation
* @return an absolute path
* @throws InvalidPathnameException if the path is invalid
*/
public static String createFullPath(String base, String path) throws
InvalidPathnameException {
Preconditions.checkArgument(path != null, "null path");
    Preconditions.checkArgument(base != null, "null base");
return validateZKPath(join(base, path));
}
/**
   * Join two paths, guaranteeing that there will be exactly
   * one separator between the two, and exactly one at the front
* of the path. There will be no trailing "/" except for the special
* case that this is the root path
* @param base base path
* @param path second path to add
* @return a combined path.
*/
public static String join(String base, String path) {
Preconditions.checkArgument(path != null, "null path");
    Preconditions.checkArgument(base != null, "null base");
StringBuilder fullpath = new StringBuilder();
if (!base.startsWith("/")) {
fullpath.append('/');
}
fullpath.append(base);
// guarantee a trailing /
if (!fullpath.toString().endsWith("/")) {
fullpath.append("/");
}
// strip off any at the beginning
if (path.startsWith("/")) {
// path starts with /, so append all other characters -if present
if (path.length() > 1) {
fullpath.append(path.substring(1));
}
} else {
fullpath.append(path);
}
//here there may be a trailing "/"
String finalpath = fullpath.toString();
if (finalpath.endsWith("/") && !"/".equals(finalpath)) {
finalpath = finalpath.substring(0, finalpath.length() - 1);
}
return finalpath;
}
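  /*
   * Examples of the join() contract (comment sketch; results follow the
   * logic above):
   *
   *   join("/", "")          returns "/"
   *   join("/", "bin")       returns "/bin"
   *   join("/usr/", "/bin")  returns "/usr/bin"
   *   join("usr", "bin/")    returns "/usr/bin"
   */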
/**
   * Split a path into elements, stripping empty elements
* @param path the path
* @return the split path
*/
public static List<String> split(String path) {
//
String[] pathelements = path.split("/");
List<String> dirs = new ArrayList<String>(pathelements.length);
for (String pathelement : pathelements) {
if (!pathelement.isEmpty()) {
dirs.add(pathelement);
}
}
return dirs;
}
/**
* Get the last entry in a path; for an empty path
* returns "". The split logic is that of
* {@link #split(String)}
* @param path path of operation
* @return the last path entry or "" if none.
*/
public static String lastPathEntry(String path) {
List<String> splits = split(path);
if (splits.isEmpty()) {
// empty path. Return ""
return "";
} else {
return splits.get(splits.size() - 1);
}
}
/**
* Get the parent of a path
* @param path path to look at
* @return the parent path
* @throws PathNotFoundException if the path was at root.
*/
public static String parentOf(String path) throws PathNotFoundException {
List<String> elements = split(path);
int size = elements.size();
if (size == 0) {
throw new PathNotFoundException("No parent of " + path);
}
if (size == 1) {
return "/";
}
elements.remove(size - 1);
StringBuilder parent = new StringBuilder(path.length());
for (String element : elements) {
parent.append("/");
parent.append(element);
}
return parent.toString();
}
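  /*
   * Behaviour sketch for the path-splitting helpers (comment only):
   *
   *   split("/a/b/c")        returns ["a", "b", "c"]
   *   lastPathEntry("/a/b")  returns "b"
   *   lastPathEntry("/")     returns ""
   *   parentOf("/a/b")       returns "/a"
   *   parentOf("/a")         returns "/"
   *   parentOf("/")          throws PathNotFoundException
   */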
/**
* Perform any formatting for the registry needed to convert
* non-simple-DNS elements
* @param element element to encode
* @return an encoded string
*/
public static String encodeForRegistry(String element) {
return IDN.toASCII(element);
}
/**
* Perform whatever transforms are needed to get a YARN ID into
* a DNS-compatible name
* @param yarnId ID as string of YARN application, instance or container
* @return a string suitable for use in registry paths.
*/
public static String encodeYarnID(String yarnId) {
return yarnId.replace("_", "-");
}
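  /*
   * Example (comment only): a YARN ID such as
   * "application_1408631738011_0001" becomes
   * "application-1408631738011-0001", making it usable as a
   * DNS-style path element.
   */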
}
| 6,866 | 30.356164 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryUtils.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.client.binding;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.PathNotFoundException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.registry.client.api.RegistryConstants;
import org.apache.hadoop.registry.client.api.RegistryOperations;
import org.apache.hadoop.registry.client.exceptions.InvalidPathnameException;
import org.apache.hadoop.registry.client.exceptions.InvalidRecordException;
import org.apache.hadoop.registry.client.exceptions.NoRecordException;
import org.apache.hadoop.registry.client.impl.zk.RegistryInternalConstants;
import org.apache.hadoop.registry.client.types.RegistryPathStatus;
import org.apache.hadoop.registry.client.types.ServiceRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.hadoop.registry.client.binding.RegistryPathUtils.*;
import java.io.EOFException;
import java.io.IOException;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
/**
* Utility methods for working with a registry.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class RegistryUtils {
private static final Logger LOG =
LoggerFactory.getLogger(RegistryUtils.class);
/**
   * Build the user path; switches to the system path if the user is "".
   * It also cross-converts the username to ASCII via punycode.
* @param username username or ""
* @return the path to the user
*/
public static String homePathForUser(String username) {
Preconditions.checkArgument(username != null, "null user");
// catch recursion
if (username.startsWith(RegistryConstants.PATH_USERS)) {
return username;
}
if (username.isEmpty()) {
return RegistryConstants.PATH_SYSTEM_SERVICES;
}
// convert username to registry name
String convertedName = convertUsername(username);
return RegistryPathUtils.join(RegistryConstants.PATH_USERS,
encodeForRegistry(convertedName));
}
/**
   * Convert the username to a form which can be used for registry
   * entries: lower-case it, then strip off the Kerberos realm and any
   * "/" hostname entries if present.
* @param username user
* @return the converted username
*/
public static String convertUsername(String username) {
String converted =
org.apache.hadoop.util.StringUtils.toLowerCase(username);
int atSymbol = converted.indexOf('@');
if (atSymbol > 0) {
converted = converted.substring(0, atSymbol);
}
int slashSymbol = converted.indexOf('/');
if (slashSymbol > 0) {
converted = converted.substring(0, slashSymbol);
}
return converted;
}
/**
   * Create the path to a service class
* @param user username or ""
* @param serviceClass service name
* @return a full path
*/
public static String serviceclassPath(String user,
String serviceClass) {
String services = join(homePathForUser(user),
RegistryConstants.PATH_USER_SERVICES);
return join(services,
serviceClass);
}
/**
* Create a path to a service under a user and service class
* @param user username or ""
* @param serviceClass service name
* @param serviceName service name unique for that user and service class
* @return a full path
*/
public static String servicePath(String user,
String serviceClass,
String serviceName) {
return join(
serviceclassPath(user, serviceClass),
serviceName);
}
/**
* Create a path for listing components under a service
* @param user username or ""
* @param serviceClass service name
* @param serviceName service name unique for that user and service class
* @return a full path
*/
public static String componentListPath(String user,
String serviceClass, String serviceName) {
return join(servicePath(user, serviceClass, serviceName),
RegistryConstants.SUBPATH_COMPONENTS);
}
/**
* Create the path to a service record for a component
* @param user username or ""
* @param serviceClass service name
* @param serviceName service name unique for that user and service class
* @param componentName unique name/ID of the component
* @return a full path
*/
public static String componentPath(String user,
String serviceClass, String serviceName, String componentName) {
return join(
componentListPath(user, serviceClass, serviceName),
componentName);
}
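  /*
   * Illustrative layout produced by the path helpers, assuming the default
   * registry constants (the user and service names are example values):
   *
   *   homePathForUser("alice")                    -> /users/alice
   *   serviceclassPath("alice", "org-example")    -> /users/alice/services/org-example
   *   servicePath("alice", "org-example", "web")  -> /users/alice/services/org-example/web
   *   componentPath("alice", "org-example", "web", "container-0001")
   *       -> /users/alice/services/org-example/web/components/container-0001
   */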
/**
* List service records directly under a path
* @param registryOperations registry operations instance
* @param path path to list
* @return a mapping of the service records that were resolved, indexed
* by their full path
* @throws IOException
*/
public static Map<String, ServiceRecord> listServiceRecords(
RegistryOperations registryOperations,
String path) throws IOException {
Map<String, RegistryPathStatus> children =
statChildren(registryOperations, path);
return extractServiceRecords(registryOperations,
path,
children.values());
}
/**
* List children of a directory and retrieve their
* {@link RegistryPathStatus} values.
* <p>
   * This is not an atomic operation; a child may be deleted
   * during the iteration through the child entries. If this happens,
   * the <code>PathNotFoundException</code> is caught and that child
   * entry omitted.
*
* @param path path
* @return a possibly empty map of child entries listed by
* their short name.
* @throws PathNotFoundException path is not in the registry.
* @throws InvalidPathnameException the path is invalid.
* @throws IOException Any other IO Exception
*/
public static Map<String, RegistryPathStatus> statChildren(
RegistryOperations registryOperations,
String path)
throws PathNotFoundException,
InvalidPathnameException,
IOException {
List<String> childNames = registryOperations.list(path);
Map<String, RegistryPathStatus> results =
new HashMap<String, RegistryPathStatus>();
for (String childName : childNames) {
String child = join(path, childName);
try {
RegistryPathStatus stat = registryOperations.stat(child);
results.put(childName, stat);
} catch (PathNotFoundException pnfe) {
if (LOG.isDebugEnabled()) {
LOG.debug("stat failed on {}: moved? {}", child, pnfe, pnfe);
}
// and continue
}
}
return results;
}
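  /*
   * Sketch of a listing pass built on statChildren() (illustrative only;
   * the path is an example value):
   *
   *   Map<String, RegistryPathStatus> children =
   *       statChildren(operations, "/users/alice/services");
   *   for (Map.Entry<String, RegistryPathStatus> entry : children.entrySet()) {
   *     // entry.getKey() is the short child name; the value is its status
   *   }
   */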
/**
* Get the home path of the current user.
* <p>
* In an insecure cluster, the environment variable
* <code>HADOOP_USER_NAME</code> is queried <i>first</i>.
* <p>
* This means that in a YARN container where the creator set this
* environment variable to propagate their identity, the defined
* user name is used in preference to the actual user.
* <p>
* In a secure cluster, the kerberos identity of the current user is used.
* @return a path for the current user's home dir.
* @throws RuntimeException if the current user identity cannot be determined
* from the OS/kerberos.
*/
public static String homePathForCurrentUser() {
String shortUserName = currentUsernameUnencoded();
return homePathForUser(shortUserName);
}
/**
* Get the current username, before any encoding has been applied.
* @return the current user from the kerberos identity, falling back
* to the user and/or env variables.
*/
private static String currentUsernameUnencoded() {
String env_hadoop_username = System.getenv(
RegistryInternalConstants.HADOOP_USER_NAME);
return getCurrentUsernameUnencoded(env_hadoop_username);
}
/**
* Get the current username, using the value of the parameter
* <code>env_hadoop_username</code> if it is set on an insecure cluster.
* This ensures that the username propagates correctly across processes
* started by YARN.
* <p>
   * This method is primarily made visible for testing.
* @param env_hadoop_username the environment variable
* @return the selected username
* @throws RuntimeException if there is a problem getting the short user
* name of the current user.
*/
@VisibleForTesting
public static String getCurrentUsernameUnencoded(String env_hadoop_username) {
String shortUserName = null;
if (!UserGroupInformation.isSecurityEnabled()) {
shortUserName = env_hadoop_username;
}
if (StringUtils.isEmpty(shortUserName)) {
try {
shortUserName = UserGroupInformation.getCurrentUser().getShortUserName();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
return shortUserName;
}
/**
* Get the current user path formatted for the registry
* <p>
* In an insecure cluster, the environment variable
   * <code>HADOOP_USER_NAME</code> is queried <i>first</i>.
* <p>
* This means that in a YARN container where the creator set this
* environment variable to propagate their identity, the defined
* user name is used in preference to the actual user.
* <p>
* In a secure cluster, the kerberos identity of the current user is used.
* @return the encoded shortname of the current user
* @throws RuntimeException if the current user identity cannot be determined
* from the OS/kerberos.
*
*/
public static String currentUser() {
String shortUserName = currentUsernameUnencoded();
return encodeForRegistry(shortUserName);
}
/**
   * Extract all service records under a list of stat operations; this
   * skips entries that are too short or simply not matching.
* @param operations operation support for fetches
* @param parentpath path of the parent of all the entries
* @param stats Collection of stat results
* @return a possibly empty map of fullpath:record.
* @throws IOException for any IO Operation that wasn't ignored.
*/
public static Map<String, ServiceRecord> extractServiceRecords(
RegistryOperations operations,
String parentpath,
Collection<RegistryPathStatus> stats) throws IOException {
Map<String, ServiceRecord> results = new HashMap<String, ServiceRecord>(stats.size());
for (RegistryPathStatus stat : stats) {
if (stat.size > ServiceRecord.RECORD_TYPE.length()) {
// maybe has data
String path = join(parentpath, stat.path);
try {
ServiceRecord serviceRecord = operations.resolve(path);
results.put(path, serviceRecord);
} catch (EOFException ignored) {
if (LOG.isDebugEnabled()) {
LOG.debug("data too short for {}", path);
}
} catch (InvalidRecordException record) {
if (LOG.isDebugEnabled()) {
LOG.debug("Invalid record at {}", path);
}
} catch (NoRecordException record) {
if (LOG.isDebugEnabled()) {
LOG.debug("No record at {}", path);
}
}
}
}
return results;
}
/**
   * Extract all service records under a list of stat operations; this
   * non-atomic action skips entries that are too short or simply not matching.
   * <p>
   * @param operations operation support for fetches
   * @param parentpath path of the parent of all the entries
   * @param stats map of short path name to stat result
   * @return a possibly empty map of fullpath:record.
* @throws IOException for any IO Operation that wasn't ignored.
*/
public static Map<String, ServiceRecord> extractServiceRecords(
RegistryOperations operations,
String parentpath,
      Map<String, RegistryPathStatus> stats) throws IOException {
return extractServiceRecords(operations, parentpath, stats.values());
}
/**
   * Extract all service records under a list of stat operations; this
   * non-atomic action skips entries that are too short or simply not matching.
* <p>
* @param operations operation support for fetches
* @param parentpath path of the parent of all the entries
* @return a possibly empty map of fullpath:record.
* @throws IOException for any IO Operation that wasn't ignored.
*/
public static Map<String, ServiceRecord> extractServiceRecords(
RegistryOperations operations,
String parentpath) throws IOException {
return
extractServiceRecords(operations,
parentpath,
statChildren(operations, parentpath).values());
}
/**
* Static instance of service record marshalling
*/
public static class ServiceRecordMarshal extends JsonSerDeser<ServiceRecord> {
public ServiceRecordMarshal() {
super(ServiceRecord.class);
}
}
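  /*
   * Usage sketch for ServiceRecordMarshal (illustrative; the record contents
   * and path are assumptions):
   *
   *   RegistryUtils.ServiceRecordMarshal marshal =
   *       new RegistryUtils.ServiceRecordMarshal();
   *   ServiceRecord record = new ServiceRecord();
   *   byte[] bytes = marshal.toBytes(record);
   *   ServiceRecord copy = marshal.fromBytes("/example/path", bytes);
   */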
}
| 13,700 | 34.403101 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryTypeUtils.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.client.binding;
import com.google.common.base.Preconditions;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.registry.client.exceptions.InvalidRecordException;
import static org.apache.hadoop.registry.client.types.AddressTypes.*;
import org.apache.hadoop.registry.client.types.Endpoint;
import org.apache.hadoop.registry.client.types.ProtocolTypes;
import org.apache.hadoop.registry.client.types.ServiceRecord;
import java.net.InetSocketAddress;
import java.net.MalformedURLException;
import java.net.URI;
import java.net.URL;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * Static methods to work with registry types, primarily endpoints and the
* list representation of addresses.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class RegistryTypeUtils {
/**
* Create a URL endpoint from a list of URIs
* @param api implemented API
* @param protocolType protocol type
* @param uris URIs
* @return a new endpoint
*/
public static Endpoint urlEndpoint(String api,
String protocolType,
URI... uris) {
return new Endpoint(api, protocolType, uris);
}
/**
* Create a REST endpoint from a list of URIs
* @param api implemented API
* @param uris URIs
* @return a new endpoint
*/
public static Endpoint restEndpoint(String api,
URI... uris) {
return urlEndpoint(api, ProtocolTypes.PROTOCOL_REST, uris);
}
/**
* Create a Web UI endpoint from a list of URIs
* @param api implemented API
* @param uris URIs
* @return a new endpoint
*/
public static Endpoint webEndpoint(String api,
URI... uris) {
return urlEndpoint(api, ProtocolTypes.PROTOCOL_WEBUI, uris);
}
/**
* Create an internet address endpoint from a list of URIs
* @param api implemented API
* @param protocolType protocol type
* @param hostname hostname/FQDN
* @param port port
* @return a new endpoint
*/
public static Endpoint inetAddrEndpoint(String api,
String protocolType,
String hostname,
int port) {
Preconditions.checkArgument(api != null, "null API");
Preconditions.checkArgument(protocolType != null, "null protocolType");
Preconditions.checkArgument(hostname != null, "null hostname");
return new Endpoint(api,
ADDRESS_HOSTNAME_AND_PORT,
protocolType,
hostnamePortPair(hostname, port));
}
/**
* Create an IPC endpoint
* @param api API
* @param address the address as a tuple of (hostname, port)
* @return the new endpoint
*/
public static Endpoint ipcEndpoint(String api, InetSocketAddress address) {
return new Endpoint(api,
ADDRESS_HOSTNAME_AND_PORT,
ProtocolTypes.PROTOCOL_HADOOP_IPC,
        address == null ? null : hostnamePortPair(address));
}
/**
* Create a single entry map
* @param key map entry key
* @param val map entry value
* @return a 1 entry map.
*/
public static Map<String, String> map(String key, String val) {
Map<String, String> map = new HashMap<String, String>(1);
map.put(key, val);
return map;
}
/**
   * Create a single-entry URI address map
   * @param uri the URI value
   * @return a 1 entry map.
*/
public static Map<String, String> uri(String uri) {
return map(ADDRESS_URI, uri);
}
/**
* Create a (hostname, port) address pair
* @param hostname hostname
* @param port port
* @return a 1 entry map.
*/
public static Map<String, String> hostnamePortPair(String hostname, int port) {
Map<String, String> map =
map(ADDRESS_HOSTNAME_FIELD, hostname);
map.put(ADDRESS_PORT_FIELD, Integer.toString(port));
return map;
}
/**
* Create a (hostname, port) address pair
* @param address socket address whose hostname and port are used for the
* generated address.
* @return a 1 entry map.
*/
public static Map<String, String> hostnamePortPair(InetSocketAddress address) {
return hostnamePortPair(address.getHostName(), address.getPort());
}
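  /*
   * Resulting map layout (comment sketch; the literal key names are those
   * defined by ADDRESS_HOSTNAME_FIELD and ADDRESS_PORT_FIELD in AddressTypes):
   *
   *   hostnamePortPair("namenode.example.org", 8020)
   *       -> { hostname-field: "namenode.example.org", port-field: "8020" }
   */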
/**
* Require a specific address type on an endpoint
* @param required required type
* @param epr endpoint
* @throws InvalidRecordException if the type is wrong
*/
public static void requireAddressType(String required, Endpoint epr) throws
InvalidRecordException {
if (!required.equals(epr.addressType)) {
throw new InvalidRecordException(
epr.toString(),
"Address type of " + epr.addressType
+ " does not match required type of "
+ required);
}
}
/**
   * Get the URI addresses of an endpoint
   * @param epr endpoint
   * @return the URIs of all entries in the address list. Null if the endpoint
   * itself is null
   * @throws InvalidRecordException if the type is wrong, there are no addresses,
   * or the payload is ill-formatted
*/
public static List<String> retrieveAddressesUriType(Endpoint epr)
throws InvalidRecordException {
if (epr == null) {
return null;
}
requireAddressType(ADDRESS_URI, epr);
List<Map<String, String>> addresses = epr.addresses;
if (addresses.size() < 1) {
throw new InvalidRecordException(epr.toString(),
"No addresses in endpoint");
}
List<String> results = new ArrayList<String>(addresses.size());
for (Map<String, String> address : addresses) {
results.add(getAddressField(address, ADDRESS_URI));
}
return results;
}
/**
   * Get a specific field from an address, raising an exception if
   * the field is not present
* @param address address to query
* @param field field to resolve
* @return the resolved value. Guaranteed to be non-null.
* @throws InvalidRecordException if the field did not resolve
*/
public static String getAddressField(Map<String, String> address,
String field) throws InvalidRecordException {
String val = address.get(field);
if (val == null) {
throw new InvalidRecordException("", "Missing address field: " + field);
}
return val;
}
/**
   * Get the address URLs. Guaranteed to return at least one address.
   * @param epr endpoint
   * @return the addresses as URLs
   * @throws InvalidRecordException if the type is wrong, there are no addresses,
   * or the payload is ill-formatted
* @throws MalformedURLException address can't be turned into a URL
*/
public static List<URL> retrieveAddressURLs(Endpoint epr)
throws InvalidRecordException, MalformedURLException {
if (epr == null) {
throw new InvalidRecordException("", "Null endpoint");
}
List<String> addresses = retrieveAddressesUriType(epr);
List<URL> results = new ArrayList<URL>(addresses.size());
for (String address : addresses) {
results.add(new URL(address));
}
return results;
}
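  /*
   * Sketch: resolving the web UI addresses of a service record
   * (illustrative; the API name "ui" is an example assumption):
   *
   *   Endpoint web = record.getExternalEndpoint("ui");
   *   List<URL> urls = RegistryTypeUtils.retrieveAddressURLs(web);
   */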
/**
* Validate the record by checking for null fields and other invalid
* conditions
* @param path path for exceptions
* @param record record to validate. May be null
* @throws InvalidRecordException on invalid entries
*/
public static void validateServiceRecord(String path, ServiceRecord record)
throws InvalidRecordException {
if (record == null) {
throw new InvalidRecordException(path, "Null record");
}
if (!ServiceRecord.RECORD_TYPE.equals(record.type)) {
throw new InvalidRecordException(path,
"invalid record type field: \"" + record.type + "\"");
}
if (record.external != null) {
for (Endpoint endpoint : record.external) {
validateEndpoint(path, endpoint);
}
}
if (record.internal != null) {
for (Endpoint endpoint : record.internal) {
validateEndpoint(path, endpoint);
}
}
}
/**
* Validate the endpoint by checking for null fields and other invalid
* conditions
* @param path path for exceptions
* @param endpoint endpoint to validate. May be null
* @throws InvalidRecordException on invalid entries
*/
public static void validateEndpoint(String path, Endpoint endpoint)
throws InvalidRecordException {
if (endpoint == null) {
throw new InvalidRecordException(path, "Null endpoint");
}
try {
endpoint.validate();
} catch (RuntimeException e) {
throw new InvalidRecordException(path, e.toString());
}
}
}
| 9,234 | 30.735395 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/JsonSerDeser.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.client.binding;
import com.google.common.base.Preconditions;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.registry.client.exceptions.InvalidRecordException;
import org.apache.hadoop.registry.client.exceptions.NoRecordException;
import org.codehaus.jackson.JsonGenerationException;
import org.codehaus.jackson.JsonParseException;
import org.codehaus.jackson.JsonProcessingException;
import org.codehaus.jackson.map.DeserializationConfig;
import org.codehaus.jackson.map.JsonMappingException;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.map.SerializationConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.DataOutputStream;
import java.io.EOFException;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
/**
* Support for marshalling objects to and from JSON.
* <p>
 * It constructs an object mapper as an instance field
 * and synchronizes access to those methods
 * which use the mapper.
* @param <T> Type to marshal.
*/
@InterfaceAudience.Private()
@InterfaceStability.Evolving
public class JsonSerDeser<T> {
private static final Logger LOG = LoggerFactory.getLogger(JsonSerDeser.class);
private static final String UTF_8 = "UTF-8";
public static final String E_NO_DATA = "No data at path";
public static final String E_DATA_TOO_SHORT = "Data at path too short";
public static final String E_MISSING_MARKER_STRING =
"Missing marker string: ";
private final Class<T> classType;
private final ObjectMapper mapper;
/**
* Create an instance bound to a specific type
   * @param classType class to marshal
*/
public JsonSerDeser(Class<T> classType) {
Preconditions.checkArgument(classType != null, "null classType");
this.classType = classType;
this.mapper = new ObjectMapper();
mapper.configure(DeserializationConfig.Feature.FAIL_ON_UNKNOWN_PROPERTIES,
false);
}
/**
* Get the simple name of the class type to be marshalled
* @return the name of the class being marshalled
*/
public String getName() {
return classType.getSimpleName();
}
/**
* Convert from JSON
*
* @param json input
* @return the parsed JSON
* @throws IOException IO
* @throws JsonMappingException failure to map from the JSON to this class
*/
@SuppressWarnings("unchecked")
public synchronized T fromJson(String json)
throws IOException, JsonParseException, JsonMappingException {
try {
return mapper.readValue(json, classType);
} catch (IOException e) {
LOG.error("Exception while parsing json : " + e + "\n" + json, e);
throw e;
}
}
/**
* Convert from a JSON file
* @param jsonFile input file
* @return the parsed JSON
* @throws IOException IO problems
* @throws JsonMappingException failure to map from the JSON to this class
*/
@SuppressWarnings("unchecked")
public synchronized T fromFile(File jsonFile)
throws IOException, JsonParseException, JsonMappingException {
try {
return mapper.readValue(jsonFile, classType);
} catch (IOException e) {
LOG.error("Exception while parsing json file {}: {}", jsonFile, e);
throw e;
}
}
/**
* Convert from a JSON file
* @param resource input file
* @return the parsed JSON
* @throws IOException IO problems
* @throws JsonMappingException failure to map from the JSON to this class
*/
@SuppressWarnings({"IOResourceOpenedButNotSafelyClosed"})
public synchronized T fromResource(String resource)
throws IOException, JsonParseException, JsonMappingException {
InputStream resStream = null;
try {
resStream = this.getClass().getResourceAsStream(resource);
if (resStream == null) {
throw new FileNotFoundException(resource);
}
return mapper.readValue(resStream, classType);
} catch (IOException e) {
LOG.error("Exception while parsing json resource {}: {}", resource, e);
throw e;
} finally {
IOUtils.closeStream(resStream);
}
}
/**
   * Clone by converting to JSON and back again.
   * This is much less efficient than any Java clone process.
* @param instance instance to duplicate
* @return a new instance
* @throws IOException problems.
*/
public T fromInstance(T instance) throws IOException {
return fromJson(toJson(instance));
}
/**
* Load from a Hadoop filesystem
* @param fs filesystem
* @param path path
   * @return the loaded instance
* @throws IOException IO problems
* @throws EOFException if not enough bytes were read in
* @throws JsonParseException parse problems
* @throws JsonMappingException O/J mapping problems
*/
public T load(FileSystem fs, Path path)
throws IOException, JsonParseException, JsonMappingException {
FileStatus status = fs.getFileStatus(path);
long len = status.getLen();
byte[] b = new byte[(int) len];
FSDataInputStream dataInputStream = fs.open(path);
int count = dataInputStream.read(b);
if (count != len) {
throw new EOFException(path.toString() + ": read finished prematurely");
}
return fromBytes(path.toString(), b);
}
/**
   * Save an instance to a Hadoop filesystem
   * @param fs filesystem
   * @param path path
   * @param instance instance to save
   * @param overwrite should any existing file be overwritten
* @throws IOException IO exception
*/
public void save(FileSystem fs, Path path, T instance,
boolean overwrite) throws
IOException {
FSDataOutputStream dataOutputStream = fs.create(path, overwrite);
writeJsonAsBytes(instance, dataOutputStream);
}
/**
   * Write the JSON as bytes, then close the stream
   * @param instance instance to write
   * @param dataOutputStream an output stream that will always be closed
* @throws IOException on any failure
*/
private void writeJsonAsBytes(T instance,
DataOutputStream dataOutputStream) throws IOException {
try {
byte[] b = toBytes(instance);
dataOutputStream.write(b);
} finally {
dataOutputStream.close();
}
}
/**
* Convert JSON To bytes
* @param instance instance to convert
* @return a byte array
* @throws IOException
*/
public byte[] toBytes(T instance) throws IOException {
String json = toJson(instance);
return json.getBytes(UTF_8);
}
/**
* Deserialize from a byte array
* @param path path the data came from
* @param bytes byte array
* @throws IOException all problems
* @throws EOFException not enough data
   * @throws InvalidRecordException if the parsing failed; the record is invalid
*/
public T fromBytes(String path, byte[] bytes) throws IOException,
InvalidRecordException {
return fromBytes(path, bytes, "");
}
/**
* Deserialize from a byte array, optionally checking for a marker string.
* <p>
* If the marker parameter is supplied (and not empty), then its presence
* will be verified before the JSON parsing takes place; it is a fast-fail
* check. If not found, an {@link InvalidRecordException} exception will be
* raised
* @param path path the data came from
* @param bytes byte array
* @param marker an optional string which, if set, MUST be present in the
* UTF-8 parsed payload.
* @return The parsed record
* @throws IOException all problems
* @throws EOFException not enough data
* @throws InvalidRecordException if the JSON parsing failed.
* @throws NoRecordException if the data is not considered a record: either
* it is too short or it did not contain the marker string.
*/
public T fromBytes(String path, byte[] bytes, String marker)
throws IOException, NoRecordException, InvalidRecordException {
int len = bytes.length;
    if (len == 0) {
throw new NoRecordException(path, E_NO_DATA);
}
if (StringUtils.isNotEmpty(marker) && len < marker.length()) {
throw new NoRecordException(path, E_DATA_TOO_SHORT);
}
String json = new String(bytes, 0, len, UTF_8);
if (StringUtils.isNotEmpty(marker)
&& !json.contains(marker)) {
throw new NoRecordException(path, E_MISSING_MARKER_STRING + marker);
}
try {
return fromJson(json);
} catch (JsonProcessingException e) {
throw new InvalidRecordException(path, e.toString(), e);
}
}
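  /*
   * Marker-check sketch (illustrative; "serDeser" is assumed to be a
   * JsonSerDeser<ServiceRecord>): when deserializing service records,
   * ServiceRecord.RECORD_TYPE can be passed as the marker so that arbitrary
   * non-record data fails fast with a NoRecordException:
   *
   *   ServiceRecord record = serDeser.fromBytes(path, bytes,
   *       ServiceRecord.RECORD_TYPE);
   */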
/**
* Convert an instance to a JSON string
* @param instance instance to convert
* @return a JSON string description
* @throws JsonParseException parse problems
* @throws JsonMappingException O/J mapping problems
*/
public synchronized String toJson(T instance) throws IOException,
JsonGenerationException,
JsonMappingException {
mapper.configure(SerializationConfig.Feature.INDENT_OUTPUT, true);
return mapper.writeValueAsString(instance);
}
/**
* Convert an instance to a string form for output. This is a robust
* operation which will convert any JSON-generating exceptions into
* error text.
* @param instance non-null instance
* @return a JSON string
*/
public String toString(T instance) {
Preconditions.checkArgument(instance != null, "Null instance argument");
try {
return toJson(instance);
} catch (IOException e) {
return "Failed to convert to a string: " + e;
}
}
}
| 10,526 | 32.958065 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/exceptions/NoChildrenForEphemeralsException.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.client.exceptions;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
 * This is a manifestation of the ZooKeeper restrictions about
 * what nodes may act as parents.
*
* Children are not allowed under ephemeral nodes. This is an aspect
* of ZK which isn't directly exposed to the registry API. It may
* surface if the registry is manipulated outside of the registry API.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class NoChildrenForEphemeralsException extends RegistryIOException {
public NoChildrenForEphemeralsException(String path, Throwable cause) {
super(path, cause);
}
public NoChildrenForEphemeralsException(String path, String error) {
super(path, error);
}
public NoChildrenForEphemeralsException(String path,
String error,
Throwable cause) {
super(path, error, cause);
}
}
| 1,767 | 35.081633 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/exceptions/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Registry Service Exceptions
* <p>
* These are the Registry-specific exceptions that may be raised during
* Registry operations.
* <p>
* Other exceptions may be raised, especially <code>IOExceptions</code>
* triggered by network problems, and <code>IllegalArgumentException</code>
* exceptions that may be raised if invalid (often null) arguments are passed
* to a method call.
* <p>
* All exceptions in this package are derived from
* {@link org.apache.hadoop.registry.client.exceptions.RegistryIOException}
*/
package org.apache.hadoop.registry.client.exceptions;
| 1,395 | 40.058824 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/exceptions/NoRecordException.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.client.exceptions;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.registry.client.types.ServiceRecord;
/**
* Raised if there is no {@link ServiceRecord} resolved at the end
* of the specified path.
* <p>
* There may be valid data of some form at the end of the path, but it does
* not appear to be a Service Record.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class NoRecordException extends RegistryIOException {
public NoRecordException(String path, String error) {
super(path, error);
}
public NoRecordException(String path,
String error,
Throwable cause) {
super(path, error, cause);
}
}
| 1,590 | 33.586957 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/exceptions/InvalidRecordException.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.client.exceptions;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Raised if an attempt to parse a record failed.
*
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class InvalidRecordException extends RegistryIOException {
public InvalidRecordException(String path, String error) {
super(path, error);
}
public InvalidRecordException(String path,
String error,
Throwable cause) {
super(path, error, cause);
}
}
| 1,382 | 31.928571 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/exceptions/NoPathPermissionsException.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.client.exceptions;
import org.apache.hadoop.fs.PathIOException;
/**
* Raised on path permission exceptions.
* <p>
 * This is similar to PathIOException, but indicates that the failure
 * was caused by insufficient permissions on the path.
*/
public class NoPathPermissionsException extends RegistryIOException {
public NoPathPermissionsException(String path, Throwable cause) {
super(path, cause);
}
public NoPathPermissionsException(String path, String error) {
super(path, error);
}
public NoPathPermissionsException(String path, String error, Throwable cause) {
super(path, error, cause);
}
public NoPathPermissionsException(String message,
PathIOException cause) {
super(message, cause);
}
}
| 1,532 | 32.326087 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/exceptions/RegistryIOException.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.client.exceptions;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.PathIOException;
/**
* Base exception for registry operations.
* <p>
* These exceptions include the path of the failing operation wherever possible;
* this can be retrieved via {@link PathIOException#getPath()}.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class RegistryIOException extends PathIOException {
/**
* Build an exception from any other Path IO Exception.
* This propagates the path of the original exception
* @param message more specific text
* @param cause cause
*/
public RegistryIOException(String message, PathIOException cause) {
super(cause.getPath() != null ? cause.getPath().toString() : "",
message,
cause);
}
public RegistryIOException(String path, Throwable cause) {
super(path, cause);
}
public RegistryIOException(String path, String error) {
super(path, error);
}
public RegistryIOException(String path, String error, Throwable cause) {
super(path, error, cause);
}
}
| 2,000 | 32.915254 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/exceptions/AuthenticationFailedException.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.client.exceptions;
/**
* Exception raised when client access wasn't authenticated.
* That is: the credentials provided were incomplete or invalid.
*/
public class AuthenticationFailedException extends RegistryIOException {
public AuthenticationFailedException(String path, Throwable cause) {
super(path, cause);
}
public AuthenticationFailedException(String path, String error) {
super(path, error);
}
public AuthenticationFailedException(String path,
String error,
Throwable cause) {
super(path, error, cause);
}
}
| 1,400 | 34.025 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/exceptions/InvalidPathnameException.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.client.exceptions;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* A path name was invalid. This is raised when a path string has
* characters in it that are not permitted.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class InvalidPathnameException extends RegistryIOException {
public InvalidPathnameException(String path, String message) {
super(path, message);
}
public InvalidPathnameException(String path,
String message,
Throwable cause) {
super(path, message, cause);
}
}
| 1,452 | 34.439024 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Registry client services
* <p>
* These are classes which follow the YARN lifecycle and which implement
* the {@link org.apache.hadoop.registry.client.api.RegistryOperations}
* API.
*/
package org.apache.hadoop.registry.client.impl;
| 1,051 | 37.962963 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/RegistryOperationsClient.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.client.impl;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.registry.client.impl.zk.RegistryBindingSource;
import org.apache.hadoop.registry.client.impl.zk.RegistryOperationsService;
/**
* This is the client service for applications to work with the registry.
*
* It does not set up the root paths for the registry, is bonded
* to a user, and can be set to use SASL, anonymous or id:pass auth.
*
* For SASL, the client must be operating in the context of an authed user.
*
* For id:pass the client must have the relevant id and password, SASL is
* not used even if the client has credentials.
*
* For anonymous, nothing is used.
*
 * Any SASL-authed client also has the ability to add one or more
 * authentication id:pass pairs to all future writes, and to reset them later.
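 * <p>
 * A minimal client-creation sketch (illustrative only: the quorum address
 * is a placeholder and the key comes from RegistryConstants):
 * <pre>
 *   Configuration conf = new Configuration();
 *   conf.set(RegistryConstants.KEY_REGISTRY_ZK_QUORUM, "zk1:2181");
 *   RegistryOperationsClient client = new RegistryOperationsClient("registry");
 *   client.init(conf);
 *   client.start();
 * </pre>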
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class RegistryOperationsClient extends RegistryOperationsService {
public RegistryOperationsClient(String name) {
super(name);
}
public RegistryOperationsClient(String name,
RegistryBindingSource bindingSource) {
super(name, bindingSource);
}
}
| 2,064 | 35.875 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Core Zookeeper support.
* <p>
* This package contains the low-level bindings to Curator and Zookeeper,
* including everything related to registry security.
* <p>
* The class {@link org.apache.hadoop.registry.client.impl.zk.CuratorService}
* is a YARN service which offers access to a Zookeeper instance via
* Apache Curator.
* <p>
* The {@link org.apache.hadoop.registry.client.impl.zk.RegistrySecurity}
* implements the security support in the registry, though a set of
* static methods and as a YARN service.
* <p>
* To work with ZK, system properties need to be set before invoking
* some operations/instantiating some objects. The definitions of these
* are kept in {@link org.apache.hadoop.registry.client.impl.zk.ZookeeperConfigOptions}.
*
*
*/
package org.apache.hadoop.registry.client.impl.zk;
| 1,634 | 39.875 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/CuratorService.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.client.impl.zk;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.apache.curator.ensemble.EnsembleProvider;
import org.apache.curator.ensemble.fixed.FixedEnsembleProvider;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.api.BackgroundCallback;
import org.apache.curator.framework.api.CreateBuilder;
import org.apache.curator.framework.api.DeleteBuilder;
import org.apache.curator.framework.api.GetChildrenBuilder;
import org.apache.curator.retry.BoundedExponentialBackoffRetry;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
import org.apache.hadoop.fs.PathNotFoundException;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.service.CompositeService;
import org.apache.hadoop.service.ServiceStateException;
import org.apache.hadoop.registry.client.api.RegistryConstants;
import org.apache.hadoop.registry.client.binding.RegistryPathUtils;
import org.apache.hadoop.registry.client.exceptions.AuthenticationFailedException;
import org.apache.hadoop.registry.client.exceptions.NoChildrenForEphemeralsException;
import org.apache.hadoop.registry.client.exceptions.NoPathPermissionsException;
import org.apache.hadoop.registry.client.exceptions.RegistryIOException;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.data.ACL;
import org.apache.zookeeper.data.Stat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.List;
/**
* This service binds to Zookeeper via Apache Curator. It is more
* generic than just the YARN service registry; it does not implement
* any of the Registry Operations API.
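 * <p>
 * A lifecycle sketch (illustrative; the quorum address is a placeholder
 * and the configuration key comes from {@link RegistryConstants}):
 * <pre>
 *   Configuration conf = new Configuration();
 *   conf.set(RegistryConstants.KEY_REGISTRY_ZK_QUORUM, "localhost:2181");
 *   CuratorService svc = new CuratorService("curator", null);
 *   svc.init(conf);
 *   svc.start();
 *   boolean exists = svc.zkPathExists("/services");
 * </pre>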
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class CuratorService extends CompositeService
implements RegistryConstants, RegistryBindingSource {
private static final Logger LOG =
LoggerFactory.getLogger(CuratorService.class);
/**
* the Curator binding
*/
private CuratorFramework curator;
/**
* Path to the registry root
*/
private String registryRoot;
/**
* Supplied binding source. This defaults to being this
* service itself.
*/
private final RegistryBindingSource bindingSource;
/**
* Security service
*/
private RegistrySecurity registrySecurity;
/**
* the connection binding text for messages
*/
private String connectionDescription;
/**
* Security connection diagnostics
*/
private String securityConnectionDiagnostics = "";
/**
* Provider of curator "ensemble"; offers a basis for
* more flexible bonding in future.
*/
private EnsembleProvider ensembleProvider;
/**
* Construct the service.
* @param name service name
* @param bindingSource source of binding information.
* If null: use this instance
*/
public CuratorService(String name, RegistryBindingSource bindingSource) {
super(name);
if (bindingSource != null) {
this.bindingSource = bindingSource;
} else {
this.bindingSource = this;
}
}
/**
* Create an instance using this service as the binding source (i.e. read
* configuration options from the registry)
* @param name service name
*/
public CuratorService(String name) {
this(name, null);
}
/**
* Init the service.
* This is where the security bindings are set up
* @param conf configuration of the service
* @throws Exception
*/
@Override
protected void serviceInit(Configuration conf) throws Exception {
registryRoot = conf.getTrimmed(KEY_REGISTRY_ZK_ROOT,
DEFAULT_ZK_REGISTRY_ROOT);
    // create and add the registry security service
registrySecurity = new RegistrySecurity("registry security");
addService(registrySecurity);
if (LOG.isDebugEnabled()) {
LOG.debug("Creating Registry with root {}", registryRoot);
}
super.serviceInit(conf);
}
/**
* Start the service.
* This is where the curator instance is started.
* @throws Exception
*/
@Override
protected void serviceStart() throws Exception {
super.serviceStart();
// create the curator; rely on the registry security code
// to set up the JVM context and curator
curator = createCurator();
}
/**
* Close the ZK connection if it is open
*/
@Override
protected void serviceStop() throws Exception {
IOUtils.closeStream(curator);
super.serviceStop();
}
/**
* Internal check that a service is in the live state
* @throws ServiceStateException if not
*/
private void checkServiceLive() throws ServiceStateException {
if (!isInState(STATE.STARTED)) {
throw new ServiceStateException(
"Service " + getName() + " is in wrong state: "
+ getServiceState());
}
}
/**
* Flag to indicate whether or not the registry is secure.
* Valid once the service is inited.
* @return service security policy
*/
public boolean isSecure() {
return registrySecurity.isSecureRegistry();
}
/**
* Get the registry security helper
* @return the registry security helper
*/
protected RegistrySecurity getRegistrySecurity() {
return registrySecurity;
}
/**
* Build the security diagnostics string
* @return a string for diagnostics
*/
protected String buildSecurityDiagnostics() {
// build up the security connection diags
if (!isSecure()) {
return "security disabled";
} else {
StringBuilder builder = new StringBuilder();
builder.append("secure cluster; ");
builder.append(registrySecurity.buildSecurityDiagnostics());
return builder.toString();
}
}
/**
* Create a new curator instance off the root path; using configuration
* options provided in the service configuration to set timeouts and
* retry policy.
   * @return the newly created curator
*/
private CuratorFramework createCurator() throws IOException {
Configuration conf = getConfig();
createEnsembleProvider();
int sessionTimeout = conf.getInt(KEY_REGISTRY_ZK_SESSION_TIMEOUT,
DEFAULT_ZK_SESSION_TIMEOUT);
int connectionTimeout = conf.getInt(KEY_REGISTRY_ZK_CONNECTION_TIMEOUT,
DEFAULT_ZK_CONNECTION_TIMEOUT);
int retryTimes = conf.getInt(KEY_REGISTRY_ZK_RETRY_TIMES,
DEFAULT_ZK_RETRY_TIMES);
int retryInterval = conf.getInt(KEY_REGISTRY_ZK_RETRY_INTERVAL,
DEFAULT_ZK_RETRY_INTERVAL);
int retryCeiling = conf.getInt(KEY_REGISTRY_ZK_RETRY_CEILING,
DEFAULT_ZK_RETRY_CEILING);
if (LOG.isDebugEnabled()) {
LOG.debug("Creating CuratorService with connection {}",
connectionDescription);
}
CuratorFramework framework;
synchronized (CuratorService.class) {
// set the security options
// build up the curator itself
CuratorFrameworkFactory.Builder builder = CuratorFrameworkFactory.builder();
builder.ensembleProvider(ensembleProvider)
.connectionTimeoutMs(connectionTimeout)
.sessionTimeoutMs(sessionTimeout)
.retryPolicy(new BoundedExponentialBackoffRetry(retryInterval,
retryCeiling,
retryTimes));
// set up the builder AND any JVM context
registrySecurity.applySecurityEnvironment(builder);
//log them
securityConnectionDiagnostics = buildSecurityDiagnostics();
framework = builder.build();
framework.start();
}
return framework;
}
@Override
public String toString() {
return super.toString()
+ " " + bindingDiagnosticDetails();
}
/**
* Get the binding diagnostics
* @return a diagnostics string valid after the service is started.
*/
public String bindingDiagnosticDetails() {
return " Connection=\"" + connectionDescription + "\""
+ " root=\"" + registryRoot + "\""
+ " " + securityConnectionDiagnostics;
}
/**
* Create a full path from the registry root and the supplied subdir
* @param path path of operation
* @return an absolute path
   * @throws IllegalArgumentException if the path is invalid
*/
protected String createFullPath(String path) throws IOException {
return RegistryPathUtils.createFullPath(registryRoot, path);
}
/**
* Get the registry binding source ... this can be used to
* create new ensemble providers
* @return the registry binding source in use
*/
public RegistryBindingSource getBindingSource() {
return bindingSource;
}
/**
* Create the ensemble provider for this registry, by invoking
* {@link RegistryBindingSource#supplyBindingInformation()} on
* the provider stored in {@link #bindingSource}
* Sets {@link #ensembleProvider} to that value;
* sets {@link #connectionDescription} to the binding info
* for use in toString and logging;
*
*/
protected void createEnsembleProvider() {
BindingInformation binding = bindingSource.supplyBindingInformation();
connectionDescription = binding.description
+ " " + securityConnectionDiagnostics;
ensembleProvider = binding.ensembleProvider;
}
/**
* Supply the binding information.
* This implementation returns a fixed ensemble bonded to
* the quorum supplied by {@link #buildConnectionString()}
* @return the binding information
*/
@Override
public BindingInformation supplyBindingInformation() {
BindingInformation binding = new BindingInformation();
String connectString = buildConnectionString();
binding.ensembleProvider = new FixedEnsembleProvider(connectString);
binding.description =
"fixed ZK quorum \"" + connectString + "\"";
return binding;
}
/**
* Override point: get the connection string used to connect to
* the ZK service
* @return a registry quorum
*/
protected String buildConnectionString() {
return getConfig().getTrimmed(KEY_REGISTRY_ZK_QUORUM,
DEFAULT_REGISTRY_ZK_QUORUM);
}
/**
* Create an IOE when an operation fails
* @param path path of operation
* @param operation operation attempted
   * @param exception the exception caught
* @return an IOE to throw that contains the path and operation details.
*/
protected IOException operationFailure(String path,
String operation,
Exception exception) {
return operationFailure(path, operation, exception, null);
}
/**
* Create an IOE when an operation fails
* @param path path of operation
* @param operation operation attempted
   * @param exception the exception caught
   * @param acls the ACLs of the operation, for inclusion in diagnostics
   * @return an IOE to throw that contains the path and operation details.
*/
protected IOException operationFailure(String path,
String operation,
Exception exception,
List<ACL> acls) {
IOException ioe;
String aclList = "[" + RegistrySecurity.aclsToString(acls) + "]";
if (exception instanceof KeeperException.NoNodeException) {
ioe = new PathNotFoundException(path);
} else if (exception instanceof KeeperException.NodeExistsException) {
ioe = new FileAlreadyExistsException(path);
} else if (exception instanceof KeeperException.NoAuthException) {
ioe = new NoPathPermissionsException(path,
"Not authorized to access path; ACLs: " + aclList);
} else if (exception instanceof KeeperException.NotEmptyException) {
ioe = new PathIsNotEmptyDirectoryException(path);
} else if (exception instanceof KeeperException.AuthFailedException) {
ioe = new AuthenticationFailedException(path,
"Authentication Failed: " + exception
+ "; " + securityConnectionDiagnostics,
exception);
} else if (exception instanceof KeeperException.NoChildrenForEphemeralsException) {
ioe = new NoChildrenForEphemeralsException(path,
"Cannot create a path under an ephemeral node: " + exception,
exception);
} else if (exception instanceof KeeperException.InvalidACLException) {
// this is a security exception of a kind
// include the ACLs to help the diagnostics
StringBuilder builder = new StringBuilder();
builder.append("Path access failure ").append(aclList);
builder.append(" ");
builder.append(securityConnectionDiagnostics);
ioe = new NoPathPermissionsException(path, builder.toString());
} else {
ioe = new RegistryIOException(path,
"Failure of " + operation + " on " + path + ": " +
exception.toString(),
exception);
}
if (ioe.getCause() == null) {
ioe.initCause(exception);
}
return ioe;
}
/**
* Create a path if it does not exist.
   * The check is poll + create; there is a risk that another process
   * may create the same path before the create() operation is
   * executed/propagated to the ZK node polled.
   *
   * @param path path to create
   * @param mode create mode for the path
   * @param acl ACL for path, used when creating a new entry
   * @param createParents flag to trigger parent creation
* @return true iff the path was created
* @throws IOException
*/
@VisibleForTesting
public boolean maybeCreate(String path,
CreateMode mode,
List<ACL> acl,
boolean createParents) throws IOException {
return zkMkPath(path, mode, createParents, acl);
}
/**
* Stat the file
* @param path path of operation
* @return a curator stat entry
* @throws IOException on a failure
* @throws PathNotFoundException if the path was not found
*/
public Stat zkStat(String path) throws IOException {
checkServiceLive();
String fullpath = createFullPath(path);
Stat stat;
try {
if (LOG.isDebugEnabled()) {
LOG.debug("Stat {}", fullpath);
}
stat = curator.checkExists().forPath(fullpath);
} catch (Exception e) {
throw operationFailure(fullpath, "read()", e);
}
if (stat == null) {
throw new PathNotFoundException(path);
}
return stat;
}
/**
* Get the ACLs of a path
* @param path path of operation
* @return a possibly empty list of ACLs
* @throws IOException
*/
public List<ACL> zkGetACLS(String path) throws IOException {
checkServiceLive();
String fullpath = createFullPath(path);
List<ACL> acls;
try {
if (LOG.isDebugEnabled()) {
LOG.debug("GetACLS {}", fullpath);
}
acls = curator.getACL().forPath(fullpath);
} catch (Exception e) {
throw operationFailure(fullpath, "read()", e);
}
if (acls == null) {
throw new PathNotFoundException(path);
}
return acls;
}
/**
* Probe for a path existing
* @param path path of operation
* @return true if the path was visible from the ZK server
* queried.
* @throws IOException on any exception other than
* {@link PathNotFoundException}
*/
public boolean zkPathExists(String path) throws IOException {
checkServiceLive();
try {
// if zkStat(path) returns without throwing an exception, the return value
// is guaranteed to be not null
zkStat(path);
return true;
    } catch (PathNotFoundException e) {
      return false;
    }
}
/**
* Verify a path exists
* @param path path of operation
* @throws PathNotFoundException if the path is absent
* @throws IOException
*/
public String zkPathMustExist(String path) throws IOException {
zkStat(path);
return path;
}
/**
* Create a directory. It is not an error if it already exists
* @param path path to create
* @param mode mode for path
* @param createParents flag to trigger parent creation
* @param acls ACL for path
* @throws IOException any problem
*/
public boolean zkMkPath(String path,
CreateMode mode,
boolean createParents,
List<ACL> acls)
throws IOException {
checkServiceLive();
path = createFullPath(path);
if (acls == null || acls.isEmpty()) {
throw new NoPathPermissionsException(path, "Empty ACL list");
}
try {
RegistrySecurity.AclListInfo aclInfo =
new RegistrySecurity.AclListInfo(acls);
if (LOG.isDebugEnabled()) {
LOG.debug("Creating path {} with mode {} and ACL {}",
path, mode, aclInfo);
}
CreateBuilder createBuilder = curator.create();
createBuilder.withMode(mode).withACL(acls);
if (createParents) {
createBuilder.creatingParentsIfNeeded();
}
createBuilder.forPath(path);
} catch (KeeperException.NodeExistsException e) {
if (LOG.isDebugEnabled()) {
LOG.debug("path already present: {}", path, e);
}
return false;
} catch (Exception e) {
throw operationFailure(path, "mkdir() ", e, acls);
}
return true;
}
/**
* Recursively make a path
* @param path path to create
* @param acl ACL for path
* @throws IOException any problem
*/
public void zkMkParentPath(String path,
List<ACL> acl) throws
IOException {
// split path into elements
zkMkPath(RegistryPathUtils.parentOf(path),
CreateMode.PERSISTENT, true, acl);
}
/**
* Create a path with given data. byte[0] is used for a path
   * without data.
   * @param path path of operation
   * @param mode create mode for the path
   * @param data initial data
   * @param acls ACLs to set on the created path
   * @throws IOException on any failure
*/
public void zkCreate(String path,
CreateMode mode,
byte[] data,
List<ACL> acls) throws IOException {
Preconditions.checkArgument(data != null, "null data");
checkServiceLive();
String fullpath = createFullPath(path);
try {
if (LOG.isDebugEnabled()) {
LOG.debug("Creating {} with {} bytes of data and ACL {}",
fullpath, data.length,
new RegistrySecurity.AclListInfo(acls));
}
curator.create().withMode(mode).withACL(acls).forPath(fullpath, data);
} catch (Exception e) {
throw operationFailure(fullpath, "create()", e, acls);
}
}
/**
* Update the data for a path
* @param path path of operation
* @param data new data
* @throws IOException
*/
public void zkUpdate(String path, byte[] data) throws IOException {
Preconditions.checkArgument(data != null, "null data");
checkServiceLive();
path = createFullPath(path);
try {
if (LOG.isDebugEnabled()) {
LOG.debug("Updating {} with {} bytes", path, data.length);
}
curator.setData().forPath(path, data);
} catch (Exception e) {
throw operationFailure(path, "update()", e);
}
}
/**
* Create or update an entry
* @param path path
* @param data data
* @param acl ACL for path -used when creating a new entry
* @param overwrite enable overwrite
* @throws IOException
* @return true if the entry was created, false if it was simply updated.
*/
public boolean zkSet(String path,
CreateMode mode,
byte[] data,
List<ACL> acl, boolean overwrite) throws IOException {
Preconditions.checkArgument(data != null, "null data");
checkServiceLive();
if (!zkPathExists(path)) {
zkCreate(path, mode, data, acl);
return true;
} else {
if (overwrite) {
zkUpdate(path, data);
return false;
} else {
throw new FileAlreadyExistsException(path);
}
}
}
/**
* Delete a directory/directory tree.
* It is not an error to delete a path that does not exist
* @param path path of operation
* @param recursive flag to trigger recursive deletion
   * @param backgroundCallback callback; setting this converts the operation
   * into an asynchronous/background operation
* @throws IOException on problems other than no-such-path
*/
public void zkDelete(String path,
boolean recursive,
BackgroundCallback backgroundCallback) throws IOException {
checkServiceLive();
String fullpath = createFullPath(path);
try {
if (LOG.isDebugEnabled()) {
LOG.debug("Deleting {}", fullpath);
}
DeleteBuilder delete = curator.delete();
if (recursive) {
delete.deletingChildrenIfNeeded();
}
if (backgroundCallback != null) {
delete.inBackground(backgroundCallback);
}
delete.forPath(fullpath);
} catch (KeeperException.NoNodeException e) {
// not an error
} catch (Exception e) {
throw operationFailure(fullpath, "delete()", e);
}
}
/**
* List all children of a path
* @param path path of operation
* @return a possibly empty list of children
* @throws IOException
*/
public List<String> zkList(String path) throws IOException {
checkServiceLive();
String fullpath = createFullPath(path);
try {
if (LOG.isDebugEnabled()) {
LOG.debug("ls {}", fullpath);
}
GetChildrenBuilder builder = curator.getChildren();
List<String> children = builder.forPath(fullpath);
return children;
} catch (Exception e) {
throw operationFailure(path, "ls()", e);
}
}
/**
* Read data on a path
* @param path path of operation
* @return the data
* @throws IOException read failure
*/
public byte[] zkRead(String path) throws IOException {
checkServiceLive();
String fullpath = createFullPath(path);
try {
if (LOG.isDebugEnabled()) {
LOG.debug("Reading {}", fullpath);
}
return curator.getData().forPath(fullpath);
} catch (Exception e) {
throw operationFailure(fullpath, "read()", e);
}
}
/**
* Return a path dumper instance which can do a full dump
* of the registry tree in its <code>toString()</code>
* operation
   * @param verbose verbose flag - includes more details (such as ACLs)
   * @return a class to dump the registry
*/
public ZKPathDumper dumpPath(boolean verbose) {
return new ZKPathDumper(curator, registryRoot, verbose);
}
/**
* Add a new write access entry for all future write operations.
* @param id ID to use
* @param pass password
* @throws IOException on any failure to build the digest
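   * <p>
   * Example (a sketch; the id and password are placeholders):
   * <pre>
   *   curatorService.addWriteAccessor("admin", "secret");
   * </pre>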
*/
public boolean addWriteAccessor(String id, String pass) throws IOException {
RegistrySecurity security = getRegistrySecurity();
ACL digestACL = new ACL(ZooDefs.Perms.ALL,
security.toDigestId(security.digest(id, pass)));
return security.addDigestACL(digestACL);
}
/**
* Clear all write accessors
*/
public void clearWriteAccessors() {
getRegistrySecurity().resetDigestACLs();
}
/**
* Diagnostics method to dump a registry robustly.
* Any exception raised is swallowed
* @param verbose verbose path dump
* @return the registry tree
*/
protected String dumpRegistryRobustly(boolean verbose) {
try {
ZKPathDumper pathDumper = dumpPath(verbose);
return pathDumper.toString();
} catch (Exception e) {
// ignore
LOG.debug("Ignoring exception: {}", e);
}
return "";
}
}
| 24,117 | 30.200517 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistryBindingSource.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.client.impl.zk;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Interface which can be implemented by a registry binding source
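 * <p>
 * A sketch of a custom implementation (illustrative; the fixed connection
 * string is a placeholder):
 * <pre>
 *   class FixedBinding implements RegistryBindingSource {
 *     public BindingInformation supplyBindingInformation() {
 *       BindingInformation info = new BindingInformation();
 *       info.ensembleProvider = new FixedEnsembleProvider("zk1:2181");
 *       info.description = "fixed test quorum";
 *       return info;
 *     }
 *   }
 * </pre>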
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface RegistryBindingSource {
/**
* Supply the binding information for this registry
* @return the binding information data
*/
BindingInformation supplyBindingInformation();
}
| 1,310 | 34.432432 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/ZookeeperConfigOptions.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.client.impl.zk;
import org.apache.zookeeper.client.ZooKeeperSaslClient;
import org.apache.zookeeper.server.ZooKeeperSaslServer;
/**
* Configuration options which are internal to Zookeeper,
* as well as some other ZK constants
* <p>
* Zookeeper options are passed via system properties prior to the ZK
* Methods/classes being invoked. This implies that:
* <ol>
* <li>There can only be one instance of a ZK client or service class
* in a single JVM —else their configuration options will conflict.</li>
* <li>It is safest to set these properties immediately before
* invoking ZK operations.</li>
* </ol>
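 * <p>
 * For example, a client could disable SASL immediately before creating its
 * ZK connection (a sketch; whether this is appropriate is
 * deployment-specific):
 * <pre>
 *   System.setProperty(PROP_ZK_ENABLE_SASL_CLIENT, "false");
 *   // ...now instantiate the ZK/Curator client...
 * </pre>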
*
*/
public interface ZookeeperConfigOptions {
/**
* Enable SASL secure clients: {@value}.
* This is usually set to true, with ZK set to fall back to
* non-SASL authentication if the SASL auth fails
* by the property
* {@link #PROP_ZK_SERVER_MAINTAIN_CONNECTION_DESPITE_SASL_FAILURE}.
* <p>
* As a result, clients will default to attempting SASL-authentication,
* but revert to classic authentication/anonymous access on failure.
*/
String PROP_ZK_ENABLE_SASL_CLIENT =
"zookeeper.sasl.client";
/**
* Default flag for the ZK client: {@value}.
*/
String DEFAULT_ZK_ENABLE_SASL_CLIENT = "true";
/**
   * System property for the JAAS client context: {@value}.
   * <p>
   * For SASL authentication to work, this must point to a
   * context within the JVM's JAAS configuration file.
   * <p>
   * Default value is derived from
   * {@link ZooKeeperSaslClient#LOGIN_CONTEXT_NAME_KEY}
*/
String PROP_ZK_SASL_CLIENT_CONTEXT =
ZooKeeperSaslClient.LOGIN_CONTEXT_NAME_KEY;
/**
* The SASL client username: {@value}.
* <p>
* Set this to the <i>short</i> name of the client, e.g, "user",
* not {@code user/host}, or {@code user/host@REALM}
*/
String PROP_ZK_SASL_CLIENT_USERNAME = "zookeeper.sasl.client.username";
/**
* The SASL Server context, referring to a context in the JVM's
* JAAS context file: {@value}
*/
String PROP_ZK_SERVER_SASL_CONTEXT =
ZooKeeperSaslServer.LOGIN_CONTEXT_NAME_KEY;
/**
* Should ZK Server allow failed SASL clients to downgrade to classic
* authentication on a SASL auth failure: {@value}.
*/
String PROP_ZK_SERVER_MAINTAIN_CONNECTION_DESPITE_SASL_FAILURE =
"zookeeper.maintain_connection_despite_sasl_failure";
/**
   * Should the ZK server allow failed SASL clients to connect: {@value}.
*/
String PROP_ZK_ALLOW_FAILED_SASL_CLIENTS =
"zookeeper.allowSaslFailedClients";
/**
* Kerberos realm of the server: {@value}.
*/
String PROP_ZK_SERVER_REALM = "zookeeper.server.realm";
/**
* Path to a kinit binary: {@value}.
* Defaults to <code>"/usr/bin/kinit"</code>
*/
String PROP_ZK_KINIT_PATH = "zookeeper.kinit";
/**
* ID scheme for SASL: {@value}.
*/
String SCHEME_SASL = "sasl";
/**
* ID scheme for digest auth: {@value}.
*/
String SCHEME_DIGEST = "digest";
}
| 3,791 | 30.865546 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/ZKPathDumper.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.client.impl.zk;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.api.GetChildrenBuilder;
import org.apache.zookeeper.data.ACL;
import org.apache.zookeeper.data.Stat;
import java.util.List;
/**
* This class dumps a registry tree to a string.
* It does this in the <code>toString()</code> method, so it
* can be used in a log statement -the operation
* will only take place if the method is evaluated.
*
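 * <p>
 * Example (a sketch assuming an SLF4J logger and a started
 * {@link CuratorService} named {@code curatorService}):
 * <pre>
 *   LOG.debug("Registry dump:\n{}", curatorService.dumpPath(true));
 * </pre>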
*/
@VisibleForTesting
public class ZKPathDumper {
public static final int INDENT = 2;
private final CuratorFramework curator;
private final String root;
private final boolean verbose;
/**
* Create a path dumper -but do not dump the path until asked
* @param curator curator instance
* @param root root
* @param verbose verbose flag - includes more details (such as ACLs)
*/
public ZKPathDumper(CuratorFramework curator,
String root,
boolean verbose) {
Preconditions.checkArgument(curator != null);
Preconditions.checkArgument(root != null);
this.curator = curator;
this.root = root;
this.verbose = verbose;
}
/**
* Trigger the recursive registry dump.
* @return a string view of the registry
*/
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("ZK tree for ").append(root).append('\n');
expand(builder, root, 1);
return builder.toString();
}
/**
* Recursively expand the path into the supplied string builder, increasing
* the indentation by {@link #INDENT} as it proceeds (depth first) down
* the tree
* @param builder string build to append to
* @param path path to examine
* @param indent current indentation
*/
private void expand(StringBuilder builder,
String path,
int indent) {
try {
GetChildrenBuilder childrenBuilder = curator.getChildren();
List<String> children = childrenBuilder.forPath(path);
for (String child : children) {
String childPath = path + "/" + child;
String body;
Stat stat = curator.checkExists().forPath(childPath);
StringBuilder bodyBuilder = new StringBuilder(256);
bodyBuilder.append(" [")
.append(stat.getDataLength())
.append("]");
if (stat.getEphemeralOwner() > 0) {
bodyBuilder.append("*");
}
if (verbose) {
// verbose: extract ACLs
builder.append(" -- ");
List<ACL> acls =
curator.getACL().forPath(childPath);
for (ACL acl : acls) {
builder.append(RegistrySecurity.aclToString(acl));
builder.append(" ");
}
}
body = bodyBuilder.toString();
// print each child
append(builder, indent, ' ');
builder.append('/').append(child);
builder.append(body);
builder.append('\n');
// recurse
expand(builder, childPath, indent + INDENT);
}
} catch (Exception e) {
builder.append(e.toString()).append("\n");
}
}
/**
* Append the specified indentation to a builder
* @param builder string build to append to
* @param indent current indentation
* @param c charactor to use for indentation
*/
private void append(StringBuilder builder, int indent, char c) {
for (int i = 0; i < indent; i++) {
builder.append(c);
}
}
}
| 4,382 | 31.708955 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistryInternalConstants.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.client.impl.zk;
import org.apache.zookeeper.ZooDefs;
/**
* Internal constants for the registry.
*
* These are the things which aren't visible to users.
*
*/
public interface RegistryInternalConstants {
/**
* Pattern of a single entry in the registry path. : {@value}.
* <p>
   * This is what constitutes a valid hostname according to current RFCs:
   * the first and last characters must be lowercase alphanumeric, with
   * lowercase alphanumerics and hyphens allowed in between.
* <p>
* No upper limit is placed on the size of an entry.
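   * <p>
   * Illustrative examples: {@code "a"} and {@code "host-1"} match;
   * {@code "-host"}, {@code "host-"} and {@code "Host"} do not.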
*/
String VALID_PATH_ENTRY_PATTERN =
"([a-z0-9]|[a-z0-9][a-z0-9\\-]*[a-z0-9])";
/**
* Permissions for readers: {@value}.
*/
int PERMISSIONS_REGISTRY_READERS = ZooDefs.Perms.READ;
/**
* Permissions for system services: {@value}
*/
int PERMISSIONS_REGISTRY_SYSTEM_SERVICES = ZooDefs.Perms.ALL;
/**
* Permissions for a user's root entry: {@value}.
* All except the admin permissions (ACL access) on a node
*/
int PERMISSIONS_REGISTRY_USER_ROOT =
ZooDefs.Perms.READ | ZooDefs.Perms.WRITE | ZooDefs.Perms.CREATE |
ZooDefs.Perms.DELETE;
/**
   * Name of the SASL auth provider which has to be added to the ZK server
   * to enable <code>sasl:</code> ACL patterns: {@value}.
   *
   * Without this, callers can connect via SASL, but
   * they cannot use it in ACLs.
*/
String SASLAUTHENTICATION_PROVIDER =
"org.apache.zookeeper.server.auth.SASLAuthenticationProvider";
/**
* String to use as the prefix when declaring a new auth provider: {@value}.
*/
String ZOOKEEPER_AUTH_PROVIDER = "zookeeper.authProvider";
/**
   * This is the Hadoop environment variable which propagates the identity
   * of a user in an insecure cluster.
*/
String HADOOP_USER_NAME = "HADOOP_USER_NAME";
}
| 2,610 | 30.841463 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/BindingInformation.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.client.impl.zk;
import org.apache.curator.ensemble.EnsembleProvider;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Binding information provided by a {@link RegistryBindingSource}
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class BindingInformation {
/**
* The Curator Ensemble Provider
*/
public EnsembleProvider ensembleProvider;
/**
* Any information that may be useful for diagnostics
*/
public String description;
}
| 1,388 | 32.071429 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistryOperationsService.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.client.impl.zk;
import com.google.common.base.Preconditions;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.registry.client.api.BindFlags;
import org.apache.hadoop.registry.client.api.RegistryOperations;
import org.apache.hadoop.registry.client.binding.RegistryTypeUtils;
import org.apache.hadoop.registry.client.binding.RegistryUtils;
import org.apache.hadoop.registry.client.binding.RegistryPathUtils;
import org.apache.hadoop.registry.client.exceptions.InvalidPathnameException;
import org.apache.hadoop.registry.client.exceptions.NoRecordException;
import org.apache.hadoop.registry.client.types.RegistryPathStatus;
import org.apache.hadoop.registry.client.types.ServiceRecord;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.data.ACL;
import org.apache.zookeeper.data.Stat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.List;
/**
* The Registry operations service.
* <p>
* This service implements the {@link RegistryOperations}
* API by mapping the commands to zookeeper operations, and translating
* results and exceptions back into those specified by the API.
* <p>
* Factory methods should hide the detail that this has been implemented via
* the {@link CuratorService} by returning it cast to that
* {@link RegistryOperations} interface, rather than this implementation class.
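 * <p>
 * A bind/resolve sketch (illustrative; the path is hypothetical, the record
 * contents are elided, and the service must already be started):
 * <pre>
 *   ServiceRecord record = new ServiceRecord();
 *   // ...populate the record...
 *   operations.mknode("/users/example/services/app", true);
 *   operations.bind("/users/example/services/app", record, BindFlags.OVERWRITE);
 *   ServiceRecord resolved = operations.resolve("/users/example/services/app");
 * </pre>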
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class RegistryOperationsService extends CuratorService
implements RegistryOperations {
private static final Logger LOG =
LoggerFactory.getLogger(RegistryOperationsService.class);
private final RegistryUtils.ServiceRecordMarshal serviceRecordMarshal
= new RegistryUtils.ServiceRecordMarshal();
public RegistryOperationsService(String name) {
this(name, null);
}
public RegistryOperationsService() {
this("RegistryOperationsService");
}
public RegistryOperationsService(String name,
RegistryBindingSource bindingSource) {
super(name, bindingSource);
}
/**
* Get the aggregate set of ACLs the client should use
* to create directories
* @return the ACL list
*/
public List<ACL> getClientAcls() {
return getRegistrySecurity().getClientACLs();
}
/**
* Validate a path
* @param path path to validate
* @throws InvalidPathnameException if a path is considered invalid
*/
protected void validatePath(String path) throws InvalidPathnameException {
// currently no checks are performed
}
@Override
public boolean mknode(String path, boolean createParents) throws IOException {
validatePath(path);
return zkMkPath(path, CreateMode.PERSISTENT, createParents, getClientAcls());
}
@Override
public void bind(String path,
ServiceRecord record,
int flags) throws IOException {
Preconditions.checkArgument(record != null, "null record");
validatePath(path);
// validate the record before putting it
RegistryTypeUtils.validateServiceRecord(path, record);
LOG.info("Bound at {} : {}", path, record);
CreateMode mode = CreateMode.PERSISTENT;
byte[] bytes = serviceRecordMarshal.toBytes(record);
zkSet(path, mode, bytes, getClientAcls(),
((flags & BindFlags.OVERWRITE) != 0));
}
@Override
public ServiceRecord resolve(String path) throws IOException {
byte[] bytes = zkRead(path);
ServiceRecord record = serviceRecordMarshal.fromBytes(path,
bytes, ServiceRecord.RECORD_TYPE);
RegistryTypeUtils.validateServiceRecord(path, record);
return record;
}
@Override
public boolean exists(String path) throws IOException {
validatePath(path);
return zkPathExists(path);
}
@Override
public RegistryPathStatus stat(String path) throws IOException {
validatePath(path);
Stat stat = zkStat(path);
String name = RegistryPathUtils.lastPathEntry(path);
RegistryPathStatus status = new RegistryPathStatus(
name,
stat.getCtime(),
stat.getDataLength(),
stat.getNumChildren());
if (LOG.isDebugEnabled()) {
LOG.debug("Stat {} => {}", path, status);
}
return status;
}
@Override
public List<String> list(String path) throws IOException {
validatePath(path);
return zkList(path);
}
@Override
public void delete(String path, boolean recursive) throws IOException {
validatePath(path);
zkDelete(path, recursive, null);
}
}
| 5,383 | 31.829268 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.client.impl.zk;
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import com.google.common.collect.Lists;
import org.apache.commons.lang.StringUtils;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.util.KerberosUtil;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.service.ServiceStateException;
import org.apache.hadoop.util.ZKUtil;
import org.apache.zookeeper.Environment;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.data.ACL;
import org.apache.zookeeper.data.Id;
import org.apache.zookeeper.server.auth.DigestAuthenticationProvider;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.security.auth.login.AppConfigurationEntry;
import java.io.File;
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.security.NoSuchAlgorithmException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.ListIterator;
import java.util.Locale;
import java.util.concurrent.CopyOnWriteArrayList;
import static org.apache.hadoop.registry.client.impl.zk.ZookeeperConfigOptions.*;
import static org.apache.hadoop.registry.client.api.RegistryConstants.*;
/**
* Implement the registry security ... a self contained service for
* testability.
* <p>
* This class contains:
* <ol>
* <li>
* The registry security policy implementation, configuration reading, ACL
* setup and management
* </li>
* <li>Lots of static helper methods to aid security setup and debugging</li>
* </ol>
*/
public class RegistrySecurity extends AbstractService {
private static final Logger LOG =
LoggerFactory.getLogger(RegistrySecurity.class);
public static final String E_UNKNOWN_AUTHENTICATION_MECHANISM =
"Unknown/unsupported authentication mechanism; ";
/**
* there's no default user to add with permissions, so it would be
* impossible to create nodes with unrestricted user access
*/
public static final String E_NO_USER_DETERMINED_FOR_ACLS =
"No user for ACLs determinable from current user or registry option "
+ KEY_REGISTRY_USER_ACCOUNTS;
/**
* Error raised when the registry is tagged as secure but this
* process doesn't have hadoop security enabled.
*/
public static final String E_NO_KERBEROS =
"Registry security is enabled -but Hadoop security is not enabled";
/**
* Access policy options
*/
private enum AccessPolicy {
anon, sasl, digest
}
/**
* Access mechanism
*/
private AccessPolicy access;
/**
* User used for digest auth
*/
private String digestAuthUser;
/**
* Password used for digest auth
*/
private String digestAuthPassword;
/**
* Auth data used for digest auth
*/
private byte[] digestAuthData;
/**
* flag set to true if the registry has security enabled.
*/
private boolean secureRegistry;
/**
* An ACL with read-write access for anyone
*/
public static final ACL ALL_READWRITE_ACCESS =
new ACL(ZooDefs.Perms.ALL, ZooDefs.Ids.ANYONE_ID_UNSAFE);
/**
* An ACL with read access for anyone
*/
public static final ACL ALL_READ_ACCESS =
new ACL(ZooDefs.Perms.READ, ZooDefs.Ids.ANYONE_ID_UNSAFE);
/**
* An ACL list containing the {@link #ALL_READWRITE_ACCESS} entry.
* It is copy on write so can be shared without worry
*/
public static final List<ACL> WorldReadWriteACL;
static {
List<ACL> acls = new ArrayList<ACL>();
acls.add(ALL_READWRITE_ACCESS);
WorldReadWriteACL = new CopyOnWriteArrayList<ACL>(acls);
}
/**
* the list of system ACLs
*/
private final List<ACL> systemACLs = new ArrayList<ACL>();
/**
* A list of digest ACLs which can be added to permissions
* —and cleared later.
*/
private final List<ACL> digestACLs = new ArrayList<ACL>();
/**
* the default kerberos realm
*/
private String kerberosRealm;
/**
* Client context
*/
private String jaasClientContext;
/**
* Client identity
*/
private String jaasClientIdentity;
/**
* Create an instance
* @param name service name
*/
public RegistrySecurity(String name) {
super(name);
}
/**
* Init the service: this sets up security based on the configuration
* @param conf configuration
* @throws Exception
*/
@Override
protected void serviceInit(Configuration conf) throws Exception {
super.serviceInit(conf);
String auth = conf.getTrimmed(KEY_REGISTRY_CLIENT_AUTH,
REGISTRY_CLIENT_AUTH_ANONYMOUS);
switch (auth) {
case REGISTRY_CLIENT_AUTH_KERBEROS:
access = AccessPolicy.sasl;
break;
case REGISTRY_CLIENT_AUTH_DIGEST:
access = AccessPolicy.digest;
break;
case REGISTRY_CLIENT_AUTH_ANONYMOUS:
access = AccessPolicy.anon;
break;
default:
throw new ServiceStateException(E_UNKNOWN_AUTHENTICATION_MECHANISM
+ "\"" + auth + "\"");
}
initSecurity();
}
/**
* Init security.
*
* After this operation, the {@link #systemACLs} list is valid.
* @throws IOException
*/
private void initSecurity() throws IOException {
secureRegistry =
getConfig().getBoolean(KEY_REGISTRY_SECURE, DEFAULT_REGISTRY_SECURE);
systemACLs.clear();
if (secureRegistry) {
addSystemACL(ALL_READ_ACCESS);
// determine the kerberos realm from JVM and settings
kerberosRealm = getConfig().get(KEY_REGISTRY_KERBEROS_REALM,
getDefaultRealmInJVM());
// System Accounts
String system = getOrFail(KEY_REGISTRY_SYSTEM_ACCOUNTS,
DEFAULT_REGISTRY_SYSTEM_ACCOUNTS);
systemACLs.addAll(buildACLs(system, kerberosRealm, ZooDefs.Perms.ALL));
      // user accounts (may be empty, but for digest auth at least one
      // user ACL must be built up)
String user = getConfig().get(KEY_REGISTRY_USER_ACCOUNTS,
DEFAULT_REGISTRY_USER_ACCOUNTS);
List<ACL> userACLs = buildACLs(user, kerberosRealm, ZooDefs.Perms.ALL);
// add self if the current user can be determined
ACL self;
if (UserGroupInformation.isSecurityEnabled()) {
self = createSaslACLFromCurrentUser(ZooDefs.Perms.ALL);
if (self != null) {
userACLs.add(self);
}
}
// here check for UGI having secure on or digest + ID
switch (access) {
case sasl:
// secure + SASL => has to be authenticated
if (!UserGroupInformation.isSecurityEnabled()) {
throw new IOException("Kerberos required for secure registry access");
}
UserGroupInformation currentUser =
UserGroupInformation.getCurrentUser();
jaasClientContext = getOrFail(KEY_REGISTRY_CLIENT_JAAS_CONTEXT,
DEFAULT_REGISTRY_CLIENT_JAAS_CONTEXT);
jaasClientIdentity = currentUser.getShortUserName();
if (LOG.isDebugEnabled()) {
LOG.debug("Auth is SASL user=\"{}\" JAAS context=\"{}\"",
jaasClientIdentity,
jaasClientContext);
}
break;
case digest:
String id = getOrFail(KEY_REGISTRY_CLIENT_AUTHENTICATION_ID, "");
String pass = getOrFail(KEY_REGISTRY_CLIENT_AUTHENTICATION_PASSWORD, "");
if (userACLs.isEmpty()) {
throw new ServiceStateException(E_NO_USER_DETERMINED_FOR_ACLS);
}
digest(id, pass);
ACL acl = new ACL(ZooDefs.Perms.ALL, toDigestId(id, pass));
userACLs.add(acl);
digestAuthUser = id;
digestAuthPassword = pass;
String authPair = id + ":" + pass;
digestAuthData = authPair.getBytes("UTF-8");
if (LOG.isDebugEnabled()) {
LOG.debug("Auth is Digest ACL: {}", aclToString(acl));
}
break;
case anon:
// nothing is needed; account is read only.
if (LOG.isDebugEnabled()) {
LOG.debug("Auth is anonymous");
}
userACLs = new ArrayList<ACL>(0);
break;
}
systemACLs.addAll(userACLs);
} else {
if (LOG.isDebugEnabled()) {
LOG.debug("Registry has no security");
}
// wide open cluster, adding system acls
systemACLs.addAll(WorldReadWriteACL);
}
}
/**
* Add another system ACL
* @param acl add ACL
*/
public void addSystemACL(ACL acl) {
systemACLs.add(acl);
}
/**
* Add a digest ACL
* @param acl add ACL
*/
public boolean addDigestACL(ACL acl) {
if (secureRegistry) {
if (LOG.isDebugEnabled()) {
LOG.debug("Added ACL {}", aclToString(acl));
}
digestACLs.add(acl);
return true;
} else {
if (LOG.isDebugEnabled()) {
LOG.debug("Ignoring added ACL - registry is insecure{}",
aclToString(acl));
}
return false;
}
}
/**
* Reset the digest ACL list
*/
public void resetDigestACLs() {
if (LOG.isDebugEnabled()) {
LOG.debug("Cleared digest ACLs");
}
digestACLs.clear();
}
/**
* Flag to indicate the cluster is secure
* @return true if the config enabled security
*/
public boolean isSecureRegistry() {
return secureRegistry;
}
/**
* Get the system principals
* @return the system principals
*/
public List<ACL> getSystemACLs() {
Preconditions.checkNotNull(systemACLs, "registry security is unitialized");
return Collections.unmodifiableList(systemACLs);
}
/**
* Get all ACLs needed for a client to use when writing to the repo.
* That is: system ACLs, its own ACL, any digest ACLs
* @return the client ACLs
*/
public List<ACL> getClientACLs() {
List<ACL> clientACLs = new ArrayList<ACL>(systemACLs);
clientACLs.addAll(digestACLs);
return clientACLs;
}
/**
* Create a SASL ACL for the user
* @param perms permissions
* @return an ACL for the current user or null if they aren't a kerberos user
* @throws IOException
*/
public ACL createSaslACLFromCurrentUser(int perms) throws IOException {
UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
if (currentUser.hasKerberosCredentials()) {
return createSaslACL(currentUser, perms);
} else {
return null;
}
}
/**
* Given a UGI, create a SASL ACL from it
* @param ugi UGI
* @param perms permissions
* @return a new ACL
*/
public ACL createSaslACL(UserGroupInformation ugi, int perms) {
String userName = ugi.getUserName();
return new ACL(perms, new Id(SCHEME_SASL, userName));
}
/**
* Get a conf option, throw an exception if it is null/empty
* @param key key
* @param defval default value
* @return the value
* @throws IOException if missing
*/
private String getOrFail(String key, String defval) throws IOException {
String val = getConfig().get(key, defval);
if (StringUtils.isEmpty(val)) {
throw new IOException("Missing value for configuration option " + key);
}
return val;
}
/**
* Check for an id:password tuple being valid.
* This test is stricter than that in {@link DigestAuthenticationProvider},
   * which splits the string but does not check that each half is
   * non-empty.
* @param idPasswordPair id:pass pair
* @return true if the pass is considered valid.
*/
public boolean isValid(String idPasswordPair) {
String[] parts = idPasswordPair.split(":");
return parts.length == 2
&& !StringUtils.isEmpty(parts[0])
&& !StringUtils.isEmpty(parts[1]);
}
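  /*
   * Illustrative behaviour (inputs are assumed examples, not from the
   * original source):
   *   isValid("mapred:secret")  => true
   *   isValid("mapred:")        => false  (split yields a single element)
   *   isValid(":secret")        => false  (empty id half)
   *   isValid("mapredsecret")   => false  (no colon separator)
   *   isValid("a:b:c")          => false  (more than one colon)
   */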
/**
* Get the derived kerberos realm.
* @return this is built from the JVM realm, or the configuration if it
* overrides it. If "", it means "don't know".
*/
public String getKerberosRealm() {
return kerberosRealm;
}
/**
* Generate a base-64 encoded digest of the idPasswordPair pair
* @param idPasswordPair id:password
* @return a string that can be used for authentication
*/
public String digest(String idPasswordPair) throws IOException {
if (StringUtils.isEmpty(idPasswordPair) || !isValid(idPasswordPair)) {
throw new IOException("Invalid id:password: " + idPasswordPair);
}
try {
return DigestAuthenticationProvider.generateDigest(idPasswordPair);
} catch (NoSuchAlgorithmException e) {
// unlikely since it is standard to the JVM, but maybe JCE restrictions
// could trigger it
throw new IOException(e.toString(), e);
}
}
/**
* Generate a base-64 encoded digest of the idPasswordPair pair
* @param id ID
* @param password pass
* @return a string that can be used for authentication
* @throws IOException
*/
public String digest(String id, String password) throws IOException {
return digest(id + ":" + password);
}
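  /*
   * A hypothetical usage sketch, assuming an initialized instance of this
   * class; the "bob"/"secret" credentials are illustrative values only:
   *
   *   RegistrySecurity security = ...;
   *   Id digestId = security.toDigestId("bob", "secret");
   *   ACL readOnly = new ACL(ZooDefs.Perms.READ, digestId);
   *   security.addDigestACL(readOnly);
   */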
/**
* Given a digest, create an ID from it
* @param digest digest
* @return ID
*/
public Id toDigestId(String digest) {
return new Id(SCHEME_DIGEST, digest);
}
/**
* Create a Digest ID from an id:pass pair
* @param id ID
* @param password password
* @return an ID
* @throws IOException
*/
public Id toDigestId(String id, String password) throws IOException {
return toDigestId(digest(id, password));
}
/**
* Split up a list of the form
   * <code>sasl:mapred@, digest:5f55d66, sasl:user@EXAMPLE.COM</code>
* into a list of possible ACL values, trimming as needed
*
* The supplied realm is added to entries where
* <ol>
* <li>the string begins "sasl:"</li>
* <li>the string ends with "@"</li>
* </ol>
* No attempt is made to validate any of the acl patterns.
*
* @param aclString list of 0 or more ACLs
* @param realm realm to add
* @return a list of split and potentially patched ACL pairs.
*
*/
public List<String> splitAclPairs(String aclString, String realm) {
List<String> list = Lists.newArrayList(
Splitter.on(',').omitEmptyStrings().trimResults()
.split(aclString));
ListIterator<String> listIterator = list.listIterator();
while (listIterator.hasNext()) {
String next = listIterator.next();
      if (next.startsWith(SCHEME_SASL + ":") && next.endsWith("@")) {
listIterator.set(next + realm);
}
}
return list;
}
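  /*
   * Worked example (values are illustrative): with realm "EXAMPLE.COM",
   * splitAclPairs("sasl:mapred@, digest:5f55d66", "EXAMPLE.COM") returns
   * ["sasl:mapred@EXAMPLE.COM", "digest:5f55d66"]; only entries beginning
   * with "sasl:" and ending with "@" have the realm appended.
   */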
/**
* Parse a string down to an ID, adding a realm if needed
* @param idPair id:data tuple
* @param realm realm to add
* @return the ID.
* @throws IllegalArgumentException if the idPair is invalid
*/
public Id parse(String idPair, String realm) {
int firstColon = idPair.indexOf(':');
int lastColon = idPair.lastIndexOf(':');
if (firstColon == -1 || lastColon == -1 || firstColon != lastColon) {
throw new IllegalArgumentException(
"ACL '" + idPair + "' not of expected form scheme:id");
}
String scheme = idPair.substring(0, firstColon);
String id = idPair.substring(firstColon + 1);
if (id.endsWith("@")) {
Preconditions.checkArgument(
StringUtils.isNotEmpty(realm),
"@ suffixed account but no realm %s", id);
id = id + realm;
}
return new Id(scheme, id);
}
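  /*
   * Example behaviour (values are illustrative): with realm "EXAMPLE.COM",
   *   parse("sasl:mapred@", realm)   => Id("sasl", "mapred@EXAMPLE.COM")
   *   parse("digest:5f55d66", realm) => Id("digest", "5f55d66")
   *   parse("noscheme", realm)       => IllegalArgumentException
   */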
/**
* Parse the IDs, adding a realm if needed, setting the permissions
* @param principalList id string
* @param realm realm to add
* @param perms permissions
* @return the relevant ACLs
* @throws IOException
*/
public List<ACL> buildACLs(String principalList, String realm, int perms)
throws IOException {
List<String> aclPairs = splitAclPairs(principalList, realm);
List<ACL> ids = new ArrayList<ACL>(aclPairs.size());
for (String aclPair : aclPairs) {
ACL newAcl = new ACL();
newAcl.setId(parse(aclPair, realm));
newAcl.setPerms(perms);
ids.add(newAcl);
}
return ids;
}
/**
* Parse an ACL list. This includes configuration indirection
* {@link ZKUtil#resolveConfIndirection(String)}
* @param zkAclConf configuration string
* @return an ACL list
* @throws IOException on a bad ACL parse
*/
public List<ACL> parseACLs(String zkAclConf) throws IOException {
try {
return ZKUtil.parseACLs(ZKUtil.resolveConfIndirection(zkAclConf));
} catch (ZKUtil.BadAclFormatException e) {
throw new IOException("Parsing " + zkAclConf + " :" + e, e);
}
}
/**
* Get the appropriate Kerberos Auth module for JAAS entries
* for this JVM.
* @return a JVM-specific kerberos login module classname.
*/
public static String getKerberosAuthModuleForJVM() {
if (System.getProperty("java.vendor").contains("IBM")) {
return "com.ibm.security.auth.module.Krb5LoginModule";
} else {
return "com.sun.security.auth.module.Krb5LoginModule";
}
}
/**
* JAAS template: {@value}
* Note the semicolon on the last entry
*/
private static final String JAAS_ENTRY =
"%s { %n"
+ " %s required%n"
// kerberos module
+ " keyTab=\"%s\"%n"
+ " debug=true%n"
+ " principal=\"%s\"%n"
+ " useKeyTab=true%n"
+ " useTicketCache=false%n"
+ " doNotPrompt=true%n"
+ " storeKey=true;%n"
+ "}; %n"
;
/**
* Create a JAAS entry for insertion
* @param context context of the entry
* @param principal kerberos principal
* @param keytab keytab
   * @return the formatted JAAS entry
*/
public String createJAASEntry(
String context,
String principal,
File keytab) {
Preconditions.checkArgument(StringUtils.isNotEmpty(principal),
"invalid principal");
Preconditions.checkArgument(StringUtils.isNotEmpty(context),
"invalid context");
    Preconditions.checkArgument(keytab != null && keytab.isFile(),
        "Keytab null or missing: %s", keytab);
String keytabpath = keytab.getAbsolutePath();
// fix up for windows; no-op on unix
keytabpath = keytabpath.replace('\\', '/');
return String.format(
Locale.ENGLISH,
JAAS_ENTRY,
context,
getKerberosAuthModuleForJVM(),
keytabpath,
principal);
}
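  /*
   * For illustration only (context, principal and keytab path are assumed
   * values): createJAASEntry("Client", "zookeeper/host@EXAMPLE.COM",
   * new File("/etc/zk.keytab")) renders, on a Sun/Oracle JVM, approximately:
   *
   *   Client {
   *     com.sun.security.auth.module.Krb5LoginModule required
   *     keyTab="/etc/zk.keytab"
   *     debug=true
   *     principal="zookeeper/host@EXAMPLE.COM"
   *     useKeyTab=true
   *     useTicketCache=false
   *     doNotPrompt=true
   *     storeKey=true;
   *   };
   */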
/**
   * Bind the JVM JAAS setting to the specified JAAS file.
*
* <b>Important:</b> once a file has been loaded the JVM doesn't pick up
* changes
* @param jaasFile the JAAS file
*/
public static void bindJVMtoJAASFile(File jaasFile) {
String path = jaasFile.getAbsolutePath();
if (LOG.isDebugEnabled()) {
LOG.debug("Binding {} to {}", Environment.JAAS_CONF_KEY, path);
}
System.setProperty(Environment.JAAS_CONF_KEY, path);
}
/**
* Set the Zookeeper server property
* {@link ZookeeperConfigOptions#PROP_ZK_SERVER_SASL_CONTEXT}
* to the SASL context. When the ZK server starts, this is the context
* which it will read in
* @param contextName the name of the context
*/
public static void bindZKToServerJAASContext(String contextName) {
System.setProperty(PROP_ZK_SERVER_SASL_CONTEXT, contextName);
}
/**
* Reset any system properties related to JAAS
*/
public static void clearJaasSystemProperties() {
System.clearProperty(Environment.JAAS_CONF_KEY);
}
/**
* Resolve the context of an entry. This is an effective test of
* JAAS setup, because it will relay detected problems up
* @param context context name
* @return the entry
* @throws RuntimeException if there is no context entry found
*/
public static AppConfigurationEntry[] validateContext(String context) {
if (context == null) {
throw new RuntimeException("Null context argument");
}
if (context.isEmpty()) {
throw new RuntimeException("Empty context argument");
}
javax.security.auth.login.Configuration configuration =
javax.security.auth.login.Configuration.getConfiguration();
AppConfigurationEntry[] entries =
configuration.getAppConfigurationEntry(context);
if (entries == null) {
throw new RuntimeException(
String.format("Entry \"%s\" not found; " +
"JAAS config = %s",
context,
describeProperty(Environment.JAAS_CONF_KEY) ));
}
return entries;
}
/**
* Apply the security environment to this curator instance. This
* may include setting up the ZK system properties for SASL
* @param builder curator builder
*/
public void applySecurityEnvironment(CuratorFrameworkFactory.Builder builder) {
if (isSecureRegistry()) {
switch (access) {
case anon:
clearZKSaslClientProperties();
break;
case digest:
// no SASL
clearZKSaslClientProperties();
builder.authorization(SCHEME_DIGEST, digestAuthData);
break;
case sasl:
// bind to the current identity and context within the JAAS file
setZKSaslClientProperties(jaasClientIdentity, jaasClientContext);
}
}
}
/**
   * Set the client properties. This forces the ZK client to
   * fail if it cannot authenticate.
* <b>Important:</b>This is JVM-wide.
* @param username username
* @param context login context
* @throws RuntimeException if the context cannot be found in the current
* JAAS context
*/
public static void setZKSaslClientProperties(String username,
String context) {
RegistrySecurity.validateContext(context);
enableZookeeperClientSASL();
System.setProperty(PROP_ZK_SASL_CLIENT_USERNAME, username);
System.setProperty(PROP_ZK_SASL_CLIENT_CONTEXT, context);
}
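  /*
   * A minimal client-side sketch; the JAAS file path and context name
   * "Client" are assumed examples (the context must exist in the bound file):
   *
   *   RegistrySecurity.bindJVMtoJAASFile(new File("/etc/registry.jaas"));
   *   RegistrySecurity.setZKSaslClientProperties("bob", "Client");
   *   // any ZK client created from now on will attempt SASL authentication
   */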
/**
* Clear all the ZK SASL Client properties
* <b>Important:</b>This is JVM-wide
*/
public static void clearZKSaslClientProperties() {
disableZookeeperClientSASL();
System.clearProperty(PROP_ZK_SASL_CLIENT_CONTEXT);
System.clearProperty(PROP_ZK_SASL_CLIENT_USERNAME);
}
/**
* Turn ZK SASL on
* <b>Important:</b>This is JVM-wide
*/
protected static void enableZookeeperClientSASL() {
System.setProperty(PROP_ZK_ENABLE_SASL_CLIENT, "true");
}
/**
* Force disable ZK SASL bindings.
* <b>Important:</b>This is JVM-wide
*/
public static void disableZookeeperClientSASL() {
System.setProperty(ZookeeperConfigOptions.PROP_ZK_ENABLE_SASL_CLIENT, "false");
}
/**
* Is the system property enabling the SASL client set?
* @return true if the SASL client system property is set.
*/
public static boolean isClientSASLEnabled() {
return Boolean.valueOf(System.getProperty(
ZookeeperConfigOptions.PROP_ZK_ENABLE_SASL_CLIENT, "true"));
}
/**
* Log details about the current Hadoop user at INFO.
* Robust against IOEs when trying to get the current user
*/
public void logCurrentHadoopUser() {
try {
UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
LOG.info("Current user = {}",currentUser);
UserGroupInformation realUser = currentUser.getRealUser();
LOG.info("Real User = {}" , realUser);
} catch (IOException e) {
LOG.warn("Failed to get current user {}, {}", e);
}
}
/**
* Stringify a list of ACLs for logging. Digest ACLs have their
* digest values stripped for security.
* @param acls ACL list
* @return a string for logs, exceptions, ...
*/
public static String aclsToString(List<ACL> acls) {
StringBuilder builder = new StringBuilder();
if (acls == null) {
builder.append("null ACL");
} else {
builder.append('\n');
for (ACL acl : acls) {
builder.append(aclToString(acl))
.append(" ");
}
}
return builder.toString();
}
/**
* Convert an ACL to a string, with any obfuscation needed
* @param acl ACL
* @return ACL string value
*/
public static String aclToString(ACL acl) {
return String.format(Locale.ENGLISH,
"0x%02x: %s",
acl.getPerms(),
idToString(acl.getId())
);
}
/**
* Convert an ID to a string, stripping out all but the first few characters
* of any digest auth hash for security reasons
* @param id ID
* @return a string description of a Zookeeper ID
*/
public static String idToString(Id id) {
String s;
if (id.getScheme().equals(SCHEME_DIGEST)) {
String ids = id.getId();
int colon = ids.indexOf(':');
      if (colon > 0) {
        // show the id but only the first two characters of the digest hash
        ids = ids.substring(0, Math.min(ids.length(), colon + 3));
      }
s = SCHEME_DIGEST + ": " + ids;
} else {
s = id.toString();
}
return s;
}
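  /*
   * Example (digest value assumed): idToString(new Id("digest",
   * "bob:aGFzaHZhbHVl")) returns "digest: bob:aG", i.e. the id plus only
   * the first two characters of the hash.
   */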
/**
* Build up low-level security diagnostics to aid debugging
* @return a string to use in diagnostics
*/
public String buildSecurityDiagnostics() {
StringBuilder builder = new StringBuilder();
builder.append(secureRegistry ? "secure registry; "
: "insecure registry; ");
builder.append("Curator service access policy: ").append(access);
builder.append("; System ACLs: ").append(aclsToString(systemACLs));
builder.append("User: ").append(UgiInfo.fromCurrentUser());
builder.append("; Kerberos Realm: ").append(kerberosRealm);
builder.append(describeProperty(Environment.JAAS_CONF_KEY));
String sasl =
System.getProperty(PROP_ZK_ENABLE_SASL_CLIENT,
DEFAULT_ZK_ENABLE_SASL_CLIENT);
boolean saslEnabled = Boolean.valueOf(sasl);
builder.append(describeProperty(PROP_ZK_ENABLE_SASL_CLIENT,
DEFAULT_ZK_ENABLE_SASL_CLIENT));
if (saslEnabled) {
builder.append("; JAAS Client Identity")
.append("=")
.append(jaasClientIdentity)
.append("; ");
builder.append(KEY_REGISTRY_CLIENT_JAAS_CONTEXT)
.append("=")
.append(jaasClientContext)
.append("; ");
builder.append(describeProperty(PROP_ZK_SASL_CLIENT_USERNAME));
builder.append(describeProperty(PROP_ZK_SASL_CLIENT_CONTEXT));
}
builder.append(describeProperty(PROP_ZK_ALLOW_FAILED_SASL_CLIENTS,
"(undefined but defaults to true)"));
builder.append(describeProperty(
PROP_ZK_SERVER_MAINTAIN_CONNECTION_DESPITE_SASL_FAILURE));
return builder.toString();
}
private static String describeProperty(String name) {
return describeProperty(name, "(undefined)");
}
private static String describeProperty(String name, String def) {
return "; " + name + "=" + System.getProperty(name, def);
}
/**
* Get the default kerberos realm —returning "" if there
* is no realm or other problem
* @return the default realm of the system if it
* could be determined
*/
public static String getDefaultRealmInJVM() {
try {
return KerberosUtil.getDefaultRealm();
      // reflection-based lookup: any of the exceptions below means "unknown"
} catch (ClassNotFoundException ignored) {
// ignored
} catch (NoSuchMethodException ignored) {
// ignored
} catch (IllegalAccessException ignored) {
// ignored
} catch (InvocationTargetException ignored) {
// ignored
}
return "";
}
  /**
   * Create an ACL for a user.
   * @param ugi user identity
   * @param perms permissions to grant
   * @return the ACL for the specified user. If the username does not
   * contain an "@", the service's kerberos realm is added
   */
public ACL createACLForUser(UserGroupInformation ugi, int perms) {
if (LOG.isDebugEnabled()) {
LOG.debug("Creating ACL For ", new UgiInfo(ugi));
}
if (!secureRegistry) {
return ALL_READWRITE_ACCESS;
} else {
return createACLfromUsername(ugi.getUserName(), perms);
}
}
/**
* Given a user name (short or long), create a SASL ACL
* @param username user name; if it doesn't contain an "@" symbol, the
* service's kerberos realm is added
* @param perms permissions
* @return an ACL for the user
*/
public ACL createACLfromUsername(String username, int perms) {
if (!username.contains("@")) {
username = username + "@" + kerberosRealm;
if (LOG.isDebugEnabled()) {
LOG.debug("Appending kerberos realm to make {}", username);
}
}
return new ACL(perms, new Id(SCHEME_SASL, username));
}
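  /*
   * Illustrative behaviour (realm value assumed): with kerberosRealm set to
   * "EXAMPLE.COM", createACLfromUsername("mapred", ZooDefs.Perms.ALL) yields
   * an ACL for Id("sasl", "mapred@EXAMPLE.COM"), while a name that already
   * contains "@", e.g. "mapred@OTHER.REALM", is used unchanged.
   */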
/**
* On demand string-ifier for UGI with extra details
*/
public static class UgiInfo {
public static UgiInfo fromCurrentUser() {
try {
return new UgiInfo(UserGroupInformation.getCurrentUser());
} catch (IOException e) {
LOG.info("Failed to get current user {}", e, e);
return new UgiInfo(null);
}
}
private final UserGroupInformation ugi;
public UgiInfo(UserGroupInformation ugi) {
this.ugi = ugi;
}
@Override
public String toString() {
      if (ugi == null) {
return "(null ugi)";
}
StringBuilder builder = new StringBuilder();
builder.append(ugi.getUserName()).append(": ");
builder.append(ugi.toString());
builder.append(" hasKerberosCredentials=").append(
ugi.hasKerberosCredentials());
builder.append(" isFromKeytab=").append(ugi.isFromKeytab());
builder.append(" kerberos is enabled in Hadoop =").append(UserGroupInformation.isSecurityEnabled());
return builder.toString();
}
}
/**
* on-demand stringifier for a list of ACLs
*/
public static class AclListInfo {
public final List<ACL> acls;
public AclListInfo(List<ACL> acls) {
this.acls = acls;
}
@Override
public String toString() {
return aclsToString(acls);
}
}
}
| 30,392 | 29.271912 | 106 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/Endpoint.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.client.types;
import com.google.common.base.Preconditions;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.registry.client.binding.JsonSerDeser;
import org.apache.hadoop.registry.client.binding.RegistryTypeUtils;
import org.codehaus.jackson.annotate.JsonIgnoreProperties;
import org.codehaus.jackson.map.annotate.JsonSerialize;
import java.net.URI;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* Description of a single service/component endpoint.
* It is designed to be marshalled as JSON.
* <p>
* Every endpoint can have more than one address entry, such as
* a list of URLs to a replicated service, or a (hostname, port)
* pair. Each of these address entries is represented as a string list,
* as that is the only reliably marshallable form of a tuple JSON can represent.
*
*
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonSerialize(include = JsonSerialize.Inclusion.NON_NULL)
public final class Endpoint implements Cloneable {
/**
* API implemented at the end of the binding
*/
public String api;
/**
* Type of address. The standard types are defined in
* {@link AddressTypes}
*/
public String addressType;
/**
* Protocol type. Some standard types are defined in
* {@link ProtocolTypes}
*/
public String protocolType;
/**
* a list of address tuples —tuples whose format depends on the address type
*/
public List<Map<String, String>> addresses;
/**
* Create an empty instance.
*/
public Endpoint() {
}
/**
* Create an endpoint from another endpoint.
* This is a deep clone with a new list of addresses.
* @param that the endpoint to copy from
*/
public Endpoint(Endpoint that) {
this.api = that.api;
this.addressType = that.addressType;
this.protocolType = that.protocolType;
this.addresses = newAddresses(that.addresses.size());
for (Map<String, String> address : that.addresses) {
Map<String, String> addr2 = new HashMap<String, String>(address.size());
addr2.putAll(address);
addresses.add(addr2);
}
}
/**
* Build an endpoint with a list of addresses
* @param api API name
* @param addressType address type
* @param protocolType protocol type
* @param addrs addresses
*/
public Endpoint(String api,
String addressType,
String protocolType,
List<Map<String, String>> addrs) {
this.api = api;
this.addressType = addressType;
this.protocolType = protocolType;
this.addresses = newAddresses(0);
if (addrs != null) {
addresses.addAll(addrs);
}
}
/**
* Build an endpoint with an empty address list
* @param api API name
* @param addressType address type
* @param protocolType protocol type
*/
public Endpoint(String api,
String addressType,
String protocolType) {
this.api = api;
this.addressType = addressType;
this.protocolType = protocolType;
this.addresses = newAddresses(0);
}
/**
* Build an endpoint with a single address entry.
* <p>
   * This constructor is superfluous given the varargs constructor is equivalent
   * for a single element argument. However, type erasure in Java generics
   * causes javac to warn about unchecked generic array creation. This
* constructor, which represents the common "one address" case, does
* not generate compile-time warnings.
* @param api API name
* @param addressType address type
* @param protocolType protocol type
* @param addr address. May be null —in which case it is not added
*/
public Endpoint(String api,
String addressType,
String protocolType,
Map<String, String> addr) {
this(api, addressType, protocolType);
if (addr != null) {
addresses.add(addr);
}
}
/**
* Build an endpoint with a list of addresses
* @param api API name
* @param addressType address type
* @param protocolType protocol type
* @param addrs addresses. Null elements will be skipped
*/
public Endpoint(String api,
String addressType,
String protocolType,
Map<String, String>...addrs) {
this(api, addressType, protocolType);
for (Map<String, String> addr : addrs) {
if (addr!=null) {
addresses.add(addr);
}
}
}
/**
* Create a new address structure of the requested size
* @param size size to create
* @return the new list
*/
private List<Map<String, String>> newAddresses(int size) {
return new ArrayList<Map<String, String>>(size);
}
/**
* Build an endpoint from a list of URIs; each URI
* is ASCII-encoded and added to the list of addresses.
* @param api API name
* @param protocolType protocol type
   * @param uris URIs to convert to a list of tuples
*/
public Endpoint(String api,
String protocolType,
URI... uris) {
this.api = api;
this.addressType = AddressTypes.ADDRESS_URI;
this.protocolType = protocolType;
List<Map<String, String>> addrs = newAddresses(uris.length);
for (URI uri : uris) {
addrs.add(RegistryTypeUtils.uri(uri.toString()));
}
this.addresses = addrs;
}
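  /*
   * A construction sketch; the API name, host and port are assumed examples:
   *
   *   Endpoint web = new Endpoint("http://example.org/api/webui",
   *       ProtocolTypes.PROTOCOL_WEBUI,
   *       URI.create("http://rm1.example.org:8088"));
   *
   * which would marshal to JSON along the lines of:
   *
   *   { "api" : "http://example.org/api/webui",
   *     "addressType" : "uri",
   *     "protocolType" : "webui",
   *     "addresses" : [ { "uri" : "http://rm1.example.org:8088" } ] }
   */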
@Override
public String toString() {
return marshalToString.toString(this);
}
/**
* Validate the record by checking for null fields and other invalid
* conditions
* @throws NullPointerException if a field is null when it
* MUST be set.
* @throws RuntimeException on invalid entries
*/
public void validate() {
Preconditions.checkNotNull(api, "null API field");
Preconditions.checkNotNull(addressType, "null addressType field");
Preconditions.checkNotNull(protocolType, "null protocolType field");
Preconditions.checkNotNull(addresses, "null addresses field");
for (Map<String, String> address : addresses) {
Preconditions.checkNotNull(address, "null element in address");
}
}
/**
* Shallow clone: the lists of addresses are shared
* @return a cloned instance
* @throws CloneNotSupportedException
*/
@Override
public Object clone() throws CloneNotSupportedException {
return super.clone();
}
/**
* Static instance of service record marshalling
*/
private static class Marshal extends JsonSerDeser<Endpoint> {
private Marshal() {
super(Endpoint.class);
}
}
private static final Marshal marshalToString = new Marshal();
}
| 7,404 | 28.858871 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This package contains all the data types which can be saved to the registry
* and/or marshalled to and from JSON.
* <p>
* The core datatypes, {@link org.apache.hadoop.registry.client.types.ServiceRecord},
* and {@link org.apache.hadoop.registry.client.types.Endpoint} are
* what is used to describe services and their protocol endpoints in the registry.
* <p>
* Some adjacent interfaces exist to list attributes of the fields:
* <ul>
* <li>{@link org.apache.hadoop.registry.client.types.AddressTypes}</li>
* <li>{@link org.apache.hadoop.registry.client.types.yarn.PersistencePolicies}</li>
* <li>{@link org.apache.hadoop.registry.client.types.ProtocolTypes}</li>
* </ul>
*
* The {@link org.apache.hadoop.registry.client.types.RegistryPathStatus}
* class is not saved to the registry —it is the status of a registry
* entry that can be retrieved from the API call. It is still
* designed to be marshalled to and from JSON, as it can be served up
* from REST front ends to the registry.
*
*/
package org.apache.hadoop.registry.client.types;
| 1,879 | 43.761905 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/RegistryPathStatus.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.client.types;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.codehaus.jackson.annotate.JsonIgnoreProperties;
import org.codehaus.jackson.annotate.JsonProperty;
/**
* Output of a <code>RegistryOperations.stat()</code> call
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
@JsonIgnoreProperties(ignoreUnknown = true)
public final class RegistryPathStatus {
/**
* Short path in the registry to this entry
*/
public final String path;
/**
* Timestamp
*/
public final long time;
/**
* Entry size in bytes, as returned by the storage infrastructure.
* In zookeeper, even "empty" nodes have a non-zero size.
*/
public final long size;
/**
* Number of child nodes
*/
public final int children;
/**
* Construct an instance
* @param path full path
* @param time time
* @param size entry size
* @param children number of children
*/
public RegistryPathStatus(
@JsonProperty("path") String path,
@JsonProperty("time") long time,
@JsonProperty("size") long size,
@JsonProperty("children") int children) {
this.path = path;
this.time = time;
this.size = size;
this.children = children;
}
/**
* Equality operator checks size, time and path of the entries.
* It does <i>not</i> check {@link #children}.
* @param other the other entry
* @return true if the entries are considered equal.
*/
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (other == null || getClass() != other.getClass()) {
return false;
}
RegistryPathStatus status = (RegistryPathStatus) other;
if (size != status.size) {
return false;
}
if (time != status.time) {
return false;
}
if (path != null ? !path.equals(status.path) : status.path != null) {
return false;
}
return true;
}
/**
* The hash code is derived from the path.
* @return hash code for storing the path in maps.
*/
@Override
public int hashCode() {
return path != null ? path.hashCode() : 0;
}
@Override
public String toString() {
final StringBuilder sb =
new StringBuilder("RegistryPathStatus{");
sb.append("path='").append(path).append('\'');
sb.append(", time=").append(time);
sb.append(", size=").append(size);
sb.append(", children=").append(children);
sb.append('}');
return sb.toString();
}
}
| 3,375 | 26.225806 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/ProtocolTypes.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.client.types;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
 * Some common protocol types.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface ProtocolTypes {
/**
* Addresses are URIs of Hadoop Filesystem paths: {@value}.
*/
String PROTOCOL_FILESYSTEM = "hadoop/filesystem";
/**
* Hadoop IPC, "classic" or protobuf : {@value}.
*/
String PROTOCOL_HADOOP_IPC = "hadoop/IPC";
/**
* Corba IIOP: {@value}.
*/
String PROTOCOL_IIOP = "IIOP";
/**
* REST: {@value}.
*/
String PROTOCOL_REST = "REST";
/**
* Java RMI: {@value}.
*/
String PROTOCOL_RMI = "RMI";
/**
* SunOS RPC, as used by NFS and similar: {@value}.
*/
String PROTOCOL_SUN_RPC = "sunrpc";
/**
* Thrift-based protocols: {@value}.
*/
String PROTOCOL_THRIFT = "thrift";
/**
* Custom TCP protocol: {@value}.
*/
String PROTOCOL_TCP = "tcp";
/**
   * Custom UDP-based protocol: {@value}.
*/
String PROTOCOL_UDP = "udp";
/**
   * Default value; the protocol is unknown: "{@value}"
*/
String PROTOCOL_UNKNOWN = "";
/**
* Web page: {@value}.
*
* This protocol implies that the URLs are designed for
* people to view via web browsers.
*/
String PROTOCOL_WEBUI = "webui";
/**
* Web Services: {@value}.
*/
String PROTOCOL_WSAPI = "WS-*";
/**
* A zookeeper binding: {@value}.
*/
String PROTOCOL_ZOOKEEPER_BINDING = "zookeeper";
}
| 2,365 | 22.66 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/AddressTypes.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.client.types;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
 * Common address types, as string constants.
 * Why strings and not enums? Cross-platform serialization as JSON
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface AddressTypes {
/**
* hostname/FQDN and port pair: {@value}.
   * The host/domain name and port are set as separate fields in the address
   * entry, e.g.
   * <pre>
   * {"host": "namenode.example.org", "port": "50070"}
* </pre>
*/
public static final String ADDRESS_HOSTNAME_AND_PORT = "host/port";
  /**
   * Key of the hostname field in a host/port address entry: {@value}.
   */
  public static final String ADDRESS_HOSTNAME_FIELD = "host";
  /**
   * Key of the port field in a host/port address entry: {@value}.
   */
  public static final String ADDRESS_PORT_FIELD = "port";
/**
* Path <code>/a/b/c</code> style: {@value}.
* The entire path is encoded in a single entry
*
* <pre>
* ["/users/example/dataset"]
* </pre>
*/
public static final String ADDRESS_PATH = "path";
/**
* URI entries: {@value}.
* <pre>
* ["http://example.org"]
* </pre>
*/
public static final String ADDRESS_URI = "uri";
/**
* Zookeeper addresses as a triple : {@value}.
* <p>
   * These are provided as a 3 element tuple of: hostname, port
* and optionally path (depending on the application)
* <p>
* A single element would be
* <pre>
* ["zk1","2181","/registry"]
* </pre>
* An endpoint with multiple elements would list them as
* <pre>
* [
* ["zk1","2181","/registry"]
* ["zk2","1600","/registry"]
* ]
* </pre>
*
   * The third element in each entry, the path, MUST be the same in each entry.
* A client reading the addresses of an endpoint is free to pick any
* of the set, so they must be the same.
*
*/
public static final String ADDRESS_ZOOKEEPER = "zktriple";
/**
* Any other address: {@value}.
*/
public static final String ADDRESS_OTHER = "";
}
| 2,779 | 28.263158 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/ServiceRecord.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.client.types;
import com.google.common.base.Preconditions;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.registry.client.exceptions.InvalidRecordException;
import org.codehaus.jackson.annotate.JsonAnyGetter;
import org.codehaus.jackson.annotate.JsonAnySetter;
import org.codehaus.jackson.map.annotate.JsonSerialize;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* JSON-marshallable description of a single component.
* It supports the deserialization of unknown attributes, but does
* not support their creation.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
@JsonSerialize(include = JsonSerialize.Inclusion.NON_NULL)
public class ServiceRecord implements Cloneable {
/**
   * A type string which MUST be in the serialized JSON. This permits
* fast discarding of invalid entries
*/
public static final String RECORD_TYPE = "JSONServiceRecord";
/**
* The type field. This must be the string {@link #RECORD_TYPE}
*/
public String type = RECORD_TYPE;
/**
* Description string
*/
public String description;
/**
* map to handle unknown attributes.
*/
private Map<String, String> attributes = new HashMap<String, String>(4);
/**
* List of endpoints intended for use to external callers
*/
public List<Endpoint> external = new ArrayList<Endpoint>();
/**
* List of endpoints for use <i>within</i> an application.
*/
public List<Endpoint> internal = new ArrayList<Endpoint>();
/**
   * Create an empty service record with no description.
   * Endpoint lists are initialized to empty lists.
*/
public ServiceRecord() {
}
/**
* Deep cloning constructor
* @param that service record source
*/
public ServiceRecord(ServiceRecord that) {
this.description = that.description;
// others
Map<String, String> thatAttrs = that.attributes;
for (Map.Entry<String, String> entry : thatAttrs.entrySet()) {
attributes.put(entry.getKey(), entry.getValue());
}
// endpoints
List<Endpoint> src = that.internal;
if (src != null) {
internal = new ArrayList<Endpoint>(src.size());
for (Endpoint endpoint : src) {
internal.add(new Endpoint(endpoint));
}
}
src = that.external;
if (src != null) {
external = new ArrayList<Endpoint>(src.size());
for (Endpoint endpoint : src) {
external.add(new Endpoint(endpoint));
}
}
}
/**
* Add an external endpoint
   * @param endpoint endpoint to add
*/
public void addExternalEndpoint(Endpoint endpoint) {
Preconditions.checkArgument(endpoint != null);
endpoint.validate();
external.add(endpoint);
}
/**
* Add an internal endpoint
   * @param endpoint endpoint to add
*/
public void addInternalEndpoint(Endpoint endpoint) {
Preconditions.checkArgument(endpoint != null);
endpoint.validate();
internal.add(endpoint);
}
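  /*
   * A hypothetical assembly sketch; the id, persistence policy and endpoint
   * values are assumed examples:
   *
   *   ServiceRecord record = new ServiceRecord();
   *   record.description = "example service";
   *   record.set(YarnRegistryAttributes.YARN_ID, "application_1408_0001");
   *   record.set(YarnRegistryAttributes.YARN_PERSISTENCE,
   *       PersistencePolicies.APPLICATION);
   *   record.addExternalEndpoint(new Endpoint(
   *       "http://example.org/api/webui",
   *       ProtocolTypes.PROTOCOL_WEBUI,
   *       URI.create("http://host.example.org:8088")));
   */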
/**
* Look up an internal endpoint
* @param api API
* @return the endpoint or null if there was no match
*/
public Endpoint getInternalEndpoint(String api) {
return findByAPI(internal, api);
}
/**
* Look up an external endpoint
* @param api API
* @return the endpoint or null if there was no match
*/
public Endpoint getExternalEndpoint(String api) {
return findByAPI(external, api);
}
/**
* Handle unknown attributes by storing them in the
* {@link #attributes} map
* @param key attribute name
* @param value attribute value.
*/
@JsonAnySetter
public void set(String key, Object value) {
attributes.put(key, value.toString());
}
/**
* The map of "other" attributes set when parsing. These
* are not included in the JSON value of this record when it
* is generated.
* @return a map of any unknown attributes in the deserialized JSON.
*/
@JsonAnyGetter
public Map<String, String> attributes() {
return attributes;
}
/**
* Get the "other" attribute with a specific key
* @param key key to look up
* @return the value or null
*/
public String get(String key) {
return attributes.get(key);
}
/**
* Get the "other" attribute with a specific key.
* @param key key to look up
* @param defVal default value
* @return the value as a string,
   * or <code>defVal</code> if the value was not present
*/
public String get(String key, String defVal) {
String val = attributes.get(key);
return val != null ? val: defVal;
}
/**
* Find an endpoint by its API
* @param list list
* @param api api name
* @return the endpoint or null if there was no match
*/
private Endpoint findByAPI(List<Endpoint> list, String api) {
for (Endpoint endpoint : list) {
if (endpoint.api.equals(api)) {
return endpoint;
}
}
return null;
}
@Override
public String toString() {
final StringBuilder sb =
new StringBuilder("ServiceRecord{");
sb.append("description='").append(description).append('\'');
sb.append("; external endpoints: {");
for (Endpoint endpoint : external) {
sb.append(endpoint).append("; ");
}
sb.append("}; internal endpoints: {");
for (Endpoint endpoint : internal) {
sb.append(endpoint != null ? endpoint.toString() : "NULL ENDPOINT");
sb.append("; ");
}
sb.append('}');
    sb.append(", attributes: {");
    for (Map.Entry<String, String> attr : attributes.entrySet()) {
      sb.append("\"").append(attr.getKey()).append("\"=\"")
          .append(attr.getValue()).append("\" ");
    }
sb.append('}');
sb.append('}');
return sb.toString();
}
/**
* Shallow clone: all endpoints will be shared across instances
* @return a clone of the instance
* @throws CloneNotSupportedException
*/
@Override
protected Object clone() throws CloneNotSupportedException {
return super.clone();
}
}
| 6,981 | 26.928 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/yarn/PersistencePolicies.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.client.types.yarn;
import org.apache.hadoop.registry.client.types.ServiceRecord;
/**
* Persistence policies for {@link ServiceRecord}
*/
public interface PersistencePolicies {
/**
* The record persists until removed manually: {@value}.
*/
String PERMANENT = "permanent";
/**
* Remove when the YARN application defined in the id field
* terminates: {@value}.
*/
String APPLICATION = "application";
/**
* Remove when the current YARN application attempt ID finishes: {@value}.
*/
String APPLICATION_ATTEMPT = "application-attempt";
/**
   * Remove when the YARN container in the ID field finishes: {@value}.
*/
String CONTAINER = "container";
}
| 1,534 | 29.098039 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/yarn/YarnRegistryAttributes.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.client.types.yarn;
/**
* YARN specific attributes in the registry
*/
public class YarnRegistryAttributes {
/**
* ID. For containers: container ID. For application instances, application ID.
*/
public static final String YARN_ID = "yarn:id";
public static final String YARN_PERSISTENCE = "yarn:persistence";
}
| 1,167 | 35.5 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Server-side classes for the registry
* <p>
* These are components intended to be deployed only on servers or in test
* JVMs, rather than on client machines.
* <p>
* Example components are: server-side ZK support, a REST service, etc.
*/
package org.apache.hadoop.registry.server;
| 1,106 | 38.535714 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Basic services for the YARN registry
* <ul>
* <li>
* The {@link org.apache.hadoop.registry.server.services.RegistryAdminService}
* extends the shared Yarn Registry client with registry setup and
* (potentially asynchronous) administrative actions.
* </li>
* <li>
* The {@link org.apache.hadoop.registry.server.services.MicroZookeeperService}
* is a transient Zookeeper instance bound to the YARN service lifecycle.
* It is suitable for testing.
* </li>
* <li>
* The {@link org.apache.hadoop.registry.server.services.AddingCompositeService}
* extends the standard YARN composite service by making its add and remove
* methods public. It is a utility service used in parts of the codebase
* </li>
* </ul>
*/
package org.apache.hadoop.registry.server.services;
| 1,638 | 39.975 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/RegistryAdminService.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.server.services;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.lang.StringUtils;
import org.apache.curator.framework.api.BackgroundCallback;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
import org.apache.hadoop.fs.PathNotFoundException;
import org.apache.hadoop.service.ServiceStateException;
import org.apache.hadoop.registry.client.binding.RegistryUtils;
import org.apache.hadoop.registry.client.binding.RegistryPathUtils;
import org.apache.hadoop.registry.client.exceptions.InvalidRecordException;
import org.apache.hadoop.registry.client.exceptions.NoPathPermissionsException;
import org.apache.hadoop.registry.client.exceptions.NoRecordException;
import org.apache.hadoop.registry.client.impl.zk.RegistryBindingSource;
import org.apache.hadoop.registry.client.impl.zk.RegistryOperationsService;
import org.apache.hadoop.registry.client.impl.zk.RegistrySecurity;
import org.apache.hadoop.registry.client.types.RegistryPathStatus;
import org.apache.hadoop.registry.client.types.ServiceRecord;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.data.ACL;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.EOFException;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;
/**
* Administrator service for the registry. This is the one with
* permissions to create the base directories and those for users.
*
* It also includes support for asynchronous operations, so that
* zookeeper connectivity problems do not hold up the server code
* performing the actions.
*
* Any action queued via {@link #submit(Callable)} will be
* run asynchronously. The {@link #createDirAsync(String, List, boolean)}
 * is an example of such an action.
*
* A key async action is the depth-first tree purge, which supports
* pluggable policies for deleting entries. The method
* {@link #purge(String, NodeSelector, PurgePolicy, BackgroundCallback)}
 * implements the recursive purge operation; the class
 * <code>AsyncPurge</code> provides the asynchronous scheduling of this.
*/
public class RegistryAdminService extends RegistryOperationsService {
private static final Logger LOG =
LoggerFactory.getLogger(RegistryAdminService.class);
/**
   * The ACL permissions for the user's home directory.
*/
public static final int USER_HOMEDIR_ACL_PERMISSIONS =
ZooDefs.Perms.READ | ZooDefs.Perms.WRITE
| ZooDefs.Perms.CREATE | ZooDefs.Perms.DELETE;
/**
* Executor for async operations
*/
protected final ExecutorService executor;
/**
* Construct an instance of the service
* @param name service name
*/
public RegistryAdminService(String name) {
this(name, null);
}
/**
   * Construct an instance of the service, using the
   * specified binding source to bind to ZK
* @param name service name
* @param bindingSource provider of ZK binding information
*/
public RegistryAdminService(String name,
RegistryBindingSource bindingSource) {
super(name, bindingSource);
executor = Executors.newCachedThreadPool(
new ThreadFactory() {
private AtomicInteger counter = new AtomicInteger(1);
@Override
public Thread newThread(Runnable r) {
return new Thread(r,
"RegistryAdminService " + counter.getAndIncrement());
}
});
}
/**
* Stop the service: halt the executor.
* @throws Exception exception.
*/
@Override
protected void serviceStop() throws Exception {
stopExecutor();
super.serviceStop();
}
/**
* Stop the executor if it is not null.
* This uses {@link ExecutorService#shutdownNow()}
   * and so does not wait for queued or running tasks to complete.
*/
protected synchronized void stopExecutor() {
if (executor != null) {
executor.shutdownNow();
}
}
/**
* Get the executor
* @return the executor
*/
protected ExecutorService getExecutor() {
return executor;
}
/**
* Submit a callable
* @param callable callable
* @param <V> type of the final get
* @return a future to wait on
*/
public <V> Future<V> submit(Callable<V> callable) {
if (LOG.isDebugEnabled()) {
LOG.debug("Submitting {}", callable);
}
return getExecutor().submit(callable);
}
/**
* Asynchronous operation to create a directory
* @param path path
* @param acls ACL list
* @param createParents flag to indicate parent dirs should be created
* as needed
* @return the future which will indicate whether or not the operation
* succeeded —and propagate any exceptions
* @throws IOException
*/
public Future<Boolean> createDirAsync(final String path,
final List<ACL> acls,
final boolean createParents) throws IOException {
return submit(new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
return maybeCreate(path, CreateMode.PERSISTENT,
acls, createParents);
}
});
}
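  /*
   * Waiting on the asynchronous result; the path and ACL source are assumed
   * examples:
   *
   *   Future<Boolean> f = admin.createDirAsync("/users/example",
   *       admin.getClientAcls(), true);
   *   boolean created = f.get();  // failures surface as ExecutionException
   */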
/**
* Init operation sets up the system ACLs.
* @param conf configuration of the service
* @throws Exception
*/
@Override
protected void serviceInit(Configuration conf) throws Exception {
super.serviceInit(conf);
RegistrySecurity registrySecurity = getRegistrySecurity();
if (registrySecurity.isSecureRegistry()) {
ACL sasl = registrySecurity.createSaslACLFromCurrentUser(ZooDefs.Perms.ALL);
registrySecurity.addSystemACL(sasl);
LOG.info("Registry System ACLs:",
RegistrySecurity.aclsToString(
registrySecurity.getSystemACLs()));
}
}
/**
* Start the service, including creating base directories with permissions
* @throws Exception
*/
@Override
protected void serviceStart() throws Exception {
super.serviceStart();
// create the root directories
try {
createRootRegistryPaths();
} catch (NoPathPermissionsException e) {
String message = String.format(Locale.ENGLISH,
"Failed to create root paths {%s};" +
"%ndiagnostics={%s}" +
"%ncurrent registry is:" +
"%n{%s}",
e,
bindingDiagnosticDetails(),
dumpRegistryRobustly(true));
LOG.error(" Failure {}", e, e);
LOG.error(message);
// TODO: this is something temporary to deal with the problem
// that jenkins is failing this test
throw new NoPathPermissionsException(e.getPath().toString(), message, e);
}
}
/**
* Create the initial registry paths
* @throws IOException any failure
*/
@VisibleForTesting
public void createRootRegistryPaths() throws IOException {
List<ACL> systemACLs = getRegistrySecurity().getSystemACLs();
LOG.info("System ACLs {}",
RegistrySecurity.aclsToString(systemACLs));
maybeCreate("", CreateMode.PERSISTENT, systemACLs, false);
maybeCreate(PATH_USERS, CreateMode.PERSISTENT,
systemACLs, false);
maybeCreate(PATH_SYSTEM_SERVICES,
CreateMode.PERSISTENT,
systemACLs, false);
}
/**
* Get the path to a user's home dir
* @param username username
* @return a path for services underneath
*/
protected String homeDir(String username) {
return RegistryUtils.homePathForUser(username);
}
/**
* Set up the ACL for the user.
* <b>Important: this must run client-side as it needs
* to know the id:pass tuple for a user</b>
* @param username user name
* @param perms permissions
* @return an ACL list
* @throws IOException ACL creation/parsing problems
*/
public List<ACL> aclsForUser(String username, int perms) throws IOException {
List<ACL> clientACLs = getClientAcls();
RegistrySecurity security = getRegistrySecurity();
if (security.isSecureRegistry()) {
clientACLs.add(security.createACLfromUsername(username, perms));
}
return clientACLs;
}
/**
* Start an async operation to create the home path for a user
* if it does not exist
* @param shortname username, without any @REALM in kerberos
   * @return a future for the creation operation, or null if the home
   * path already exists
* @throws IOException any failure while setting up the operation
*
*/
public Future<Boolean> initUserRegistryAsync(final String shortname)
throws IOException {
String homeDir = homeDir(shortname);
if (!exists(homeDir)) {
      // create the directory; the user's home path does not exist yet
return createDirAsync(homeDir,
aclsForUser(shortname,
USER_HOMEDIR_ACL_PERMISSIONS),
false);
}
return null;
}
/**
* Create the home path for a user if it does not exist.
*
* This uses {@link #initUserRegistryAsync(String)} and then waits for the
* result ... the code path is the same as the async operation; this just
* picks up and relays/converts exceptions
* @param username username
* @return the path created
* @throws IOException any failure
*
*/
public String initUserRegistry(final String username)
throws IOException {
try {
Future<Boolean> future = initUserRegistryAsync(username);
future.get();
} catch (InterruptedException e) {
throw (InterruptedIOException)
(new InterruptedIOException(e.toString()).initCause(e));
} catch (ExecutionException e) {
Throwable cause = e.getCause();
if (cause instanceof IOException) {
throw (IOException) (cause);
} else {
throw new IOException(cause.toString(), cause);
}
}
return homeDir(username);
}
/**
   * Validate the kerberos realm.
* <ul>
* <li>Insecure: not needed.</li>
* <li>Secure: must have been determined.</li>
* </ul>
*/
protected void verifyRealmValidity() throws ServiceStateException {
if (isSecure()) {
String realm = getRegistrySecurity().getKerberosRealm();
if (StringUtils.isEmpty(realm)) {
throw new ServiceStateException("Cannot determine service realm");
}
if (LOG.isDebugEnabled()) {
LOG.debug("Started Registry operations in realm {}", realm);
}
}
}
/**
* Policy to purge entries
*/
public enum PurgePolicy {
PurgeAll,
FailOnChildren,
SkipOnChildren
}
/**
* Recursive operation to purge all matching records under a base path.
* <ol>
* <li>Uses a depth first search</li>
* <li>A match is on ID and persistence policy, or, if policy==-1, any match</li>
* <li>If a record matches then it is deleted without any child searches</li>
* <li>Deletions will be asynchronous if a callback is provided</li>
* </ol>
*
* The code is designed to be robust against parallel deletions taking place;
   * in such a case it will stop attempting that part of the tree. This
   * avoids the situation of more than one purge happening in parallel, with
   * one of the purge operations deleting the node tree above the other.
* @param path base path
* @param selector selector for the purge policy
* @param purgePolicy what to do if there is a matching record with children
* @param callback optional curator callback
   * @return the number of delete operations performed. As deletes may be for
* everything under a path, this may be less than the number of records
* actually deleted
* @throws IOException problems
* @throws PathIsNotEmptyDirectoryException if an entry cannot be deleted
* as it has children and the purge policy is FailOnChildren
*/
@VisibleForTesting
public int purge(String path,
NodeSelector selector,
PurgePolicy purgePolicy,
BackgroundCallback callback) throws IOException {
boolean toDelete = false;
// look at self to see if it has a service record
Map<String, RegistryPathStatus> childEntries;
Collection<RegistryPathStatus> entries;
try {
// list this path's children
childEntries = RegistryUtils.statChildren(this, path);
entries = childEntries.values();
} catch (PathNotFoundException e) {
// there's no record here, it may have been deleted already.
// exit
return 0;
}
try {
RegistryPathStatus registryPathStatus = stat(path);
ServiceRecord serviceRecord = resolve(path);
      // there is a record here; ask the selector whether it matches
toDelete = selector.shouldSelect(path, registryPathStatus, serviceRecord);
} catch (EOFException ignored) {
// ignore
} catch (InvalidRecordException ignored) {
// ignore
} catch (NoRecordException ignored) {
// ignore
} catch (PathNotFoundException e) {
// there's no record here, it may have been deleted already.
// exit
return 0;
}
if (toDelete && !entries.isEmpty()) {
if (LOG.isDebugEnabled()) {
LOG.debug("Match on record @ {} with children ", path);
}
// there's children
switch (purgePolicy) {
case SkipOnChildren:
// don't do the deletion... continue to next record
if (LOG.isDebugEnabled()) {
LOG.debug("Skipping deletion");
}
toDelete = false;
break;
case PurgeAll:
// mark for deletion
if (LOG.isDebugEnabled()) {
LOG.debug("Scheduling for deletion with children");
}
toDelete = true;
entries = new ArrayList<RegistryPathStatus>(0);
break;
case FailOnChildren:
if (LOG.isDebugEnabled()) {
LOG.debug("Failing deletion operation");
}
throw new PathIsNotEmptyDirectoryException(path);
}
}
int deleteOps = 0;
if (toDelete) {
try {
zkDelete(path, true, callback);
} catch (PathNotFoundException e) {
        // a sign that the path was deleted during the operation.
// this is a no-op, and all children can be skipped
return deleteOps;
}
deleteOps++;
}
// now go through the children
for (RegistryPathStatus status : entries) {
String childname = status.path;
String childpath = RegistryPathUtils.join(path, childname);
deleteOps += purge(childpath,
selector,
purgePolicy,
callback);
}
return deleteOps;
}
/**
* Comparator used for purge logic
*/
public interface NodeSelector {
boolean shouldSelect(String path,
RegistryPathStatus registryPathStatus,
ServiceRecord serviceRecord);
}
/**
* An async registry purge action taking
* a selector which decides what to delete
*/
public class AsyncPurge implements Callable<Integer> {
private final BackgroundCallback callback;
private final NodeSelector selector;
private final String path;
private final PurgePolicy purgePolicy;
public AsyncPurge(String path,
NodeSelector selector,
PurgePolicy purgePolicy,
BackgroundCallback callback) {
this.callback = callback;
this.selector = selector;
this.path = path;
this.purgePolicy = purgePolicy;
}
@Override
public Integer call() throws Exception {
if (LOG.isDebugEnabled()) {
LOG.debug("Executing {}", this);
}
return purge(path,
selector,
purgePolicy,
callback);
}
@Override
public String toString() {
return String.format(
"Record purge under %s with selector %s",
path, selector);
}
}
}
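/**
 * Illustrative usage sketch, not part of the original source: purges every
 * record whose {@code yarn:id} attribute equals a given value. The class
 * name and the attribute key used here are assumptions for the example;
 * the real RM-side selector is {@code SelectByYarnPersistence}.
 */
class RegistryAdminServicePurgeExample {
  static int purgeById(RegistryAdminService admin, final String id)
      throws IOException {
    return admin.purge("/",
        new RegistryAdminService.NodeSelector() {
          @Override
          public boolean shouldSelect(String path,
              RegistryPathStatus registryPathStatus,
              ServiceRecord serviceRecord) {
            // hypothetical policy: match purely on the record's yarn:id
            return id.equals(serviceRecord.get("yarn:id", ""));
          }
        },
        RegistryAdminService.PurgePolicy.SkipOnChildren,
        null); // null callback: deletions happen synchronously
  }
}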
| 16,766 | 30.635849 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/MicroZookeeperService.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.server.services;
import com.google.common.base.Preconditions;
import org.apache.commons.lang.StringUtils;
import org.apache.curator.ensemble.fixed.FixedEnsembleProvider;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.registry.client.api.RegistryConstants;
import org.apache.hadoop.registry.client.impl.zk.BindingInformation;
import org.apache.hadoop.registry.client.impl.zk.RegistryBindingSource;
import org.apache.hadoop.registry.client.impl.zk.RegistryInternalConstants;
import org.apache.hadoop.registry.client.impl.zk.RegistrySecurity;
import org.apache.hadoop.registry.client.impl.zk.ZookeeperConfigOptions;
import org.apache.zookeeper.server.ServerCnxnFactory;
import org.apache.zookeeper.server.ZooKeeperServer;
import org.apache.zookeeper.server.persistence.FileTxnSnapLog;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
/**
* This is a small, localhost Zookeeper service instance that is contained
 * in a YARN service; it has been derived from Apache Twill.
* <p>
* It implements {@link RegistryBindingSource} and provides binding information,
* <i>once started</i>. Until {@link #start()} is called, the hostname and
* port may be undefined. Accordingly, the service raises an exception in this
* condition.
* <p>
* If you wish to chain together a registry service with this one under
* the same {@code CompositeService}, this service must be added
* as a child first.
* <p>
* It also sets the configuration parameter
* {@link RegistryConstants#KEY_REGISTRY_ZK_QUORUM}
* to its connection string. Any code with access to the service configuration
* can view it.
*/
@InterfaceStability.Evolving
public class MicroZookeeperService
extends AbstractService
implements RegistryBindingSource, RegistryConstants,
ZookeeperConfigOptions,
    MicroZookeeperServiceKeys {
private static final Logger
LOG = LoggerFactory.getLogger(MicroZookeeperService.class);
private File instanceDir;
private File dataDir;
private int tickTime;
private int port;
private String host;
private boolean secureServer;
private ServerCnxnFactory factory;
private BindingInformation binding;
private File confDir;
private StringBuilder diagnostics = new StringBuilder();
/**
* Create an instance
* @param name service name
*/
public MicroZookeeperService(String name) {
super(name);
}
/**
* Get the connection string.
* @return the string
* @throws IllegalStateException if the connection is not yet valid
*/
public String getConnectionString() {
Preconditions.checkState(factory != null, "service not started");
InetSocketAddress addr = factory.getLocalAddress();
return String.format("%s:%d", addr.getHostName(), addr.getPort());
}
/**
* Get the connection address
* @return the connection as an address
* @throws IllegalStateException if the connection is not yet valid
*/
public InetSocketAddress getConnectionAddress() {
Preconditions.checkState(factory != null, "service not started");
return factory.getLocalAddress();
}
/**
* Create an inet socket addr from the local host + port number
* @param port port to use
* @return a (hostname, port) pair
* @throws UnknownHostException if the server cannot resolve the host
*/
private InetSocketAddress getAddress(int port) throws UnknownHostException {
return new InetSocketAddress(host, port < 0 ? 0 : port);
}
/**
* Initialize the service, including choosing a path for the data
* @param conf configuration
* @throws Exception
*/
@Override
protected void serviceInit(Configuration conf) throws Exception {
port = conf.getInt(KEY_ZKSERVICE_PORT, 0);
tickTime = conf.getInt(KEY_ZKSERVICE_TICK_TIME,
ZooKeeperServer.DEFAULT_TICK_TIME);
String instancedirname = conf.getTrimmed(
KEY_ZKSERVICE_DIR, "");
host = conf.getTrimmed(KEY_ZKSERVICE_HOST, DEFAULT_ZKSERVICE_HOST);
if (instancedirname.isEmpty()) {
File testdir = new File(System.getProperty("test.dir", "target"));
instanceDir = new File(testdir, "zookeeper" + getName());
} else {
instanceDir = new File(instancedirname);
FileUtil.fullyDelete(instanceDir);
}
LOG.debug("Instance directory is {}", instanceDir);
mkdirStrict(instanceDir);
dataDir = new File(instanceDir, "data");
confDir = new File(instanceDir, "conf");
mkdirStrict(dataDir);
mkdirStrict(confDir);
super.serviceInit(conf);
}
/**
* Create a directory, ignoring if the dir is already there,
* and failing if a file or something else was at the end of that
* path
* @param dir dir to guarantee the existence of
* @throws IOException IO problems, or path exists but is not a dir
*/
private void mkdirStrict(File dir) throws IOException {
if (!dir.mkdirs()) {
if (!dir.isDirectory()) {
throw new IOException("Failed to mkdir " + dir);
}
}
}
/**
* Append a formatted string to the diagnostics.
* <p>
* A newline is appended afterwards.
* @param text text including any format commands
   * @param args arguments for the format operation.
*/
protected void addDiagnostics(String text, Object ... args) {
diagnostics.append(String.format(text, args)).append('\n');
}
/**
* Get the diagnostics info
* @return the diagnostics string built up
*/
public String getDiagnostics() {
return diagnostics.toString();
}
/**
   * Set up security. This must be done prior to creating
* the ZK instance, as it sets up JAAS if that has not been done already.
*
* @return true if the cluster has security enabled.
*/
public boolean setupSecurity() throws IOException {
Configuration conf = getConfig();
String jaasContext = conf.getTrimmed(KEY_REGISTRY_ZKSERVICE_JAAS_CONTEXT);
secureServer = StringUtils.isNotEmpty(jaasContext);
if (secureServer) {
RegistrySecurity.validateContext(jaasContext);
RegistrySecurity.bindZKToServerJAASContext(jaasContext);
// policy on failed auth
System.setProperty(PROP_ZK_ALLOW_FAILED_SASL_CLIENTS,
conf.get(KEY_ZKSERVICE_ALLOW_FAILED_SASL_CLIENTS,
"true"));
      // needed so that you can use sasl: strings in the registry
System.setProperty(RegistryInternalConstants.ZOOKEEPER_AUTH_PROVIDER +".1",
RegistryInternalConstants.SASLAUTHENTICATION_PROVIDER);
String serverContext =
System.getProperty(PROP_ZK_SERVER_SASL_CONTEXT);
addDiagnostics("Server JAAS context s = %s", serverContext);
return true;
} else {
return false;
}
}
/**
* Startup: start ZK. It is only after this that
* the binding information is valid.
* @throws Exception
*/
@Override
protected void serviceStart() throws Exception {
setupSecurity();
ZooKeeperServer zkServer = new ZooKeeperServer();
FileTxnSnapLog ftxn = new FileTxnSnapLog(dataDir, dataDir);
zkServer.setTxnLogFactory(ftxn);
zkServer.setTickTime(tickTime);
LOG.info("Starting Local Zookeeper service");
factory = ServerCnxnFactory.createFactory();
factory.configure(getAddress(port), -1);
factory.startup(zkServer);
String connectString = getConnectionString();
LOG.info("In memory ZK started at {}\n", connectString);
if (LOG.isDebugEnabled()) {
StringWriter sw = new StringWriter();
PrintWriter pw = new PrintWriter(sw);
zkServer.dumpConf(pw);
pw.flush();
LOG.debug(sw.toString());
}
binding = new BindingInformation();
binding.ensembleProvider = new FixedEnsembleProvider(connectString);
binding.description =
getName() + " reachable at \"" + connectString + "\"";
addDiagnostics(binding.description);
// finally: set the binding information in the config
getConfig().set(KEY_REGISTRY_ZK_QUORUM, connectString);
}
/**
* When the service is stopped, it deletes the data directory
* and its contents
* @throws Exception
*/
@Override
protected void serviceStop() throws Exception {
if (factory != null) {
factory.shutdown();
factory = null;
}
if (dataDir != null) {
FileUtil.fullyDelete(dataDir);
}
}
@Override
public BindingInformation supplyBindingInformation() {
Preconditions.checkNotNull(binding,
"Service is not started: binding information undefined");
return binding;
}
}
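/**
 * Minimal startup sketch, not part of the original file: boots the in-JVM
 * ZooKeeper on any free port and prints its connection string. The service
 * name and configuration values here are illustrative only.
 */
class MicroZookeeperServiceExample {
  static void run() throws Exception {
    MicroZookeeperService zk = new MicroZookeeperService("example");
    Configuration conf = new Configuration();
    conf.setInt(MicroZookeeperServiceKeys.KEY_ZKSERVICE_PORT, 0); // 0 = any port
    zk.init(conf);
    zk.start();
    try {
      // only valid after start(): "host:port" of the embedded server
      System.out.println("ZK listening at " + zk.getConnectionString());
    } finally {
      zk.stop();
    }
  }
}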
| 9,613 | 32.971731 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/MicroZookeeperServiceKeys.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.server.services;
import org.apache.hadoop.registry.client.api.RegistryConstants;
/**
* Service keys for configuring the {@link MicroZookeeperService}.
* These are not used in registry clients or the RM-side service,
* so are kept separate.
*/
public interface MicroZookeeperServiceKeys {
public static final String ZKSERVICE_PREFIX =
RegistryConstants.REGISTRY_PREFIX + "zk.service.";
/**
* Key to define the JAAS context for the ZK service: {@value}.
*/
public static final String KEY_REGISTRY_ZKSERVICE_JAAS_CONTEXT =
ZKSERVICE_PREFIX + "service.jaas.context";
/**
   * ZK server tick time: {@value}
*/
public static final String KEY_ZKSERVICE_TICK_TIME =
ZKSERVICE_PREFIX + "ticktime";
/**
* host to register on: {@value}.
*/
public static final String KEY_ZKSERVICE_HOST = ZKSERVICE_PREFIX + "host";
/**
   * Default host to serve on; this is <code>localhost</code>, as it
* is the only one guaranteed to be available: {@value}.
*/
public static final String DEFAULT_ZKSERVICE_HOST = "localhost";
/**
* port; 0 or below means "any": {@value}
*/
public static final String KEY_ZKSERVICE_PORT = ZKSERVICE_PREFIX + "port";
/**
* Directory containing data: {@value}
*/
public static final String KEY_ZKSERVICE_DIR = ZKSERVICE_PREFIX + "dir";
/**
* Should failed SASL clients be allowed: {@value}?
*
* Default is the ZK default: true
*/
public static final String KEY_ZKSERVICE_ALLOW_FAILED_SASL_CLIENTS =
ZKSERVICE_PREFIX + "allow.failed.sasl.clients";
}
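/**
 * Illustrative sketch, not part of the original interface: shows how these
 * keys might be set before initializing a MicroZookeeperService. The data
 * directory value is an assumption for the example.
 */
class MicroZookeeperServiceKeysExample {
  static org.apache.hadoop.conf.Configuration exampleConf() {
    org.apache.hadoop.conf.Configuration conf =
        new org.apache.hadoop.conf.Configuration();
    conf.setInt(MicroZookeeperServiceKeys.KEY_ZKSERVICE_PORT, 0);
    conf.set(MicroZookeeperServiceKeys.KEY_ZKSERVICE_DIR,
        "target/zk-example"); // hypothetical data directory
    conf.set(MicroZookeeperServiceKeys.KEY_ZKSERVICE_HOST,
        MicroZookeeperServiceKeys.DEFAULT_ZKSERVICE_HOST);
    return conf;
  }
}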
| 2,404 | 33.357143 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/AddingCompositeService.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.server.services;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.service.CompositeService;
import org.apache.hadoop.service.Service;
/**
* Composite service that exports the add/remove methods.
* <p>
 * This allows external classes to add services via these methods, after which
 * they follow the same lifecycle.
* <p>
 * It is essential that any service added is in a lifecycle state compatible
 * with that of the parent service. Specifically, do not add an uninited
 * service to a parent that is already inited, as the <code>start</code>
 * operation will then fail.
*
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class AddingCompositeService extends CompositeService {
public AddingCompositeService(String name) {
super(name);
}
@Override
public void addService(Service service) {
super.addService(service);
}
@Override
public boolean removeService(Service service) {
return super.removeService(service);
}
}
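/**
 * Illustrative sketch, not part of the original file: a composite that
 * wires an embedded ZK service ahead of a registry service, per the
 * ordering caveat above. The class name and parameters are hypothetical.
 */
class ExampleRegistryComposite extends AddingCompositeService {
  ExampleRegistryComposite(Service zookeeper, Service registry) {
    super("example-registry-composite");
    // order matters: children are inited/started in addition order,
    // so the ZK binding source must come first
    addService(zookeeper);
    addService(registry);
  }
}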
| 1,914 | 32.596491 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/DeleteCompletionCallback.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.server.services;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.api.BackgroundCallback;
import org.apache.curator.framework.api.CuratorEvent;
import org.apache.hadoop.registry.server.integration.RMRegistryOperationsService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.concurrent.atomic.AtomicInteger;
/**
* Curator callback for delete operations completing.
* <p>
* This callback logs at debug and increments the event counter.
*/
public class DeleteCompletionCallback implements BackgroundCallback {
private static final Logger LOG =
      LoggerFactory.getLogger(DeleteCompletionCallback.class);
private AtomicInteger events = new AtomicInteger(0);
@Override
public void processResult(CuratorFramework client,
CuratorEvent event) throws
Exception {
if (LOG.isDebugEnabled()) {
LOG.debug("Delete event {}", event);
}
events.incrementAndGet();
}
/**
* Get the number of deletion events
* @return the count of events
*/
public int getEventCount() {
return events.get();
}
}
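/**
 * Hedged usage sketch, not part of the original file: runs an async purge
 * with this callback and returns the number of delete operations. The path,
 * record id, and the "application" persistence-policy literal are
 * illustrative assumptions.
 */
class DeleteCompletionCallbackExample {
  static int purgeAndCount(RMRegistryOperationsService registry,
      String recordId) throws Exception {
    DeleteCompletionCallback callback = new DeleteCompletionCallback();
    // block on the future; it yields the number of delete operations
    int deleteOps = registry.purgeRecordsAsync("/", recordId,
        "application", // assumed value of PersistencePolicies.APPLICATION
        RMRegistryOperationsService.PurgePolicy.PurgeAll,
        callback).get();
    // callback events may still be arriving asynchronously, so
    // callback.getEventCount() can briefly lag deleteOps
    return deleteOps;
  }
}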
| 1,960 | 32.237288 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/integration/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This package contains the classes which integrate with the YARN resource
* manager.
*/
package org.apache.hadoop.registry.server.integration;
| 958 | 38.958333 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/integration/SelectByYarnPersistence.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.server.integration;
import com.google.common.base.Preconditions;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.registry.client.types.RegistryPathStatus;
import org.apache.hadoop.registry.client.types.ServiceRecord;
import org.apache.hadoop.registry.client.types.yarn.YarnRegistryAttributes;
import org.apache.hadoop.registry.server.services.RegistryAdminService;
/**
* Select an entry by the YARN persistence policy
*/
public class SelectByYarnPersistence
implements RegistryAdminService.NodeSelector {
private final String id;
private final String targetPolicy;
public SelectByYarnPersistence(String id, String targetPolicy) {
Preconditions.checkArgument(!StringUtils.isEmpty(id), "id");
Preconditions.checkArgument(!StringUtils.isEmpty(targetPolicy),
"targetPolicy");
this.id = id;
this.targetPolicy = targetPolicy;
}
@Override
public boolean shouldSelect(String path,
RegistryPathStatus registryPathStatus,
ServiceRecord serviceRecord) {
String policy =
serviceRecord.get(YarnRegistryAttributes.YARN_PERSISTENCE, "");
return id.equals(serviceRecord.get(YarnRegistryAttributes.YARN_ID, ""))
&& (targetPolicy.equals(policy));
}
@Override
public String toString() {
return String.format(
"Select by ID %s and policy %s: {}",
id, targetPolicy);
}
}
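/**
 * Illustrative sketch, not part of the original file: builds a selector
 * matching container-level records, as the RM-side purge does. The
 * "container" policy literal is an assumption for the example.
 */
class SelectByYarnPersistenceExample {
  static RegistryAdminService.NodeSelector forContainer(String containerId) {
    // match records whose yarn:id equals the container ID and whose
    // yarn:persistence policy is "container"
    return new SelectByYarnPersistence(containerId, "container");
  }
}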
| 2,227 | 35.52459 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/integration/RMRegistryOperationsService.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.registry.server.integration;
import com.google.common.annotations.VisibleForTesting;
import org.apache.curator.framework.api.BackgroundCallback;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.registry.client.impl.zk.RegistryBindingSource;
import org.apache.hadoop.registry.client.types.yarn.PersistencePolicies;
import org.apache.hadoop.registry.server.services.DeleteCompletionCallback;
import org.apache.hadoop.registry.server.services.RegistryAdminService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.concurrent.Future;
/**
* Handle RM events by updating the registry
* <p>
* These actions are all implemented as event handlers to operations
* which come from the RM.
* <p>
* This service is expected to be executed by a user with the permissions
 * to manipulate the entire registry.
*/
@InterfaceAudience.LimitedPrivate("YARN")
@InterfaceStability.Evolving
public class RMRegistryOperationsService extends RegistryAdminService {
private static final Logger LOG =
LoggerFactory.getLogger(RMRegistryOperationsService.class);
private PurgePolicy purgeOnCompletionPolicy = PurgePolicy.PurgeAll;
public RMRegistryOperationsService(String name) {
this(name, null);
}
public RMRegistryOperationsService(String name,
RegistryBindingSource bindingSource) {
super(name, bindingSource);
}
/**
* Extend the parent service initialization by verifying that the
   * service knows, in a secure cluster, the realm in which it is executing.
* It needs this to properly build up the user names and hence their
* access rights.
*
* @param conf configuration of the service
* @throws Exception
*/
@Override
protected void serviceInit(Configuration conf) throws Exception {
super.serviceInit(conf);
verifyRealmValidity();
}
public PurgePolicy getPurgeOnCompletionPolicy() {
return purgeOnCompletionPolicy;
}
public void setPurgeOnCompletionPolicy(PurgePolicy purgeOnCompletionPolicy) {
this.purgeOnCompletionPolicy = purgeOnCompletionPolicy;
}
public void onApplicationAttemptRegistered(ApplicationAttemptId attemptId,
String host, int rpcport, String trackingurl) throws IOException {
}
public void onApplicationLaunched(ApplicationId id) throws IOException {
}
/**
* Actions to take as an AM registers itself with the RM.
* @param attemptId attempt ID
* @throws IOException problems
*/
public void onApplicationMasterRegistered(ApplicationAttemptId attemptId) throws
IOException {
}
/**
* Actions to take when the AM container is completed
* @param containerId container ID
* @throws IOException problems
*/
public void onAMContainerFinished(ContainerId containerId) throws
IOException {
LOG.info("AM Container {} finished, purging application attempt records",
containerId);
// remove all application attempt entries
purgeAppAttemptRecords(containerId.getApplicationAttemptId());
// also treat as a container finish to remove container
// level records for the AM container
onContainerFinished(containerId);
}
/**
* remove all application attempt entries
* @param attemptId attempt ID
*/
protected void purgeAppAttemptRecords(ApplicationAttemptId attemptId) {
purgeRecordsAsync("/",
attemptId.toString(),
PersistencePolicies.APPLICATION_ATTEMPT);
}
/**
* Actions to take when an application attempt is completed
* @param attemptId application ID
* @throws IOException problems
*/
public void onApplicationAttemptUnregistered(ApplicationAttemptId attemptId)
throws IOException {
LOG.info("Application attempt {} unregistered, purging app attempt records",
attemptId);
purgeAppAttemptRecords(attemptId);
}
/**
* Actions to take when an application is completed
* @param id application ID
* @throws IOException problems
*/
public void onApplicationCompleted(ApplicationId id)
throws IOException {
LOG.info("Application {} completed, purging application-level records",
id);
purgeRecordsAsync("/",
id.toString(),
PersistencePolicies.APPLICATION);
}
public void onApplicationAttemptAdded(ApplicationAttemptId appAttemptId) {
}
/**
* This is the event where the user is known, so the user directory
* can be created
* @param applicationId application ID
* @param user username
* @throws IOException problems
*/
public void onStateStoreEvent(ApplicationId applicationId, String user) throws
IOException {
initUserRegistryAsync(user);
}
/**
* Actions to take when the AM container is completed
* @param id container ID
* @throws IOException problems
*/
public void onContainerFinished(ContainerId id) throws IOException {
LOG.info("Container {} finished, purging container-level records",
id);
purgeRecordsAsync("/",
id.toString(),
PersistencePolicies.CONTAINER);
}
/**
* Queue an async operation to purge all matching records under a base path.
* <ol>
* <li>Uses a depth first search</li>
* <li>A match is on ID and persistence policy, or, if policy==-1, any match</li>
* <li>If a record matches then it is deleted without any child searches</li>
* <li>Deletions will be asynchronous if a callback is provided</li>
* </ol>
* @param path base path
* @param id ID for service record.id
* @param persistencePolicyMatch ID for the persistence policy to match:
* no match, no delete.
* @return a future that returns the #of records deleted
*/
@VisibleForTesting
public Future<Integer> purgeRecordsAsync(String path,
String id,
String persistencePolicyMatch) {
return purgeRecordsAsync(path,
id, persistencePolicyMatch,
purgeOnCompletionPolicy,
new DeleteCompletionCallback());
}
/**
* Queue an async operation to purge all matching records under a base path.
* <ol>
* <li>Uses a depth first search</li>
* <li>A match is on ID and persistence policy, or, if policy==-1, any match</li>
* <li>If a record matches then it is deleted without any child searches</li>
* <li>Deletions will be asynchronous if a callback is provided</li>
* </ol>
* @param path base path
* @param id ID for service record.id
* @param persistencePolicyMatch ID for the persistence policy to match:
* no match, no delete.
* @param purgePolicy how to react to children under the entry
* @param callback an optional callback
* @return a future that returns the #of records deleted
*/
@VisibleForTesting
public Future<Integer> purgeRecordsAsync(String path,
String id,
String persistencePolicyMatch,
PurgePolicy purgePolicy,
BackgroundCallback callback) {
LOG.info(" records under {} with ID {} and policy {}: {}",
path, id, persistencePolicyMatch);
return submit(
new AsyncPurge(path,
new SelectByYarnPersistence(id, persistencePolicyMatch),
purgePolicy,
callback));
}
}
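/**
 * Minimal lifecycle sketch, not part of the original class: how RM-side
 * code might bring the service up and react to an application finishing.
 * The service name is illustrative.
 */
class RMRegistryOperationsExample {
  static RMRegistryOperationsService startRegistryOps(Configuration conf) {
    RMRegistryOperationsService ops =
        new RMRegistryOperationsService("registry-ops");
    ops.init(conf);   // verifies the Kerberos realm when security is on
    ops.start();      // creates the root registry paths
    return ops;
  }

  static void appFinished(RMRegistryOperationsService ops, ApplicationId id)
      throws IOException {
    // queues an async purge of all records persisted per-application
    ops.onApplicationCompleted(id);
  }
}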
| 8,305 | 32.62753 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/test/java/org/apache/hadoop/yarn/server/sharedcachemanager/TestCleanerTask.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.sharedcachemanager;
import static org.mockito.Matchers.anyBoolean;
import static org.mockito.Matchers.eq;
import static org.mockito.Matchers.isA;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.sharedcachemanager.metrics.CleanerMetrics;
import org.apache.hadoop.yarn.server.sharedcachemanager.store.SCMStore;
import org.junit.Test;
public class TestCleanerTask {
private static final String ROOT =
YarnConfiguration.DEFAULT_SHARED_CACHE_ROOT;
private static final long SLEEP_TIME =
YarnConfiguration.DEFAULT_SCM_CLEANER_RESOURCE_SLEEP_MS;
private static final int NESTED_LEVEL =
YarnConfiguration.DEFAULT_SHARED_CACHE_NESTED_LEVEL;
@Test
public void testNonExistentRoot() throws Exception {
FileSystem fs = mock(FileSystem.class);
CleanerMetrics metrics = mock(CleanerMetrics.class);
SCMStore store = mock(SCMStore.class);
CleanerTask task =
createSpiedTask(fs, store, metrics, new ReentrantLock());
// the shared cache root does not exist
when(fs.exists(task.getRootPath())).thenReturn(false);
task.run();
// process() should not be called
verify(task, never()).process();
}
@Test
public void testProcessFreshResource() throws Exception {
FileSystem fs = mock(FileSystem.class);
CleanerMetrics metrics = mock(CleanerMetrics.class);
SCMStore store = mock(SCMStore.class);
CleanerTask task =
createSpiedTask(fs, store, metrics, new ReentrantLock());
// mock a resource that is not evictable
when(store.isResourceEvictable(isA(String.class), isA(FileStatus.class)))
.thenReturn(false);
FileStatus status = mock(FileStatus.class);
when(status.getPath()).thenReturn(new Path(ROOT + "/a/b/c/abc"));
// process the resource
task.processSingleResource(status);
// the directory should not be renamed
verify(fs, never()).rename(eq(status.getPath()), isA(Path.class));
// metrics should record a processed file (but not delete)
verify(metrics).reportAFileProcess();
verify(metrics, never()).reportAFileDelete();
}
@Test
public void testProcessEvictableResource() throws Exception {
FileSystem fs = mock(FileSystem.class);
CleanerMetrics metrics = mock(CleanerMetrics.class);
SCMStore store = mock(SCMStore.class);
CleanerTask task =
createSpiedTask(fs, store, metrics, new ReentrantLock());
// mock an evictable resource
when(store.isResourceEvictable(isA(String.class), isA(FileStatus.class)))
.thenReturn(true);
FileStatus status = mock(FileStatus.class);
when(status.getPath()).thenReturn(new Path(ROOT + "/a/b/c/abc"));
when(store.removeResource(isA(String.class))).thenReturn(true);
// rename succeeds
when(fs.rename(isA(Path.class), isA(Path.class))).thenReturn(true);
// delete returns true
when(fs.delete(isA(Path.class), anyBoolean())).thenReturn(true);
// process the resource
task.processSingleResource(status);
// the directory should be renamed
verify(fs).rename(eq(status.getPath()), isA(Path.class));
// metrics should record a deleted file
verify(metrics).reportAFileDelete();
verify(metrics, never()).reportAFileProcess();
}
private CleanerTask createSpiedTask(FileSystem fs, SCMStore store,
CleanerMetrics metrics, Lock isCleanerRunning) {
return spy(new CleanerTask(ROOT, SLEEP_TIME, NESTED_LEVEL, fs, store,
metrics, isCleanerRunning));
}
@Test
public void testResourceIsInUseHasAnActiveApp() throws Exception {
FileSystem fs = mock(FileSystem.class);
CleanerMetrics metrics = mock(CleanerMetrics.class);
SCMStore store = mock(SCMStore.class);
FileStatus resource = mock(FileStatus.class);
when(resource.getPath()).thenReturn(new Path(ROOT + "/a/b/c/abc"));
// resource is stale
when(store.isResourceEvictable(isA(String.class), isA(FileStatus.class)))
.thenReturn(true);
// but still has appIds
when(store.removeResource(isA(String.class))).thenReturn(false);
CleanerTask task =
createSpiedTask(fs, store, metrics, new ReentrantLock());
// process the resource
task.processSingleResource(resource);
// metrics should record a processed file (but not delete)
verify(metrics).reportAFileProcess();
verify(metrics, never()).reportAFileDelete();
}
}
| 5,695 | 36.228758 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/test/java/org/apache/hadoop/yarn/server/sharedcachemanager/TestRemoteAppChecker.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.sharedcachemanager;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.spy;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationReportPBImpl;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.client.api.impl.YarnClientImpl;
import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
import org.junit.After;
import org.junit.Test;
public class TestRemoteAppChecker {
private RemoteAppChecker checker;
@After
public void cleanup() {
if (checker != null) {
checker.stop();
}
}
/**
* Creates/initializes/starts a RemoteAppChecker with a spied
* DummyYarnClientImpl.
*
* @return the spied DummyYarnClientImpl in the created AppChecker
*/
private YarnClient createCheckerWithMockedClient() {
YarnClient client = spy(new DummyYarnClientImpl());
checker = new RemoteAppChecker(client);
checker.init(new Configuration());
checker.start();
return client;
}
@Test
public void testNonExistentApp() throws Exception {
YarnClient client = createCheckerWithMockedClient();
ApplicationId id = ApplicationId.newInstance(1, 1);
// test for null
doReturn(null).when(client).getApplicationReport(id);
assertFalse(checker.isApplicationActive(id));
// test for ApplicationNotFoundException
doThrow(new ApplicationNotFoundException("Throw!")).when(client)
.getApplicationReport(id);
assertFalse(checker.isApplicationActive(id));
}
@Test
public void testRunningApp() throws Exception {
YarnClient client = createCheckerWithMockedClient();
ApplicationId id = ApplicationId.newInstance(1, 1);
// create a report and set the state to an active one
ApplicationReport report = new ApplicationReportPBImpl();
report.setYarnApplicationState(YarnApplicationState.ACCEPTED);
doReturn(report).when(client).getApplicationReport(id);
assertTrue(checker.isApplicationActive(id));
}
class DummyYarnClientImpl extends YarnClientImpl {
@Override
protected void serviceInit(Configuration conf) throws Exception {
// do nothing
}
@Override
protected void serviceStart() {
// do nothing
}
@Override
protected void serviceStop() {
// do nothing
}
}
}
| 3,508 | 31.490741 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/test/java/org/apache/hadoop/yarn/server/sharedcachemanager/TestSCMAdminProtocolService.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.sharedcachemanager;
import static org.junit.Assert.assertEquals;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.isA;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.when;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.yarn.server.api.SCMAdminProtocol;
import org.apache.hadoop.yarn.server.api.protocolrecords.RunSharedCacheCleanerTaskRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.RunSharedCacheCleanerTaskResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RunSharedCacheCleanerTaskResponsePBImpl;
import org.apache.hadoop.yarn.client.SCMAdmin;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.apache.hadoop.yarn.server.sharedcachemanager.store.InMemorySCMStore;
import org.apache.hadoop.yarn.server.sharedcachemanager.store.SCMStore;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
/**
* Basic unit tests for the SCM Admin Protocol Service and SCMAdmin.
*/
public class TestSCMAdminProtocolService {
static SCMAdminProtocolService service;
static SCMAdminProtocol SCMAdminProxy;
static SCMAdminProtocol mockAdmin;
static SCMAdmin adminCLI;
static SCMStore store;
static CleanerService cleaner;
private final RecordFactory recordFactory = RecordFactoryProvider
.getRecordFactory(null);
@Before
public void startUp() {
Configuration conf = new Configuration();
conf.set(YarnConfiguration.SCM_STORE_CLASS,
InMemorySCMStore.class.getName());
cleaner = mock(CleanerService.class);
service = spy(new SCMAdminProtocolService(cleaner));
service.init(conf);
service.start();
YarnRPC rpc = YarnRPC.create(new Configuration());
InetSocketAddress scmAddress =
conf.getSocketAddr(YarnConfiguration.SCM_ADMIN_ADDRESS,
YarnConfiguration.DEFAULT_SCM_ADMIN_ADDRESS,
YarnConfiguration.DEFAULT_SCM_ADMIN_PORT);
SCMAdminProxy =
(SCMAdminProtocol) rpc.getProxy(SCMAdminProtocol.class, scmAddress,
conf);
mockAdmin = mock(SCMAdminProtocol.class);
adminCLI = new SCMAdmin(new Configuration()) {
@Override
protected SCMAdminProtocol createSCMAdminProtocol() throws IOException {
return mockAdmin;
}
};
}
@After
public void cleanUpTest() {
if (service != null) {
service.stop();
}
if (SCMAdminProxy != null) {
RPC.stopProxy(SCMAdminProxy);
}
}
@Test
public void testRunCleanerTask() throws Exception {
doNothing().when(cleaner).runCleanerTask();
RunSharedCacheCleanerTaskRequest request =
recordFactory.newRecordInstance(RunSharedCacheCleanerTaskRequest.class);
RunSharedCacheCleanerTaskResponse response = SCMAdminProxy.runCleanerTask(request);
Assert.assertTrue("cleaner task request isn't accepted", response.getAccepted());
verify(service, times(1)).runCleanerTask(any(RunSharedCacheCleanerTaskRequest.class));
}
@Test
public void testRunCleanerTaskCLI() throws Exception {
String[] args = { "-runCleanerTask" };
RunSharedCacheCleanerTaskResponse rp =
new RunSharedCacheCleanerTaskResponsePBImpl();
rp.setAccepted(true);
when(mockAdmin.runCleanerTask(isA(RunSharedCacheCleanerTaskRequest.class)))
.thenReturn(rp);
assertEquals(0, adminCLI.run(args));
rp.setAccepted(false);
when(mockAdmin.runCleanerTask(isA(RunSharedCacheCleanerTaskRequest.class)))
.thenReturn(rp);
assertEquals(1, adminCLI.run(args));
verify(mockAdmin, times(2)).runCleanerTask(
any(RunSharedCacheCleanerTaskRequest.class));
}
}
| 4,962 | 35.492647 | 105 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/test/java/org/apache/hadoop/yarn/server/sharedcachemanager/TestClientSCMProtocolService.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.sharedcachemanager;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.mockito.Mockito.spy;
import java.io.File;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.ClientSCMProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.ReleaseSharedCacheResourceRequest;
import org.apache.hadoop.yarn.api.protocolrecords.UseSharedCacheResourceRequest;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.apache.hadoop.yarn.server.sharedcachemanager.metrics.ClientSCMMetrics;
import org.apache.hadoop.yarn.server.sharedcachemanager.store.InMemorySCMStore;
import org.apache.hadoop.yarn.server.sharedcachemanager.store.SCMStore;
import org.apache.hadoop.yarn.server.sharedcachemanager.store.SharedCacheResourceReference;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
/**
* Basic unit tests for the Client to SCM Protocol Service.
*/
public class TestClientSCMProtocolService {
private static File testDir = null;
@BeforeClass
public static void setupTestDirs() throws IOException {
testDir = new File("target",
        TestClientSCMProtocolService.class.getCanonicalName());
testDir.delete();
testDir.mkdirs();
testDir = testDir.getAbsoluteFile();
}
@AfterClass
public static void cleanupTestDirs() throws IOException {
if (testDir != null) {
testDir.delete();
}
}
private ClientProtocolService service;
private ClientSCMProtocol clientSCMProxy;
private SCMStore store;
private final RecordFactory recordFactory = RecordFactoryProvider
.getRecordFactory(null);
@Before
public void startUp() {
Configuration conf = new Configuration();
conf.set(YarnConfiguration.SCM_STORE_CLASS,
InMemorySCMStore.class.getName());
conf.set(YarnConfiguration.SHARED_CACHE_ROOT, testDir.getPath());
AppChecker appChecker = spy(new DummyAppChecker());
store = new InMemorySCMStore(appChecker);
store.init(conf);
store.start();
service = new ClientProtocolService(store);
service.init(conf);
service.start();
YarnRPC rpc = YarnRPC.create(new Configuration());
InetSocketAddress scmAddress =
conf.getSocketAddr(YarnConfiguration.SCM_CLIENT_SERVER_ADDRESS,
YarnConfiguration.DEFAULT_SCM_CLIENT_SERVER_ADDRESS,
YarnConfiguration.DEFAULT_SCM_CLIENT_SERVER_PORT);
clientSCMProxy =
(ClientSCMProtocol) rpc.getProxy(ClientSCMProtocol.class, scmAddress,
conf);
}
@After
public void cleanUp() {
if (store != null) {
store.stop();
store = null;
}
if (service != null) {
service.stop();
service = null;
}
if (clientSCMProxy != null) {
RPC.stopProxy(clientSCMProxy);
clientSCMProxy = null;
}
}
@Test
public void testUse_MissingEntry() throws Exception {
long misses = ClientSCMMetrics.getInstance().getCacheMisses();
UseSharedCacheResourceRequest request =
recordFactory.newRecordInstance(UseSharedCacheResourceRequest.class);
request.setResourceKey("key1");
request.setAppId(createAppId(1, 1L));
assertNull(clientSCMProxy.use(request).getPath());
assertEquals("Client SCM metrics aren't updated.", 1, ClientSCMMetrics
.getInstance().getCacheMisses() - misses);
}
@Test
public void testUse_ExistingEntry_NoAppIds() throws Exception {
// Pre-populate the SCM with one cache entry
store.addResource("key1", "foo.jar");
long hits = ClientSCMMetrics.getInstance().getCacheHits();
UseSharedCacheResourceRequest request =
recordFactory.newRecordInstance(UseSharedCacheResourceRequest.class);
request.setResourceKey("key1");
request.setAppId(createAppId(2, 2L));
// Expecting default depth of 3 and under the shared cache root dir
String expectedPath = testDir.getAbsolutePath() + "/k/e/y/key1/foo.jar";
assertEquals(expectedPath, clientSCMProxy.use(request).getPath());
assertEquals(1, store.getResourceReferences("key1").size());
assertEquals("Client SCM metrics aren't updated.", 1, ClientSCMMetrics
.getInstance().getCacheHits() - hits);
}
@Test
public void testUse_ExistingEntry_OneId() throws Exception {
// Pre-populate the SCM with one cache entry
store.addResource("key1", "foo.jar");
store.addResourceReference("key1",
new SharedCacheResourceReference(createAppId(1, 1L), "user"));
assertEquals(1, store.getResourceReferences("key1").size());
long hits = ClientSCMMetrics.getInstance().getCacheHits();
// Add a new distinct appId
UseSharedCacheResourceRequest request =
recordFactory.newRecordInstance(UseSharedCacheResourceRequest.class);
request.setResourceKey("key1");
request.setAppId(createAppId(2, 2L));
// Expecting default depth of 3 under the shared cache root dir
String expectedPath = testDir.getAbsolutePath() + "/k/e/y/key1/foo.jar";
assertEquals(expectedPath, clientSCMProxy.use(request).getPath());
assertEquals(2, store.getResourceReferences("key1").size());
assertEquals("Client SCM metrics aren't updated.", 1, ClientSCMMetrics
.getInstance().getCacheHits() - hits);
}
@Test
public void testUse_ExistingEntry_DupId() throws Exception {
// Pre-populate the SCM with one cache entry
store.addResource("key1", "foo.jar");
UserGroupInformation testUGI = UserGroupInformation.getCurrentUser();
store.addResourceReference("key1",
new SharedCacheResourceReference(createAppId(1, 1L),
testUGI.getShortUserName()));
assertEquals(1, store.getResourceReferences("key1").size());
long hits = ClientSCMMetrics.getInstance().getCacheHits();
// Add a new duplicate appId
UseSharedCacheResourceRequest request =
recordFactory.newRecordInstance(UseSharedCacheResourceRequest.class);
request.setResourceKey("key1");
request.setAppId(createAppId(1, 1L));
// Expecting default depth of 3 under the shared cache root dir
String expectedPath = testDir.getAbsolutePath() + "/k/e/y/key1/foo.jar";
assertEquals(expectedPath, clientSCMProxy.use(request).getPath());
assertEquals(1, store.getResourceReferences("key1").size());
assertEquals("Client SCM metrics aren't updated.", 1, ClientSCMMetrics
.getInstance().getCacheHits() - hits);
}
@Test
  public void testRelease_ExistingEntry_NonExistentAppId() throws Exception {
// Pre-populate the SCM with one cache entry
store.addResource("key1", "foo.jar");
store.addResourceReference("key1",
new SharedCacheResourceReference(createAppId(1, 1L), "user"));
assertEquals(1, store.getResourceReferences("key1").size());
long releases = ClientSCMMetrics.getInstance().getCacheReleases();
ReleaseSharedCacheResourceRequest request =
recordFactory
.newRecordInstance(ReleaseSharedCacheResourceRequest.class);
request.setResourceKey("key1");
request.setAppId(createAppId(2, 2L));
clientSCMProxy.release(request);
assertEquals(1, store.getResourceReferences("key1").size());
assertEquals(
"Client SCM metrics were updated when a release did not happen", 0,
ClientSCMMetrics.getInstance().getCacheReleases() - releases);
}
@Test
public void testRelease_ExistingEntry_WithAppId() throws Exception {
// Pre-populate the SCM with one cache entry
store.addResource("key1", "foo.jar");
UserGroupInformation testUGI = UserGroupInformation.getCurrentUser();
store.addResourceReference("key1",
new SharedCacheResourceReference(createAppId(1, 1L),
testUGI.getShortUserName()));
assertEquals(1, store.getResourceReferences("key1").size());
long releases = ClientSCMMetrics.getInstance().getCacheReleases();
ReleaseSharedCacheResourceRequest request =
recordFactory
.newRecordInstance(ReleaseSharedCacheResourceRequest.class);
request.setResourceKey("key1");
request.setAppId(createAppId(1, 1L));
clientSCMProxy.release(request);
assertEquals(0, store.getResourceReferences("key1").size());
assertEquals("Client SCM metrics aren't updated.", 1, ClientSCMMetrics
.getInstance().getCacheReleases() - releases);
}
@Test
public void testRelease_MissingEntry() throws Exception {
long releases = ClientSCMMetrics.getInstance().getCacheReleases();
ReleaseSharedCacheResourceRequest request =
recordFactory
.newRecordInstance(ReleaseSharedCacheResourceRequest.class);
request.setResourceKey("key2");
request.setAppId(createAppId(2, 2L));
clientSCMProxy.release(request);
assertNotNull(store.getResourceReferences("key2"));
assertEquals(0, store.getResourceReferences("key2").size());
assertEquals(
"Client SCM metrics were updated when a release did not happen.", 0,
ClientSCMMetrics.getInstance().getCacheReleases() - releases);
}
private ApplicationId createAppId(int id, long timestamp) {
return ApplicationId.newInstance(timestamp, id);
}
}
| 10,441 | 36.426523 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/test/java/org/apache/hadoop/yarn/server/sharedcachemanager/TestSharedCacheUploaderService.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.sharedcachemanager;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.spy;
import java.io.File;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.Collection;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.apache.hadoop.yarn.server.api.SCMUploaderProtocol;
import org.apache.hadoop.yarn.server.api.protocolrecords.SCMUploaderNotifyRequest;
import org.apache.hadoop.yarn.server.sharedcachemanager.metrics.SharedCacheUploaderMetrics;
import org.apache.hadoop.yarn.server.sharedcachemanager.store.InMemorySCMStore;
import org.apache.hadoop.yarn.server.sharedcachemanager.store.SCMStore;
import org.apache.hadoop.yarn.server.sharedcachemanager.store.SharedCacheResourceReference;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
/**
 * Basic unit tests for the NodeManager to SCM Protocol Service.
*/
public class TestSharedCacheUploaderService {
private static File testDir = null;
@BeforeClass
public static void setupTestDirs() throws IOException {
testDir = new File("target",
TestSharedCacheUploaderService.class.getCanonicalName());
testDir.delete();
testDir.mkdirs();
testDir = testDir.getAbsoluteFile();
}
@AfterClass
public static void cleanupTestDirs() throws IOException {
if (testDir != null) {
testDir.delete();
}
}
private SharedCacheUploaderService service;
private SCMUploaderProtocol proxy;
private SCMStore store;
private final RecordFactory recordFactory = RecordFactoryProvider
.getRecordFactory(null);
@Before
public void startUp() {
Configuration conf = new Configuration();
conf.set(YarnConfiguration.SCM_STORE_CLASS,
InMemorySCMStore.class.getName());
conf.set(YarnConfiguration.SHARED_CACHE_ROOT, testDir.getPath());
AppChecker appChecker = spy(new DummyAppChecker());
store = new InMemorySCMStore(appChecker);
store.init(conf);
store.start();
service = new SharedCacheUploaderService(store);
service.init(conf);
service.start();
YarnRPC rpc = YarnRPC.create(new Configuration());
InetSocketAddress scmAddress =
conf.getSocketAddr(YarnConfiguration.SCM_UPLOADER_SERVER_ADDRESS,
YarnConfiguration.DEFAULT_SCM_UPLOADER_SERVER_ADDRESS,
YarnConfiguration.DEFAULT_SCM_UPLOADER_SERVER_PORT);
proxy =
(SCMUploaderProtocol) rpc.getProxy(
SCMUploaderProtocol.class, scmAddress, conf);
}
@After
public void cleanUp() {
if (store != null) {
store.stop();
}
if (service != null) {
service.stop();
}
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
@Test
public void testNotify_noEntry() throws Exception {
long accepted =
SharedCacheUploaderMetrics.getInstance().getAcceptedUploads();
SCMUploaderNotifyRequest request =
recordFactory.newRecordInstance(SCMUploaderNotifyRequest.class);
request.setResourceKey("key1");
request.setFilename("foo.jar");
assertTrue(proxy.notify(request).getAccepted());
Collection<SharedCacheResourceReference> set =
store.getResourceReferences("key1");
assertNotNull(set);
assertEquals(0, set.size());
assertEquals(
"NM upload metrics aren't updated.", 1,
SharedCacheUploaderMetrics.getInstance().getAcceptedUploads() -
accepted);
}
@Test
public void testNotify_entryExists_differentName() throws Exception {
long rejected =
SharedCacheUploaderMetrics.getInstance().getRejectUploads();
store.addResource("key1", "foo.jar");
SCMUploaderNotifyRequest request =
recordFactory.newRecordInstance(SCMUploaderNotifyRequest.class);
request.setResourceKey("key1");
request.setFilename("foobar.jar");
assertFalse(proxy.notify(request).getAccepted());
Collection<SharedCacheResourceReference> set =
store.getResourceReferences("key1");
assertNotNull(set);
assertEquals(0, set.size());
assertEquals(
"NM upload metrics aren't updated.", 1,
SharedCacheUploaderMetrics.getInstance().getRejectUploads() -
rejected);
}
@Test
public void testNotify_entryExists_sameName() throws Exception {
long accepted =
SharedCacheUploaderMetrics.getInstance().getAcceptedUploads();
store.addResource("key1", "foo.jar");
SCMUploaderNotifyRequest request =
recordFactory.newRecordInstance(SCMUploaderNotifyRequest.class);
request.setResourceKey("key1");
request.setFilename("foo.jar");
assertTrue(proxy.notify(request).getAccepted());
Collection<SharedCacheResourceReference> set =
store.getResourceReferences("key1");
assertNotNull(set);
assertEquals(0, set.size());
assertEquals(
"NM upload metrics aren't updated.", 1,
SharedCacheUploaderMetrics.getInstance().getAcceptedUploads() -
accepted);
}
}
| 6241 | 32.026455 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/test/java/org/apache/hadoop/yarn/server/sharedcachemanager/DummyAppChecker.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.sharedcachemanager;
import java.util.ArrayList;
import java.util.Collection;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.exceptions.YarnException;
/**
* A dummy app checker class for testing only.
*/
public class DummyAppChecker extends AppChecker {
@Override
@Private
public boolean isApplicationActive(ApplicationId id) throws YarnException {
return false;
}
@Override
@Private
public Collection<ApplicationId> getActiveApplications() throws YarnException {
return new ArrayList<ApplicationId>();
}
}
| 1489 | 32.863636 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/test/java/org/apache/hadoop/yarn/server/sharedcachemanager/metrics/TestCleanerMetrics.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.sharedcachemanager.metrics;
import static org.junit.Assert.assertEquals;
import org.apache.hadoop.conf.Configuration;
import org.junit.Before;
import org.junit.Test;
public class TestCleanerMetrics {
Configuration conf = new Configuration();
CleanerMetrics cleanerMetrics;
@Before
public void init() {
cleanerMetrics = CleanerMetrics.getInstance();
}
@Test
public void testMetricsOverMultiplePeriods() {
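    // Each simulated run reports one cleaning start, three processed files
    // and one delete; the expected values below imply that a deleted file is
    // also counted as processed, giving 4 processed / 1 deleted per period,
    // with the totals accumulating across periods.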
simulateACleanerRun();
assertMetrics(4, 4, 1, 1);
simulateACleanerRun();
assertMetrics(4, 8, 1, 2);
}
public void simulateACleanerRun() {
cleanerMetrics.reportCleaningStart();
cleanerMetrics.reportAFileProcess();
cleanerMetrics.reportAFileDelete();
cleanerMetrics.reportAFileProcess();
cleanerMetrics.reportAFileProcess();
}
void assertMetrics(int proc, int totalProc, int del, int totalDel) {
assertEquals(
"Processed files in the last period are not measured correctly", proc,
cleanerMetrics.getProcessedFiles());
assertEquals("Total processed files are not measured correctly",
totalProc, cleanerMetrics.getTotalProcessedFiles());
assertEquals(
"Deleted files in the last period are not measured correctly", del,
cleanerMetrics.getDeletedFiles());
assertEquals("Total deleted files are not measured correctly",
totalDel, cleanerMetrics.getTotalDeletedFiles());
}
}
| 2253 | 33.676923 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/test/java/org/apache/hadoop/yarn/server/sharedcachemanager/store/TestInMemorySCMStore.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.sharedcachemanager.store;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.isA;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.server.sharedcachemanager.AppChecker;
import org.apache.hadoop.yarn.server.sharedcachemanager.DummyAppChecker;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
public class TestInMemorySCMStore extends SCMStoreBaseTest {
private InMemorySCMStore store;
private AppChecker checker;
@Override
Class<? extends SCMStore> getStoreClass() {
return InMemorySCMStore.class;
}
@Before
public void setup() {
this.checker = spy(new DummyAppChecker());
this.store = spy(new InMemorySCMStore(checker));
}
@After
public void cleanup() {
if (this.store != null) {
this.store.stop();
}
}
private void startEmptyStore() throws Exception {
doReturn(new ArrayList<ApplicationId>()).when(checker)
.getActiveApplications();
doReturn(new HashMap<String, String>()).when(store)
.getInitialCachedResources(isA(FileSystem.class),
isA(Configuration.class));
this.store.init(new Configuration());
this.store.start();
}
private Map<String, String> startStoreWithResources() throws Exception {
Map<String, String> initialCachedResources = new HashMap<String, String>();
int count = 10;
for (int i = 0; i < count; i++) {
String key = String.valueOf(i);
String fileName = key + ".jar";
initialCachedResources.put(key, fileName);
}
doReturn(new ArrayList<ApplicationId>()).when(checker)
.getActiveApplications();
doReturn(initialCachedResources).when(store).getInitialCachedResources(
isA(FileSystem.class), isA(Configuration.class));
this.store.init(new Configuration());
this.store.start();
return initialCachedResources;
}
private void startStoreWithApps() throws Exception {
ArrayList<ApplicationId> list = new ArrayList<ApplicationId>();
int count = 5;
for (int i = 0; i < count; i++) {
list.add(createAppId(i, i));
}
doReturn(list).when(checker).getActiveApplications();
doReturn(new HashMap<String, String>()).when(store)
.getInitialCachedResources(isA(FileSystem.class),
isA(Configuration.class));
this.store.init(new Configuration());
this.store.start();
}
@Test
public void testAddResourceConcurrency() throws Exception {
startEmptyStore();
final String key = "key1";
int count = 5;
ExecutorService exec = Executors.newFixedThreadPool(count);
List<Future<String>> futures = new ArrayList<Future<String>>(count);
final CountDownLatch start = new CountDownLatch(1);
for (int i = 0; i < count; i++) {
final String fileName = "foo-" + i + ".jar";
Callable<String> task = new Callable<String>() {
public String call() throws Exception {
start.await();
String result = store.addResource(key, fileName);
System.out.println("fileName: " + fileName + ", result: " + result);
return result;
}
};
futures.add(exec.submit(task));
}
// start them all at the same time
start.countDown();
// check the result; they should all agree with the value
Set<String> results = new HashSet<String>();
for (Future<String> future: futures) {
results.add(future.get());
}
    assertEquals(1, results.size());
exec.shutdown();
}
@Test
public void testAddResourceRefNonExistentResource() throws Exception {
startEmptyStore();
String key = "key1";
ApplicationId id = createAppId(1, 1L);
// try adding an app id without adding the key first
assertNull(store.addResourceReference(key,
new SharedCacheResourceReference(id, "user")));
}
@Test
public void testRemoveResourceEmptyRefs() throws Exception {
startEmptyStore();
String key = "key1";
String fileName = "foo.jar";
// first add resource
store.addResource(key, fileName);
// try removing the resource; it should return true
assertTrue(store.removeResource(key));
}
@Test
public void testAddResourceRefRemoveResource() throws Exception {
startEmptyStore();
String key = "key1";
ApplicationId id = createAppId(1, 1L);
String user = "user";
// add the resource, and then add a resource ref
store.addResource(key, "foo.jar");
store.addResourceReference(key, new SharedCacheResourceReference(id, user));
// removeResource should return false
    assertFalse(store.removeResource(key));
// the resource and the ref should be intact
Collection<SharedCacheResourceReference> refs = store.getResourceReferences(key);
assertTrue(refs != null);
assertEquals(Collections.singleton(new SharedCacheResourceReference(id, user)), refs);
}
@Test
public void testAddResourceRefConcurrency() throws Exception {
startEmptyStore();
final String key = "key1";
final String user = "user";
String fileName = "foo.jar";
// first add the resource
store.addResource(key, fileName);
// make concurrent addResourceRef calls (clients)
int count = 5;
ExecutorService exec = Executors.newFixedThreadPool(count);
List<Future<String>> futures = new ArrayList<Future<String>>(count);
final CountDownLatch start = new CountDownLatch(1);
for (int i = 0; i < count; i++) {
final ApplicationId id = createAppId(i, i);
Callable<String> task = new Callable<String>() {
public String call() throws Exception {
start.await();
return store.addResourceReference(key,
new SharedCacheResourceReference(id, user));
}
};
futures.add(exec.submit(task));
}
// start them all at the same time
start.countDown();
// check the result
Set<String> results = new HashSet<String>();
for (Future<String> future: futures) {
results.add(future.get());
}
// they should all have the same file name
    assertEquals(1, results.size());
assertEquals(Collections.singleton(fileName), results);
// there should be 5 refs as a result
Collection<SharedCacheResourceReference> refs = store.getResourceReferences(key);
    assertEquals(count, refs.size());
exec.shutdown();
}
@Test
public void testAddResourceRefAddResourceConcurrency() throws Exception {
startEmptyStore();
final String key = "key1";
final String fileName = "foo.jar";
final String user = "user";
final ApplicationId id = createAppId(1, 1L);
// add the resource and add the resource ref at the same time
ExecutorService exec = Executors.newFixedThreadPool(2);
final CountDownLatch start = new CountDownLatch(1);
Callable<String> addKeyTask = new Callable<String>() {
public String call() throws Exception {
start.await();
return store.addResource(key, fileName);
}
};
Callable<String> addAppIdTask = new Callable<String>() {
public String call() throws Exception {
start.await();
return store.addResourceReference(key,
new SharedCacheResourceReference(id, user));
}
};
Future<String> addAppIdFuture = exec.submit(addAppIdTask);
Future<String> addKeyFuture = exec.submit(addKeyTask);
// start them at the same time
start.countDown();
// get the results
String addKeyResult = addKeyFuture.get();
String addAppIdResult = addAppIdFuture.get();
assertEquals(fileName, addKeyResult);
System.out.println("addAppId() result: " + addAppIdResult);
// it may be null or the fileName depending on the timing
assertTrue(addAppIdResult == null || addAppIdResult.equals(fileName));
exec.shutdown();
}
@Test
public void testRemoveRef() throws Exception {
startEmptyStore();
String key = "key1";
String fileName = "foo.jar";
String user = "user";
// first add the resource
store.addResource(key, fileName);
// add a ref
ApplicationId id = createAppId(1, 1L);
SharedCacheResourceReference myRef = new SharedCacheResourceReference(id, user);
String result = store.addResourceReference(key, myRef);
assertEquals(fileName, result);
Collection<SharedCacheResourceReference> refs = store.getResourceReferences(key);
    assertEquals(1, refs.size());
assertEquals(Collections.singleton(myRef), refs);
// remove the same ref
store.removeResourceReferences(key, Collections.singleton(myRef), true);
Collection<SharedCacheResourceReference> newRefs = store.getResourceReferences(key);
assertTrue(newRefs == null || newRefs.isEmpty());
}
@Test
public void testBootstrapping() throws Exception {
Map<String, String> initialCachedResources = startStoreWithResources();
int count = initialCachedResources.size();
ApplicationId id = createAppId(1, 1L);
// the entries from the cached entries should now exist
for (int i = 0; i < count; i++) {
String key = String.valueOf(i);
String fileName = key + ".jar";
String result =
store.addResourceReference(key, new SharedCacheResourceReference(id,
"user"));
// the value should not be null (i.e. it has the key) and the filename should match
assertEquals(fileName, result);
// the initial input should be emptied
assertTrue(initialCachedResources.isEmpty());
}
}
@Test
public void testEvictableWithInitialApps() throws Exception {
startStoreWithApps();
assertFalse(store.isResourceEvictable("key", mock(FileStatus.class)));
}
private ApplicationId createAppId(int id, long timestamp) {
return ApplicationId.newInstance(timestamp, id);
}
}
| 11411 | 34.886792 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/test/java/org/apache/hadoop/yarn/server/sharedcachemanager/store/SCMStoreBaseTest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.sharedcachemanager.store;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;
import org.junit.Test;
/**
* All test classes that test an SCMStore implementation must extend this class.
*/
public abstract class SCMStoreBaseTest {
/**
* Get the SCMStore implementation class associated with this test class.
*/
abstract Class<? extends SCMStore> getStoreClass();
@Test
  public void testZeroArgConstructor() throws Exception {
// Test that the SCMStore implementation class is compatible with
// ReflectionUtils#newInstance
ReflectionUtils.newInstance(getStoreClass(), new Configuration());
}
}
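/*
 * A minimal sketch (illustration only) of how a concrete store test would
 * plug into this base class; "MySCMStore" is a hypothetical implementation
 * used purely for illustration. Subclasses live in this package because
 * getStoreClass() is package-private:
 *
 *   public class TestMySCMStore extends SCMStoreBaseTest {
 *     @Override
 *     Class<? extends SCMStore> getStoreClass() {
 *       // must expose a zero-arg constructor so that
 *       // ReflectionUtils#newInstance can instantiate it
 *       return MySCMStore.class;
 *     }
 *   }
 */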
| 1511 | 35 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/ClientProtocolService.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.sharedcachemanager;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.api.ClientSCMProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.ReleaseSharedCacheResourceRequest;
import org.apache.hadoop.yarn.api.protocolrecords.ReleaseSharedCacheResourceResponse;
import org.apache.hadoop.yarn.api.protocolrecords.UseSharedCacheResourceRequest;
import org.apache.hadoop.yarn.api.protocolrecords.UseSharedCacheResourceResponse;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.ipc.RPCUtil;
import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.apache.hadoop.yarn.server.sharedcache.SharedCacheUtil;
import org.apache.hadoop.yarn.server.sharedcachemanager.metrics.ClientSCMMetrics;
import org.apache.hadoop.yarn.server.sharedcachemanager.store.SCMStore;
import org.apache.hadoop.yarn.server.sharedcachemanager.store.SharedCacheResourceReference;
/**
* This service handles all rpc calls from the client to the shared cache
* manager.
*/
@Private
@Evolving
public class ClientProtocolService extends AbstractService implements
ClientSCMProtocol {
private static final Log LOG = LogFactory.getLog(ClientProtocolService.class);
private final RecordFactory recordFactory = RecordFactoryProvider
.getRecordFactory(null);
private Server server;
InetSocketAddress clientBindAddress;
private final SCMStore store;
private int cacheDepth;
private String cacheRoot;
private ClientSCMMetrics metrics;
public ClientProtocolService(SCMStore store) {
super(ClientProtocolService.class.getName());
this.store = store;
}
@Override
protected void serviceInit(Configuration conf) throws Exception {
this.clientBindAddress = getBindAddress(conf);
this.cacheDepth = SharedCacheUtil.getCacheDepth(conf);
this.cacheRoot =
conf.get(YarnConfiguration.SHARED_CACHE_ROOT,
YarnConfiguration.DEFAULT_SHARED_CACHE_ROOT);
super.serviceInit(conf);
}
InetSocketAddress getBindAddress(Configuration conf) {
return conf.getSocketAddr(YarnConfiguration.SCM_CLIENT_SERVER_ADDRESS,
YarnConfiguration.DEFAULT_SCM_CLIENT_SERVER_ADDRESS,
YarnConfiguration.DEFAULT_SCM_CLIENT_SERVER_PORT);
}
@Override
protected void serviceStart() throws Exception {
Configuration conf = getConfig();
this.metrics = ClientSCMMetrics.getInstance();
YarnRPC rpc = YarnRPC.create(conf);
this.server =
rpc.getServer(ClientSCMProtocol.class, this,
clientBindAddress,
conf, null, // Secret manager null for now (security not supported)
conf.getInt(YarnConfiguration.SCM_CLIENT_SERVER_THREAD_COUNT,
YarnConfiguration.DEFAULT_SCM_CLIENT_SERVER_THREAD_COUNT));
// TODO (YARN-2774): Enable service authorization
this.server.start();
clientBindAddress =
conf.updateConnectAddr(YarnConfiguration.SCM_CLIENT_SERVER_ADDRESS,
server.getListenerAddress());
super.serviceStart();
}
@Override
protected void serviceStop() throws Exception {
if (this.server != null) {
this.server.stop();
}
super.serviceStop();
}
@Override
public UseSharedCacheResourceResponse use(
UseSharedCacheResourceRequest request) throws YarnException,
IOException {
UseSharedCacheResourceResponse response =
recordFactory.newRecordInstance(UseSharedCacheResourceResponse.class);
UserGroupInformation callerUGI;
try {
callerUGI = UserGroupInformation.getCurrentUser();
} catch (IOException ie) {
LOG.info("Error getting UGI ", ie);
throw RPCUtil.getRemoteException(ie);
}
String fileName =
this.store.addResourceReference(request.getResourceKey(),
new SharedCacheResourceReference(request.getAppId(),
callerUGI.getShortUserName()));
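    // a non-null filename means the key existed and the reference was
    // recorded (a cache hit); null means the key is absent (a cache miss)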
if (fileName != null) {
response
.setPath(getCacheEntryFilePath(request.getResourceKey(), fileName));
this.metrics.incCacheHitCount();
} else {
this.metrics.incCacheMissCount();
}
return response;
}
@Override
public ReleaseSharedCacheResourceResponse release(
ReleaseSharedCacheResourceRequest request) throws YarnException,
IOException {
ReleaseSharedCacheResourceResponse response =
recordFactory
.newRecordInstance(ReleaseSharedCacheResourceResponse.class);
UserGroupInformation callerUGI;
try {
callerUGI = UserGroupInformation.getCurrentUser();
} catch (IOException ie) {
LOG.info("Error getting UGI ", ie);
throw RPCUtil.getRemoteException(ie);
}
boolean removed =
this.store.removeResourceReference(
request.getResourceKey(),
new SharedCacheResourceReference(request.getAppId(), callerUGI
.getShortUserName()), true);
if (removed) {
this.metrics.incCacheRelease();
}
return response;
}
private String getCacheEntryFilePath(String checksum, String filename) {
return SharedCacheUtil.getCacheEntryPath(this.cacheDepth,
this.cacheRoot, checksum) + Path.SEPARATOR_CHAR + filename;
}
}
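/*
 * A minimal client-side sketch (illustration only, not part of the service):
 * acquiring and releasing a cached resource through the ClientSCMProtocol
 * served above. Creating the proxy via YarnRPC is assumed to happen
 * elsewhere; the request setters and the response's getPath() are assumed to
 * mirror the getters and setPath() used by use() and release() above.
 */
class ClientSCMProtocolUsageExample {
  private static final RecordFactory FACTORY = RecordFactoryProvider
      .getRecordFactory(null);

  /** Ask the SCM for a cached resource; returns its path, or null on a miss. */
  static String use(ClientSCMProtocol proxy, String key,
      org.apache.hadoop.yarn.api.records.ApplicationId appId)
      throws YarnException, IOException {
    UseSharedCacheResourceRequest req =
        FACTORY.newRecordInstance(UseSharedCacheResourceRequest.class);
    req.setResourceKey(key);
    req.setAppId(appId);
    return proxy.use(req).getPath();
  }

  /** Drop the reference this application holds on the resource. */
  static void release(ClientSCMProtocol proxy, String key,
      org.apache.hadoop.yarn.api.records.ApplicationId appId)
      throws YarnException, IOException {
    ReleaseSharedCacheResourceRequest req =
        FACTORY.newRecordInstance(ReleaseSharedCacheResourceRequest.class);
    req.setResourceKey(key);
    req.setAppId(appId);
    proxy.release(req);
  }
}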
| 6658 | 33.502591 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/CleanerService.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.sharedcachemanager;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.service.CompositeService;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.sharedcachemanager.metrics.CleanerMetrics;
import org.apache.hadoop.yarn.server.sharedcachemanager.store.SCMStore;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
/**
* The cleaner service that maintains the shared cache area, and cleans up stale
* entries on a regular basis.
*/
@Private
@Evolving
public class CleanerService extends CompositeService {
/**
   * The name of the global cleaner pid file that the cleaner service creates
   * to indicate that an instance of the cleaner is running in the cluster.
*/
public static final String GLOBAL_CLEANER_PID = ".cleaner_pid";
private static final Log LOG = LogFactory.getLog(CleanerService.class);
private Configuration conf;
private CleanerMetrics metrics;
private ScheduledExecutorService scheduledExecutor;
private final SCMStore store;
private final Lock cleanerTaskLock;
public CleanerService(SCMStore store) {
super("CleanerService");
this.store = store;
this.cleanerTaskLock = new ReentrantLock();
}
@Override
protected void serviceInit(Configuration conf) throws Exception {
this.conf = conf;
    // create the scheduled executor service that runs the cleaner tasks
// use 2 threads to accommodate the on-demand tasks and reduce the chance of
// back-to-back runs
ThreadFactory tf =
new ThreadFactoryBuilder().setNameFormat("Shared cache cleaner").build();
scheduledExecutor = Executors.newScheduledThreadPool(2, tf);
super.serviceInit(conf);
}
@Override
protected void serviceStart() throws Exception {
if (!writeGlobalCleanerPidFile()) {
throw new YarnException("The global cleaner pid file already exists! " +
"It appears there is another CleanerService running in the cluster");
}
this.metrics = CleanerMetrics.getInstance();
// Start dependent services (i.e. AppChecker)
super.serviceStart();
Runnable task =
CleanerTask.create(conf, store, metrics, cleanerTaskLock);
long periodInMinutes = getPeriod(conf);
scheduledExecutor.scheduleAtFixedRate(task, getInitialDelay(conf),
periodInMinutes, TimeUnit.MINUTES);
LOG.info("Scheduled the shared cache cleaner task to run every "
+ periodInMinutes + " minutes.");
}
@Override
protected void serviceStop() throws Exception {
LOG.info("Shutting down the background thread.");
scheduledExecutor.shutdownNow();
try {
if (scheduledExecutor.awaitTermination(10, TimeUnit.SECONDS)) {
LOG.info("The background thread stopped.");
} else {
LOG.warn("Gave up waiting for the cleaner task to shutdown.");
}
} catch (InterruptedException e) {
LOG.warn("The cleaner service was interrupted while shutting down the task.",
e);
}
removeGlobalCleanerPidFile();
super.serviceStop();
}
/**
* Execute an on-demand cleaner task.
*/
protected void runCleanerTask() {
Runnable task =
CleanerTask.create(conf, store, metrics, cleanerTaskLock);
// this is a non-blocking call (it simply submits the task to the executor
// queue and returns)
this.scheduledExecutor.execute(task);
}
/**
   * To ensure there are not multiple instances of the SCM running on a given
   * cluster, a global pid file is used. This file contains the hostname and
   * the process id of the JVM that owns the pid file.
*
* @return true if the pid file was written, false otherwise
* @throws YarnException
*/
private boolean writeGlobalCleanerPidFile() throws YarnException {
String root =
conf.get(YarnConfiguration.SHARED_CACHE_ROOT,
YarnConfiguration.DEFAULT_SHARED_CACHE_ROOT);
Path pidPath = new Path(root, GLOBAL_CLEANER_PID);
try {
FileSystem fs = FileSystem.get(this.conf);
if (fs.exists(pidPath)) {
return false;
}
FSDataOutputStream os = fs.create(pidPath, false);
// write the hostname and the process id in the global cleaner pid file
final String ID = ManagementFactory.getRuntimeMXBean().getName();
os.writeUTF(ID);
os.close();
// add it to the delete-on-exit to ensure it gets deleted when the JVM
// exits
fs.deleteOnExit(pidPath);
} catch (IOException e) {
throw new YarnException(e);
}
LOG.info("Created the global cleaner pid file at " + pidPath.toString());
return true;
}
private void removeGlobalCleanerPidFile() {
try {
FileSystem fs = FileSystem.get(this.conf);
String root =
conf.get(YarnConfiguration.SHARED_CACHE_ROOT,
YarnConfiguration.DEFAULT_SHARED_CACHE_ROOT);
Path pidPath = new Path(root, GLOBAL_CLEANER_PID);
fs.delete(pidPath, false);
LOG.info("Removed the global cleaner pid file at " + pidPath.toString());
} catch (IOException e) {
LOG.error(
"Unable to remove the global cleaner pid file! The file may need "
+ "to be removed manually.", e);
}
}
private static int getInitialDelay(Configuration conf) {
int initialDelayInMinutes =
conf.getInt(YarnConfiguration.SCM_CLEANER_INITIAL_DELAY_MINS,
YarnConfiguration.DEFAULT_SCM_CLEANER_INITIAL_DELAY_MINS);
    // a negative value is invalid
    if (initialDelayInMinutes < 0) {
      throw new HadoopIllegalArgumentException("Negative initial delay value: "
          + initialDelayInMinutes
          + ". The initial delay must be non-negative.");
}
return initialDelayInMinutes;
}
private static int getPeriod(Configuration conf) {
int periodInMinutes =
conf.getInt(YarnConfiguration.SCM_CLEANER_PERIOD_MINS,
YarnConfiguration.DEFAULT_SCM_CLEANER_PERIOD_MINS);
    // a non-positive value is invalid
    if (periodInMinutes <= 0) {
      throw new HadoopIllegalArgumentException("Non-positive period value: "
          + periodInMinutes
          + ". The cleaner period must be greater than zero.");
}
return periodInMinutes;
}
}
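/*
 * A minimal configuration sketch (illustration only): the knobs consumed by
 * getInitialDelay() and getPeriod() above, plus the per-resource sleep read
 * by the cleaner task, set through the same YarnConfiguration constants this
 * service uses. The values chosen here are arbitrary examples.
 */
class CleanerServiceConfigExample {
  static Configuration exampleCleanerConf() {
    Configuration conf = new Configuration();
    // wait 10 minutes after service start before the first cleaner run
    conf.setInt(YarnConfiguration.SCM_CLEANER_INITIAL_DELAY_MINS, 10);
    // then run the cleaner once a day
    conf.setInt(YarnConfiguration.SCM_CLEANER_PERIOD_MINS, 24 * 60);
    // sleep briefly between resource directories to throttle the scan
    conf.setLong(YarnConfiguration.SCM_CLEANER_RESOURCE_SLEEP_MS, 100L);
    return conf;
  }
}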
| 7867 | 34.926941 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/SharedCacheUploaderService.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.sharedcachemanager;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.apache.hadoop.yarn.server.api.SCMUploaderProtocol;
import org.apache.hadoop.yarn.server.api.protocolrecords.SCMUploaderCanUploadRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.SCMUploaderCanUploadResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.SCMUploaderNotifyRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.SCMUploaderNotifyResponse;
import org.apache.hadoop.yarn.server.sharedcachemanager.metrics.SharedCacheUploaderMetrics;
import org.apache.hadoop.yarn.server.sharedcachemanager.store.SCMStore;
/**
* This service handles all rpc calls from the NodeManager uploader to the
* shared cache manager.
*/
public class SharedCacheUploaderService extends AbstractService
implements SCMUploaderProtocol {
private final RecordFactory recordFactory = RecordFactoryProvider
.getRecordFactory(null);
private Server server;
InetSocketAddress bindAddress;
private final SCMStore store;
private SharedCacheUploaderMetrics metrics;
public SharedCacheUploaderService(SCMStore store) {
super(SharedCacheUploaderService.class.getName());
this.store = store;
}
@Override
protected void serviceInit(Configuration conf) throws Exception {
this.bindAddress = getBindAddress(conf);
super.serviceInit(conf);
}
InetSocketAddress getBindAddress(Configuration conf) {
return conf.getSocketAddr(YarnConfiguration.SCM_UPLOADER_SERVER_ADDRESS,
YarnConfiguration.DEFAULT_SCM_UPLOADER_SERVER_ADDRESS,
YarnConfiguration.DEFAULT_SCM_UPLOADER_SERVER_PORT);
}
@Override
protected void serviceStart() throws Exception {
Configuration conf = getConfig();
this.metrics = SharedCacheUploaderMetrics.getInstance();
YarnRPC rpc = YarnRPC.create(conf);
this.server =
rpc.getServer(SCMUploaderProtocol.class, this, bindAddress,
conf, null, // Secret manager null for now (security not supported)
conf.getInt(YarnConfiguration.SCM_UPLOADER_SERVER_THREAD_COUNT,
YarnConfiguration.DEFAULT_SCM_UPLOADER_SERVER_THREAD_COUNT));
// TODO (YARN-2774): Enable service authorization
this.server.start();
bindAddress =
conf.updateConnectAddr(YarnConfiguration.SCM_UPLOADER_SERVER_ADDRESS,
server.getListenerAddress());
super.serviceStart();
}
@Override
protected void serviceStop() throws Exception {
if (this.server != null) {
this.server.stop();
this.server = null;
}
super.serviceStop();
}
@Override
public SCMUploaderNotifyResponse notify(SCMUploaderNotifyRequest request)
throws YarnException, IOException {
SCMUploaderNotifyResponse response =
recordFactory.newRecordInstance(SCMUploaderNotifyResponse.class);
// TODO (YARN-2774): proper security/authorization needs to be implemented
String filename =
store.addResource(request.getResourceKey(), request.getFileName());
boolean accepted = filename.equals(request.getFileName());
if (accepted) {
this.metrics.incAcceptedUploads();
} else {
this.metrics.incRejectedUploads();
}
response.setAccepted(accepted);
return response;
}
@Override
public SCMUploaderCanUploadResponse canUpload(
SCMUploaderCanUploadRequest request) throws YarnException, IOException {
// TODO (YARN-2781): we may want to have a more flexible policy of
// instructing the node manager to upload only if it meets a certain
// criteria
// until then we return true for now
SCMUploaderCanUploadResponse response =
recordFactory.newRecordInstance(SCMUploaderCanUploadResponse.class);
response.setUploadable(true);
return response;
}
}
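/*
 * A minimal uploader-side sketch (illustration only): how a node manager
 * caller might send the notification handled by notify() above. Obtaining
 * the SCMUploaderProtocol proxy via YarnRPC is assumed to happen elsewhere;
 * setFilename() mirrors the test usage of this request elsewhere in this
 * module.
 */
class SCMUploaderNotifyExample {
  static boolean notifyUpload(SCMUploaderProtocol proxy, String resourceKey,
      String fileName) throws YarnException, IOException {
    SCMUploaderNotifyRequest req = RecordFactoryProvider
        .getRecordFactory(null)
        .newRecordInstance(SCMUploaderNotifyRequest.class);
    req.setResourceKey(resourceKey);
    req.setFilename(fileName);
    // true means the SCM accepted this file name for the given resource key
    return proxy.notify(req).getAccepted();
  }
}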
| 5069 | 34.957447 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/CleanerTask.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.sharedcachemanager;
import java.io.IOException;
import java.util.concurrent.locks.Lock;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.sharedcache.SharedCacheUtil;
import org.apache.hadoop.yarn.server.sharedcachemanager.metrics.CleanerMetrics;
import org.apache.hadoop.yarn.server.sharedcachemanager.store.SCMStore;
/**
* The task that runs and cleans up the shared cache area for stale entries and
* orphaned files. It is expected that only one cleaner task runs at any given
* point in time.
*/
@Private
@Evolving
class CleanerTask implements Runnable {
private static final String RENAMED_SUFFIX = "-renamed";
private static final Log LOG = LogFactory.getLog(CleanerTask.class);
private final String location;
private final long sleepTime;
private final int nestedLevel;
private final Path root;
private final FileSystem fs;
private final SCMStore store;
private final CleanerMetrics metrics;
private final Lock cleanerTaskLock;
/**
* Creates a cleaner task based on the configuration. This is provided for
* convenience.
*
* @param conf
* @param store
* @param metrics
* @param cleanerTaskLock lock that ensures a serial execution of cleaner
* task
* @return an instance of a CleanerTask
*/
public static CleanerTask create(Configuration conf, SCMStore store,
CleanerMetrics metrics, Lock cleanerTaskLock) {
try {
// get the root directory for the shared cache
String location =
conf.get(YarnConfiguration.SHARED_CACHE_ROOT,
YarnConfiguration.DEFAULT_SHARED_CACHE_ROOT);
long sleepTime =
conf.getLong(YarnConfiguration.SCM_CLEANER_RESOURCE_SLEEP_MS,
YarnConfiguration.DEFAULT_SCM_CLEANER_RESOURCE_SLEEP_MS);
int nestedLevel = SharedCacheUtil.getCacheDepth(conf);
FileSystem fs = FileSystem.get(conf);
return new CleanerTask(location, sleepTime, nestedLevel, fs, store,
metrics, cleanerTaskLock);
} catch (IOException e) {
LOG.error("Unable to obtain the filesystem for the cleaner service", e);
throw new ExceptionInInitializerError(e);
}
}
/**
* Creates a cleaner task based on the root directory location and the
* filesystem.
*/
CleanerTask(String location, long sleepTime, int nestedLevel, FileSystem fs,
SCMStore store, CleanerMetrics metrics, Lock cleanerTaskLock) {
this.location = location;
this.sleepTime = sleepTime;
this.nestedLevel = nestedLevel;
this.root = new Path(location);
this.fs = fs;
this.store = store;
this.metrics = metrics;
this.cleanerTaskLock = cleanerTaskLock;
}
@Override
public void run() {
if (!this.cleanerTaskLock.tryLock()) {
// there is already another task running
LOG.warn("A cleaner task is already running. "
+ "This scheduled cleaner task will do nothing.");
return;
}
try {
if (!fs.exists(root)) {
LOG.error("The shared cache root " + location + " was not found. "
+ "The cleaner task will do nothing.");
return;
}
// we're now ready to process the shared cache area
process();
} catch (Throwable e) {
LOG.error("Unexpected exception while initializing the cleaner task. "
+ "This task will do nothing,", e);
} finally {
// this is set to false regardless of if it is a scheduled or on-demand
// task
this.cleanerTaskLock.unlock();
}
}
/**
* Sweeps and processes the shared cache area to clean up stale and orphaned
* files.
*/
void process() {
// mark the beginning of the run in the metrics
metrics.reportCleaningStart();
try {
// now traverse individual directories and process them
// the directory structure is specified by the nested level parameter
// (e.g. 9/c/d/<checksum>)
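      // with a nested level of 3, the resulting glob is presumably of the
      // form */*/*/* under the root (one wildcard per nesting level plus
      // the checksum directory itself); the exact shape comes from
      // SharedCacheUtil#getCacheEntryGlobPattern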
String pattern = SharedCacheUtil.getCacheEntryGlobPattern(nestedLevel);
FileStatus[] resources =
fs.globStatus(new Path(root, pattern));
int numResources = resources == null ? 0 : resources.length;
LOG.info("Processing " + numResources + " resources in the shared cache");
long beginMs = System.currentTimeMillis();
if (resources != null) {
for (FileStatus resource : resources) {
// check for interruption so it can abort in a timely manner in case
// of shutdown
if (Thread.currentThread().isInterrupted()) {
LOG.warn("The cleaner task was interrupted. Aborting.");
break;
}
if (resource.isDirectory()) {
processSingleResource(resource);
} else {
LOG.warn("Invalid file at path " + resource.getPath().toString()
+
" when a directory was expected");
}
// add sleep time between cleaning each directory if it is non-zero
if (sleepTime > 0) {
Thread.sleep(sleepTime);
}
}
}
long endMs = System.currentTimeMillis();
long durationMs = endMs - beginMs;
LOG.info("Processed " + numResources + " resource(s) in " + durationMs +
" ms.");
} catch (IOException e1) {
LOG.error("Unable to complete the cleaner task", e1);
} catch (InterruptedException e2) {
Thread.currentThread().interrupt(); // restore the interrupt
}
}
/**
* Returns a path for the root directory for the shared cache.
*/
Path getRootPath() {
return root;
}
/**
* Processes a single shared cache resource directory.
*/
void processSingleResource(FileStatus resource) {
Path path = resource.getPath();
// indicates the processing status of the resource
ResourceStatus resourceStatus = ResourceStatus.INIT;
// first, if the path ends with the renamed suffix, it indicates the
// directory was moved (as stale) but somehow not deleted (probably due to
// SCM failure); delete the directory
if (path.toString().endsWith(RENAMED_SUFFIX)) {
LOG.info("Found a renamed directory that was left undeleted at " +
path.toString() + ". Deleting.");
try {
if (fs.delete(path, true)) {
resourceStatus = ResourceStatus.DELETED;
}
} catch (IOException e) {
LOG.error("Error while processing a shared cache resource: " + path, e);
}
} else {
// this is the path to the cache resource directory
// the directory name is the resource key (i.e. a unique identifier)
String key = path.getName();
try {
store.cleanResourceReferences(key);
} catch (YarnException e) {
LOG.error("Exception thrown while removing dead appIds.", e);
}
if (store.isResourceEvictable(key, resource)) {
try {
/*
* TODO See YARN-2663: There is a race condition between
* store.removeResource(key) and
* removeResourceFromCacheFileSystem(path) operations because they do
* not happen atomically and resources can be uploaded with different
* file names by the node managers.
*/
// remove the resource from scm (checks for appIds as well)
if (store.removeResource(key)) {
// remove the resource from the file system
boolean deleted = removeResourceFromCacheFileSystem(path);
if (deleted) {
resourceStatus = ResourceStatus.DELETED;
} else {
LOG.error("Failed to remove path from the file system."
+ " Skipping this resource: " + path);
resourceStatus = ResourceStatus.ERROR;
}
} else {
// we did not delete the resource because it contained application
// ids
resourceStatus = ResourceStatus.PROCESSED;
}
} catch (IOException e) {
LOG.error(
"Failed to remove path from the file system. Skipping this resource: "
+ path, e);
resourceStatus = ResourceStatus.ERROR;
}
} else {
resourceStatus = ResourceStatus.PROCESSED;
}
}
// record the processing
switch (resourceStatus) {
case DELETED:
metrics.reportAFileDelete();
break;
case PROCESSED:
metrics.reportAFileProcess();
break;
case ERROR:
metrics.reportAFileError();
break;
default:
LOG.error("Cleaner encountered an invalid status (" + resourceStatus
+ ") while processing resource: " + path.getName());
}
}
private boolean removeResourceFromCacheFileSystem(Path path)
throws IOException {
// rename the directory to make the delete atomic
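    // (a recursive delete is not atomic by itself: without the rename, an
    // uploader could re-create entries under the original path while the
    // delete is still in progress; see the YARN-2663 note above)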
Path renamedPath = new Path(path.toString() + RENAMED_SUFFIX);
if (fs.rename(path, renamedPath)) {
// the directory can be removed safely now
// log the original path
LOG.info("Deleting " + path.toString());
return fs.delete(renamedPath, true);
} else {
// we were unable to remove it for some reason: it's best to leave
// it at that
LOG.error("We were not able to rename the directory to "
+ renamedPath.toString() + ". We will leave it intact.");
}
return false;
}
/**
* A status indicating what happened with the processing of a given cache
* resource.
*/
private enum ResourceStatus {
INIT,
/** Resource was successfully processed, but not deleted **/
PROCESSED,
/** Resource was successfully deleted **/
DELETED,
/** The cleaner task ran into an error while processing the resource **/
ERROR
}
}
| 11004 | 34.614887 | 84 |
java
|