repo (string, lengths 1–191, nullable) | file (string, lengths 23–351) | code (string, lengths 0–5.32M) | file_length (int64, 0–5.32M) | avg_line_length (float64, 0–2.9k) | max_line_length (int64, 0–288k) | extension_type (string, 1 class)
---|---|---|---|---|---|---|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetTaskReportResponsePBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportResponse;
import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.TaskReportPBImpl;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskReportProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportResponseProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportResponseProtoOrBuilder;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoBase;
public class GetTaskReportResponsePBImpl extends ProtoBase<GetTaskReportResponseProto> implements GetTaskReportResponse {
GetTaskReportResponseProto proto = GetTaskReportResponseProto.getDefaultInstance();
GetTaskReportResponseProto.Builder builder = null;
boolean viaProto = false;
private TaskReport taskReport = null;
public GetTaskReportResponsePBImpl() {
builder = GetTaskReportResponseProto.newBuilder();
}
public GetTaskReportResponsePBImpl(GetTaskReportResponseProto proto) {
this.proto = proto;
viaProto = true;
}
public GetTaskReportResponseProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void mergeLocalToBuilder() {
if (this.taskReport != null) {
builder.setTaskReport(convertToProtoFormat(this.taskReport));
}
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = GetTaskReportResponseProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public TaskReport getTaskReport() {
GetTaskReportResponseProtoOrBuilder p = viaProto ? proto : builder;
if (this.taskReport != null) {
return this.taskReport;
}
if (!p.hasTaskReport()) {
return null;
}
this.taskReport = convertFromProtoFormat(p.getTaskReport());
return this.taskReport;
}
@Override
public void setTaskReport(TaskReport taskReport) {
maybeInitBuilder();
if (taskReport == null)
builder.clearTaskReport();
this.taskReport = taskReport;
}
private TaskReportPBImpl convertFromProtoFormat(TaskReportProto p) {
return new TaskReportPBImpl(p);
}
private TaskReportProto convertToProtoFormat(TaskReport t) {
return ((TaskReportPBImpl)t).getProto();
}
}
| 3,414 | 30.045455 | 121 |
java
|
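The GetTaskReportResponsePBImpl above follows the lazy proto-wrapper pattern used throughout these records: local Java objects are held in fields and only merged into the protobuf builder when getProto() is called, with viaProto tracking which representation is current. The following round-trip sketch is not part of the Hadoop sources in this dump; it assumes the classes shown here are on the classpath and that TaskReportPBImpl exposes the usual no-arg constructor these PB records share.

// Hypothetical round-trip sketch for the PB wrapper lifecycle (not from the Hadoop tree).
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetTaskReportResponsePBImpl;
import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.TaskReportPBImpl;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportResponseProto;

public class GetTaskReportResponseRoundTrip {
  public static void main(String[] args) {
    // Sender side: the TaskReport is kept in the local field until serialization.
    GetTaskReportResponsePBImpl response = new GetTaskReportResponsePBImpl();
    TaskReport report = new TaskReportPBImpl(); // assumed no-arg constructor
    report.setProgress(0.5f);
    response.setTaskReport(report);

    // getProto() runs mergeLocalToProto(): the local field is written into the
    // builder, the proto is built, and viaProto flips to true.
    GetTaskReportResponseProto proto = response.getProto();

    // Receiver side: wrap the proto again and read the field back lazily.
    GetTaskReportResponse decoded = new GetTaskReportResponsePBImpl(proto);
    System.out.println(decoded.getTaskReport().getProgress()); // 0.5
  }
}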
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/KillTaskAttemptResponsePBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptResponse;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskAttemptResponseProto;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoBase;
public class KillTaskAttemptResponsePBImpl extends ProtoBase<KillTaskAttemptResponseProto> implements KillTaskAttemptResponse {
KillTaskAttemptResponseProto proto = KillTaskAttemptResponseProto.getDefaultInstance();
KillTaskAttemptResponseProto.Builder builder = null;
boolean viaProto = false;
public KillTaskAttemptResponsePBImpl() {
builder = KillTaskAttemptResponseProto.newBuilder();
}
public KillTaskAttemptResponsePBImpl(KillTaskAttemptResponseProto proto) {
this.proto = proto;
viaProto = true;
}
public KillTaskAttemptResponseProto getProto() {
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = KillTaskAttemptResponseProto.newBuilder(proto);
}
viaProto = false;
}
}
| 1,986 | 32.116667 | 127 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetTaskReportsResponsePBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsResponse;
import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.TaskReportPBImpl;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskReportProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportsResponseProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportsResponseProtoOrBuilder;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoBase;
public class GetTaskReportsResponsePBImpl extends ProtoBase<GetTaskReportsResponseProto> implements GetTaskReportsResponse {
GetTaskReportsResponseProto proto = GetTaskReportsResponseProto.getDefaultInstance();
GetTaskReportsResponseProto.Builder builder = null;
boolean viaProto = false;
private List<TaskReport> taskReports = null;
public GetTaskReportsResponsePBImpl() {
builder = GetTaskReportsResponseProto.newBuilder();
}
public GetTaskReportsResponsePBImpl(GetTaskReportsResponseProto proto) {
this.proto = proto;
viaProto = true;
}
public GetTaskReportsResponseProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void mergeLocalToBuilder() {
if (this.taskReports != null) {
addTaskReportsToProto();
}
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = GetTaskReportsResponseProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public List<TaskReport> getTaskReportList() {
initTaskReports();
return this.taskReports;
}
@Override
public TaskReport getTaskReport(int index) {
initTaskReports();
return this.taskReports.get(index);
}
@Override
public int getTaskReportCount() {
initTaskReports();
return this.taskReports.size();
}
private void initTaskReports() {
if (this.taskReports != null) {
return;
}
GetTaskReportsResponseProtoOrBuilder p = viaProto ? proto : builder;
List<TaskReportProto> list = p.getTaskReportsList();
this.taskReports = new ArrayList<TaskReport>();
for (TaskReportProto c : list) {
this.taskReports.add(convertFromProtoFormat(c));
}
}
@Override
public void addAllTaskReports(final List<TaskReport> taskReports) {
if (taskReports == null)
return;
initTaskReports();
this.taskReports.addAll(taskReports);
}
private void addTaskReportsToProto() {
maybeInitBuilder();
builder.clearTaskReports();
if (taskReports == null)
return;
Iterable<TaskReportProto> iterable = new Iterable<TaskReportProto>() {
@Override
public Iterator<TaskReportProto> iterator() {
return new Iterator<TaskReportProto>() {
Iterator<TaskReport> iter = taskReports.iterator();
@Override
public boolean hasNext() {
return iter.hasNext();
}
@Override
public TaskReportProto next() {
return convertToProtoFormat(iter.next());
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
};
}
};
builder.addAllTaskReports(iterable);
}
@Override
public void addTaskReport(TaskReport taskReports) {
initTaskReports();
this.taskReports.add(taskReports);
}
@Override
public void removeTaskReport(int index) {
initTaskReports();
this.taskReports.remove(index);
}
@Override
public void clearTaskReports() {
initTaskReports();
this.taskReports.clear();
}
private TaskReportPBImpl convertFromProtoFormat(TaskReportProto p) {
return new TaskReportPBImpl(p);
}
private TaskReportProto convertToProtoFormat(TaskReport t) {
return ((TaskReportPBImpl)t).getProto();
}
}
| 5,053 | 27.234637 | 124 |
java
|
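The list-valued records such as GetTaskReportsResponsePBImpl avoid materializing a second list when serializing: addTaskReportsToProto() hands the builder an Iterable that converts each TaskReport to its proto form only as it is iterated. A minimal, self-contained sketch of that same idiom (plain Java, no Hadoop types, hypothetical names) is:

import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.function.Function;

public class LazyConvertingIterable {
  // Wraps a List<A> as an Iterable<B>, converting each element only when iterated —
  // the same shape as the anonymous Iterable inside addTaskReportsToProto().
  static <A, B> Iterable<B> converting(final List<A> source, final Function<A, B> convert) {
    return () -> new Iterator<B>() {
      private final Iterator<A> iter = source.iterator();
      @Override public boolean hasNext() { return iter.hasNext(); }
      @Override public B next() { return convert.apply(iter.next()); }
      @Override public void remove() { throw new UnsupportedOperationException(); }
    };
  }

  public static void main(String[] args) {
    List<Integer> ids = Arrays.asList(1, 2, 3);
    // Each element is converted lazily, mirroring convertToProtoFormat(iter.next()).
    for (String s : converting(ids, i -> "task_" + i)) {
      System.out.println(s);
    }
  }
}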
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/CancelDelegationTokenResponsePBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.CancelDelegationTokenResponse;
import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoBase;
public class CancelDelegationTokenResponsePBImpl extends
ProtoBase<CancelDelegationTokenResponseProto> implements
CancelDelegationTokenResponse {
CancelDelegationTokenResponseProto proto = CancelDelegationTokenResponseProto
.getDefaultInstance();
public CancelDelegationTokenResponsePBImpl() {
}
public CancelDelegationTokenResponsePBImpl(
CancelDelegationTokenResponseProto proto) {
this.proto = proto;
}
@Override
public CancelDelegationTokenResponseProto getProto() {
return proto;
}
}
| 1,649 | 35.666667 | 90 |
java
|
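CancelDelegationTokenResponsePBImpl is the degenerate case of the same pattern: the response proto carries no fields, so no builder or viaProto bookkeeping is needed and getProto() simply returns the proto it was seeded with. A tiny sketch, not from the Hadoop tree, assuming the classes above are on the classpath:

// Hypothetical demo of the field-less response wrapper.
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.CancelDelegationTokenResponsePBImpl;
import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto;

public class EmptyResponseDemo {
  public static void main(String[] args) {
    // The default instance of an empty message is all the wrapper ever needs to hold.
    CancelDelegationTokenResponsePBImpl response = new CancelDelegationTokenResponsePBImpl();
    CancelDelegationTokenResponseProto proto = response.getProto();
    System.out.println(proto == CancelDelegationTokenResponseProto.getDefaultInstance()); // true
  }
}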
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetJobReportRequestPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.JobIdPBImpl;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.JobIdProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportRequestProtoOrBuilder;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoBase;
public class GetJobReportRequestPBImpl extends ProtoBase<GetJobReportRequestProto> implements GetJobReportRequest {
GetJobReportRequestProto proto = GetJobReportRequestProto.getDefaultInstance();
GetJobReportRequestProto.Builder builder = null;
boolean viaProto = false;
private JobId jobId = null;
public GetJobReportRequestPBImpl() {
builder = GetJobReportRequestProto.newBuilder();
}
public GetJobReportRequestPBImpl(GetJobReportRequestProto proto) {
this.proto = proto;
viaProto = true;
}
public GetJobReportRequestProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void mergeLocalToBuilder() {
if (this.jobId != null) {
builder.setJobId(convertToProtoFormat(this.jobId));
}
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = GetJobReportRequestProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public JobId getJobId() {
GetJobReportRequestProtoOrBuilder p = viaProto ? proto : builder;
if (this.jobId != null) {
return this.jobId;
}
if (!p.hasJobId()) {
return null;
}
this.jobId = convertFromProtoFormat(p.getJobId());
return this.jobId;
}
@Override
public void setJobId(JobId jobId) {
maybeInitBuilder();
if (jobId == null)
builder.clearJobId();
this.jobId = jobId;
}
private JobIdPBImpl convertFromProtoFormat(JobIdProto p) {
return new JobIdPBImpl(p);
}
private JobIdProto convertToProtoFormat(JobId t) {
return ((JobIdPBImpl)t).getProto();
}
}
| 3,237 | 28.436364 | 115 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/KillTaskRequestPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskRequest;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.TaskIdPBImpl;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskIdProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskRequestProtoOrBuilder;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoBase;
public class KillTaskRequestPBImpl extends ProtoBase<KillTaskRequestProto> implements KillTaskRequest {
KillTaskRequestProto proto = KillTaskRequestProto.getDefaultInstance();
KillTaskRequestProto.Builder builder = null;
boolean viaProto = false;
private TaskId taskId = null;
public KillTaskRequestPBImpl() {
builder = KillTaskRequestProto.newBuilder();
}
public KillTaskRequestPBImpl(KillTaskRequestProto proto) {
this.proto = proto;
viaProto = true;
}
public KillTaskRequestProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void mergeLocalToBuilder() {
if (this.taskId != null) {
builder.setTaskId(convertToProtoFormat(this.taskId));
}
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = KillTaskRequestProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public TaskId getTaskId() {
KillTaskRequestProtoOrBuilder p = viaProto ? proto : builder;
if (this.taskId != null) {
return this.taskId;
}
if (!p.hasTaskId()) {
return null;
}
this.taskId = convertFromProtoFormat(p.getTaskId());
return this.taskId;
}
@Override
public void setTaskId(TaskId taskId) {
maybeInitBuilder();
if (taskId == null)
builder.clearTaskId();
this.taskId = taskId;
}
private TaskIdPBImpl convertFromProtoFormat(TaskIdProto p) {
return new TaskIdPBImpl(p);
}
private TaskIdProto convertToProtoFormat(TaskId t) {
return ((TaskIdPBImpl)t).getProto();
}
}
| 3,201 | 28.109091 | 103 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/KillTaskResponsePBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskResponse;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskResponseProto;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoBase;
public class KillTaskResponsePBImpl extends ProtoBase<KillTaskResponseProto> implements KillTaskResponse {
KillTaskResponseProto proto = KillTaskResponseProto.getDefaultInstance();
KillTaskResponseProto.Builder builder = null;
boolean viaProto = false;
public KillTaskResponsePBImpl() {
builder = KillTaskResponseProto.newBuilder();
}
public KillTaskResponsePBImpl(KillTaskResponseProto proto) {
this.proto = proto;
viaProto = true;
}
public KillTaskResponseProto getProto() {
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = KillTaskResponseProto.newBuilder(proto);
}
viaProto = false;
}
}
| 1,888 | 30.483333 | 106 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetTaskAttemptReportResponsePBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportResponse;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport;
import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.TaskAttemptReportPBImpl;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskAttemptReportProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptReportResponseProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptReportResponseProtoOrBuilder;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoBase;
public class GetTaskAttemptReportResponsePBImpl extends ProtoBase<GetTaskAttemptReportResponseProto> implements GetTaskAttemptReportResponse {
GetTaskAttemptReportResponseProto proto = GetTaskAttemptReportResponseProto.getDefaultInstance();
GetTaskAttemptReportResponseProto.Builder builder = null;
boolean viaProto = false;
private TaskAttemptReport taskAttemptReport = null;
public GetTaskAttemptReportResponsePBImpl() {
builder = GetTaskAttemptReportResponseProto.newBuilder();
}
public GetTaskAttemptReportResponsePBImpl(GetTaskAttemptReportResponseProto proto) {
this.proto = proto;
viaProto = true;
}
public GetTaskAttemptReportResponseProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void mergeLocalToBuilder() {
if (this.taskAttemptReport != null) {
builder.setTaskAttemptReport(convertToProtoFormat(this.taskAttemptReport));
}
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = GetTaskAttemptReportResponseProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public TaskAttemptReport getTaskAttemptReport() {
GetTaskAttemptReportResponseProtoOrBuilder p = viaProto ? proto : builder;
if (this.taskAttemptReport != null) {
return this.taskAttemptReport;
}
if (!p.hasTaskAttemptReport()) {
return null;
}
this.taskAttemptReport = convertFromProtoFormat(p.getTaskAttemptReport());
return this.taskAttemptReport;
}
@Override
public void setTaskAttemptReport(TaskAttemptReport taskAttemptReport) {
maybeInitBuilder();
if (taskAttemptReport == null)
builder.clearTaskAttemptReport();
this.taskAttemptReport = taskAttemptReport;
}
private TaskAttemptReportPBImpl convertFromProtoFormat(TaskAttemptReportProto p) {
return new TaskAttemptReportPBImpl(p);
}
private TaskAttemptReportProto convertToProtoFormat(TaskAttemptReport t) {
return ((TaskAttemptReportPBImpl)t).getProto();
}
}
| 3,729 | 32.909091 | 142 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/KillJobResponsePBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobResponse;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillJobResponseProto;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoBase;
public class KillJobResponsePBImpl extends ProtoBase<KillJobResponseProto> implements KillJobResponse {
KillJobResponseProto proto = KillJobResponseProto.getDefaultInstance();
KillJobResponseProto.Builder builder = null;
boolean viaProto = false;
public KillJobResponsePBImpl() {
builder = KillJobResponseProto.newBuilder();
}
public KillJobResponsePBImpl(KillJobResponseProto proto) {
this.proto = proto;
viaProto = true;
}
public KillJobResponseProto getProto() {
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = KillJobResponseProto.newBuilder(proto);
}
viaProto = false;
}
}
| 1,874 | 30.25 | 103 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetTaskAttemptCompletionEventsResponsePBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsResponse;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent;
import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.TaskAttemptCompletionEventPBImpl;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskAttemptCompletionEventProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptCompletionEventsResponseProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptCompletionEventsResponseProtoOrBuilder;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoBase;
public class GetTaskAttemptCompletionEventsResponsePBImpl extends ProtoBase<GetTaskAttemptCompletionEventsResponseProto> implements GetTaskAttemptCompletionEventsResponse {
GetTaskAttemptCompletionEventsResponseProto proto = GetTaskAttemptCompletionEventsResponseProto.getDefaultInstance();
GetTaskAttemptCompletionEventsResponseProto.Builder builder = null;
boolean viaProto = false;
private List<TaskAttemptCompletionEvent> completionEvents = null;
public GetTaskAttemptCompletionEventsResponsePBImpl() {
builder = GetTaskAttemptCompletionEventsResponseProto.newBuilder();
}
public GetTaskAttemptCompletionEventsResponsePBImpl(GetTaskAttemptCompletionEventsResponseProto proto) {
this.proto = proto;
viaProto = true;
}
public GetTaskAttemptCompletionEventsResponseProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void mergeLocalToBuilder() {
if (this.completionEvents != null) {
addCompletionEventsToProto();
}
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = GetTaskAttemptCompletionEventsResponseProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public List<TaskAttemptCompletionEvent> getCompletionEventList() {
initCompletionEvents();
return this.completionEvents;
}
@Override
public TaskAttemptCompletionEvent getCompletionEvent(int index) {
initCompletionEvents();
return this.completionEvents.get(index);
}
@Override
public int getCompletionEventCount() {
initCompletionEvents();
return this.completionEvents.size();
}
private void initCompletionEvents() {
if (this.completionEvents != null) {
return;
}
GetTaskAttemptCompletionEventsResponseProtoOrBuilder p = viaProto ? proto : builder;
List<TaskAttemptCompletionEventProto> list = p.getCompletionEventsList();
this.completionEvents = new ArrayList<TaskAttemptCompletionEvent>();
for (TaskAttemptCompletionEventProto c : list) {
this.completionEvents.add(convertFromProtoFormat(c));
}
}
@Override
public void addAllCompletionEvents(final List<TaskAttemptCompletionEvent> completionEvents) {
if (completionEvents == null)
return;
initCompletionEvents();
this.completionEvents.addAll(completionEvents);
}
private void addCompletionEventsToProto() {
maybeInitBuilder();
builder.clearCompletionEvents();
if (completionEvents == null)
return;
Iterable<TaskAttemptCompletionEventProto> iterable = new Iterable<TaskAttemptCompletionEventProto>() {
@Override
public Iterator<TaskAttemptCompletionEventProto> iterator() {
return new Iterator<TaskAttemptCompletionEventProto>() {
Iterator<TaskAttemptCompletionEvent> iter = completionEvents.iterator();
@Override
public boolean hasNext() {
return iter.hasNext();
}
@Override
public TaskAttemptCompletionEventProto next() {
return convertToProtoFormat(iter.next());
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
};
}
};
builder.addAllCompletionEvents(iterable);
}
@Override
public void addCompletionEvent(TaskAttemptCompletionEvent completionEvents) {
initCompletionEvents();
this.completionEvents.add(completionEvents);
}
@Override
public void removeCompletionEvent(int index) {
initCompletionEvents();
this.completionEvents.remove(index);
}
@Override
public void clearCompletionEvents() {
initCompletionEvents();
this.completionEvents.clear();
}
private TaskAttemptCompletionEventPBImpl convertFromProtoFormat(TaskAttemptCompletionEventProto p) {
return new TaskAttemptCompletionEventPBImpl(p);
}
private TaskAttemptCompletionEventProto convertToProtoFormat(TaskAttemptCompletionEvent t) {
return ((TaskAttemptCompletionEventPBImpl)t).getProto();
}
}
| 5,872 | 31.810056 | 172 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetTaskReportRequestPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportRequest;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.TaskIdPBImpl;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskIdProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportRequestProtoOrBuilder;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoBase;
public class GetTaskReportRequestPBImpl extends ProtoBase<GetTaskReportRequestProto> implements GetTaskReportRequest {
GetTaskReportRequestProto proto = GetTaskReportRequestProto.getDefaultInstance();
GetTaskReportRequestProto.Builder builder = null;
boolean viaProto = false;
private TaskId taskId = null;
public GetTaskReportRequestPBImpl() {
builder = GetTaskReportRequestProto.newBuilder();
}
public GetTaskReportRequestPBImpl(GetTaskReportRequestProto proto) {
this.proto = proto;
viaProto = true;
}
public GetTaskReportRequestProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void mergeLocalToBuilder() {
if (this.taskId != null) {
builder.setTaskId(convertToProtoFormat(this.taskId));
}
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = GetTaskReportRequestProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public TaskId getTaskId() {
GetTaskReportRequestProtoOrBuilder p = viaProto ? proto : builder;
if (this.taskId != null) {
return this.taskId;
}
if (!p.hasTaskId()) {
return null;
}
this.taskId = convertFromProtoFormat(p.getTaskId());
return this.taskId;
}
@Override
public void setTaskId(TaskId taskId) {
maybeInitBuilder();
if (taskId == null)
builder.clearTaskId();
this.taskId = taskId;
}
private TaskIdPBImpl convertFromProtoFormat(TaskIdProto p) {
return new TaskIdPBImpl(p);
}
private TaskIdProto convertToProtoFormat(TaskId t) {
return ((TaskIdPBImpl)t).getProto();
}
}
| 3,282 | 28.845455 | 118 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/CounterGroup.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.records;
import java.util.Map;
public interface CounterGroup {
public abstract String getName();
public abstract String getDisplayName();
public abstract Map<String, Counter> getAllCounters();
public abstract Counter getCounter(String key);
public abstract void setName(String name);
public abstract void setDisplayName(String displayName);
public abstract void addAllCounters(Map<String, Counter> counters);
public abstract void setCounter(String key, Counter value);
public abstract void removeCounter(String key);
public abstract void clearCounters();
}
| 1,439 | 36.894737 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Private
package org.apache.hadoop.mapreduce.v2.api.records;
import org.apache.hadoop.classification.InterfaceAudience;
| 944 | 44 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskType.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.records;
public enum TaskType {
MAP, REDUCE
}
| 884 | 35.875 | 74 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskReport.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.records;
import java.util.List;
public interface TaskReport {
public abstract TaskId getTaskId();
public abstract TaskState getTaskState();
public abstract float getProgress();
public abstract String getStatus();
public abstract long getStartTime();
public abstract long getFinishTime();
public abstract Counters getCounters();
public abstract org.apache.hadoop.mapreduce.Counters getRawCounters();
public abstract List<TaskAttemptId> getRunningAttemptsList();
public abstract TaskAttemptId getRunningAttempt(int index);
public abstract int getRunningAttemptsCount();
public abstract TaskAttemptId getSuccessfulAttempt();
public abstract List<String> getDiagnosticsList();
public abstract String getDiagnostics(int index);
public abstract int getDiagnosticsCount();
public abstract void setTaskId(TaskId taskId);
public abstract void setTaskState(TaskState taskState);
public abstract void setProgress(float progress);
public abstract void setStatus(String status);
public abstract void setStartTime(long startTime);
public abstract void setFinishTime(long finishTime);
public abstract void setCounters(Counters counters);
public abstract void
setRawCounters(org.apache.hadoop.mapreduce.Counters rCounters);
public abstract void addAllRunningAttempts(List<TaskAttemptId> taskAttempts);
public abstract void addRunningAttempt(TaskAttemptId taskAttempt);
public abstract void removeRunningAttempt(int index);
public abstract void clearRunningAttempts();
public abstract void setSuccessfulAttempt(TaskAttemptId taskAttempt);
public abstract void addAllDiagnostics(List<String> diagnostics);
public abstract void addDiagnostics(String diagnostics);
public abstract void removeDiagnostics(int index);
public abstract void clearDiagnostics();
}
| 2,679 | 40.230769 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/Phase.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.records;
public enum Phase {
STARTING, MAP, SHUFFLE, SORT, REDUCE, CLEANUP
}
| 931 | 37.833333 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskAttemptReport.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.records;
import org.apache.hadoop.yarn.api.records.ContainerId;
public interface TaskAttemptReport {
public abstract TaskAttemptId getTaskAttemptId();
public abstract TaskAttemptState getTaskAttemptState();
public abstract float getProgress();
public abstract long getStartTime();
public abstract long getFinishTime();
/** @return the shuffle finish time. Applicable only for reduce attempts */
public abstract long getShuffleFinishTime();
/** @return the sort/merge finish time. Applicable only for reduce attempts */
public abstract long getSortFinishTime();
public abstract Counters getCounters();
public abstract org.apache.hadoop.mapreduce.Counters getRawCounters();
public abstract String getDiagnosticInfo();
public abstract String getStateString();
public abstract Phase getPhase();
public abstract String getNodeManagerHost();
public abstract int getNodeManagerPort();
public abstract int getNodeManagerHttpPort();
public abstract ContainerId getContainerId();
public abstract void setTaskAttemptId(TaskAttemptId taskAttemptId);
public abstract void setTaskAttemptState(TaskAttemptState taskAttemptState);
public abstract void setProgress(float progress);
public abstract void setStartTime(long startTime);
public abstract void setFinishTime(long finishTime);
public abstract void setCounters(Counters counters);
public abstract void
setRawCounters(org.apache.hadoop.mapreduce.Counters rCounters);
public abstract void setDiagnosticInfo(String diagnosticInfo);
public abstract void setStateString(String stateString);
public abstract void setPhase(Phase phase);
public abstract void setNodeManagerHost(String nmHost);
public abstract void setNodeManagerPort(int nmPort);
public abstract void setNodeManagerHttpPort(int nmHttpPort);
public abstract void setContainerId(ContainerId containerId);
/**
* Set the shuffle finish time. Applicable only for reduce attempts
* @param time the time the shuffle finished.
*/
public abstract void setShuffleFinishTime(long time);
/**
* Set the sort/merge finish time. Applicable only for reduce attempts
* @param time the time the sort/merge finished.
*/
public abstract void setSortFinishTime(long time);
}
| 3,106 | 43.385714 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskAttemptState.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.records;
public enum TaskAttemptState {
NEW,
STARTING,
RUNNING,
COMMIT_PENDING,
SUCCEEDED,
FAILED,
KILLED
}
| 979 | 31.666667 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/JobReport.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.records;
import java.util.List;
public interface JobReport {
public abstract JobId getJobId();
public abstract JobState getJobState();
public abstract float getMapProgress();
public abstract float getReduceProgress();
public abstract float getCleanupProgress();
public abstract float getSetupProgress();
public abstract long getSubmitTime();
public abstract long getStartTime();
public abstract long getFinishTime();
public abstract String getUser();
public abstract String getJobName();
public abstract String getTrackingUrl();
public abstract String getDiagnostics();
public abstract String getJobFile();
public abstract List<AMInfo> getAMInfos();
public abstract boolean isUber();
public abstract void setJobId(JobId jobId);
public abstract void setJobState(JobState jobState);
public abstract void setMapProgress(float progress);
public abstract void setReduceProgress(float progress);
public abstract void setCleanupProgress(float progress);
public abstract void setSetupProgress(float progress);
public abstract void setSubmitTime(long submitTime);
public abstract void setStartTime(long startTime);
public abstract void setFinishTime(long finishTime);
public abstract void setUser(String user);
public abstract void setJobName(String jobName);
public abstract void setTrackingUrl(String trackingUrl);
public abstract void setDiagnostics(String diagnostics);
public abstract void setJobFile(String jobFile);
public abstract void setAMInfos(List<AMInfo> amInfos);
public abstract void setIsUber(boolean isUber);
}
| 2,437 | 41.034483 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/Counter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.records;
public interface Counter {
public abstract String getName();
public abstract String getDisplayName();
public abstract long getValue();
public abstract void setName(String name);
public abstract void setDisplayName(String displayName);
public abstract void setValue(long value);
}
| 1,156 | 37.566667 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/JobState.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.records;
public enum JobState {
NEW,
INITED,
RUNNING,
SUCCEEDED,
FAILED,
KILLED,
ERROR
}
| 955 | 30.866667 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskState.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.records;
public enum TaskState {
NEW, SCHEDULED, RUNNING, SUCCEEDED, FAILED, KILLED
}
| 940 | 38.208333 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/JobId.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.records;
import java.text.NumberFormat;
import org.apache.hadoop.yarn.api.records.ApplicationId;
/**
* <p><code>JobId</code> represents the <em>globally unique</em>
* identifier for a MapReduce job.</p>
*
* <p>The globally unique nature of the identifier is achieved by using the
* <em>cluster timestamp</em> from the associated ApplicationId. i.e.
* start-time of the <code>ResourceManager</code> along with a monotonically
* increasing counter for the jobId.</p>
*/
public abstract class JobId implements Comparable<JobId> {
/**
* Get the associated <em>ApplicationId</em> which represents the
* start time of the <code>ResourceManager</code> and is used to generate
* the globally unique <code>JobId</code>.
* @return associated <code>ApplicationId</code>
*/
public abstract ApplicationId getAppId();
/**
* Get the short integer identifier of the <code>JobId</code>
* which is unique for all applications started by a particular instance
* of the <code>ResourceManager</code>.
* @return short integer identifier of the <code>JobId</code>
*/
public abstract int getId();
public abstract void setAppId(ApplicationId appId);
public abstract void setId(int id);
protected static final String JOB = "job";
protected static final char SEPARATOR = '_';
static final ThreadLocal<NumberFormat> jobIdFormat =
new ThreadLocal<NumberFormat>() {
@Override
public NumberFormat initialValue() {
NumberFormat fmt = NumberFormat.getInstance();
fmt.setGroupingUsed(false);
fmt.setMinimumIntegerDigits(4);
return fmt;
}
};
@Override
public String toString() {
StringBuilder builder = new StringBuilder(JOB);
builder.append(SEPARATOR);
builder.append(getAppId().getClusterTimestamp());
builder.append(SEPARATOR);
builder.append(jobIdFormat.get().format(getId()));
return builder.toString();
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + getAppId().hashCode();
result = prime * result + getId();
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
JobId other = (JobId) obj;
if (!this.getAppId().equals(other.getAppId()))
return false;
if (this.getId() != other.getId())
return false;
return true;
}
@Override
public int compareTo(JobId other) {
int appIdComp = this.getAppId().compareTo(other.getAppId());
if (appIdComp == 0) {
return this.getId() - other.getId();
} else {
return appIdComp;
}
}
}
| 3,612 | 31.258929 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskAttemptCompletionEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.records;
public interface TaskAttemptCompletionEvent {
public abstract TaskAttemptId getAttemptId();
public abstract TaskAttemptCompletionEventStatus getStatus();
public abstract String getMapOutputServerAddress();
public abstract int getAttemptRunTime();
public abstract int getEventId();
public abstract void setAttemptId(TaskAttemptId taskAttemptId);
public abstract void setStatus(TaskAttemptCompletionEventStatus status);
public abstract void setMapOutputServerAddress(String address);
public abstract void setAttemptRunTime(int runTime);
public abstract void setEventId(int eventId);
}
| 1,467 | 42.176471 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/Counters.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.records;
import java.util.Map;
public interface Counters {
public abstract Map<String, CounterGroup> getAllCounterGroups();
public abstract CounterGroup getCounterGroup(String key);
public abstract Counter getCounter(Enum<?> key);
public abstract void addAllCounterGroups(Map<String, CounterGroup> counterGroups);
public abstract void setCounterGroup(String key, CounterGroup value);
public abstract void removeCounterGroup(String key);
public abstract void clearCounterGroups();
public abstract void incrCounter(Enum<?> key, long amount);
}
| 1,417 | 39.514286 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskId.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.records;
import java.text.NumberFormat;
/**
* <p>
* <code>TaskId</code> represents the unique identifier for a Map or Reduce
* Task.
* </p>
*
* <p>
 * TaskId consists of 3 parts. The first part is the <code>JobId</code> that this
 * Task belongs to. The second part is either 'm' or 'r', indicating whether the
 * task is a map task or a reduce task. The third part is the task number.
* </p>
*/
public abstract class TaskId implements Comparable<TaskId> {
/**
* @return the associated <code>JobId</code>
*/
public abstract JobId getJobId();
/**
* @return the type of the task - MAP/REDUCE
*/
public abstract TaskType getTaskType();
/**
* @return the task number.
*/
public abstract int getId();
public abstract void setJobId(JobId jobId);
public abstract void setTaskType(TaskType taskType);
public abstract void setId(int id);
protected static final String TASK = "task";
static final ThreadLocal<NumberFormat> taskIdFormat =
new ThreadLocal<NumberFormat>() {
@Override
public NumberFormat initialValue() {
NumberFormat fmt = NumberFormat.getInstance();
fmt.setGroupingUsed(false);
fmt.setMinimumIntegerDigits(6);
return fmt;
}
};
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + getId();
result = prime * result + getJobId().hashCode();
result = prime * result + getTaskType().hashCode();
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
TaskId other = (TaskId) obj;
if (getId() != other.getId())
return false;
if (!getJobId().equals(other.getJobId()))
return false;
if (getTaskType() != other.getTaskType())
return false;
return true;
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder(TASK);
JobId jobId = getJobId();
builder.append("_").append(jobId.getAppId().getClusterTimestamp());
builder.append("_").append(
JobId.jobIdFormat.get().format(jobId.getAppId().getId()));
builder.append("_");
builder.append(getTaskType() == TaskType.MAP ? "m" : "r").append("_");
builder.append(taskIdFormat.get().format(getId()));
return builder.toString();
}
@Override
public int compareTo(TaskId other) {
int jobIdComp = this.getJobId().compareTo(other.getJobId());
if (jobIdComp == 0) {
if (this.getTaskType() == other.getTaskType()) {
return this.getId() - other.getId();
} else {
return this.getTaskType().compareTo(other.getTaskType());
}
} else {
return jobIdComp;
}
}
}
| 3,689 | 28.285714 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/Locality.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.records;
public enum Locality {
NODE_LOCAL,
RACK_LOCAL,
OFF_SWITCH
}
| 927 | 34.692308 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskAttemptCompletionEventStatus.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.records;
public enum TaskAttemptCompletionEventStatus {
FAILED,
KILLED,
SUCCEEDED,
OBSOLETE,
TIPFAILED
}
| 967 | 33.571429 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/AMInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.records;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ContainerId;
public interface AMInfo {
public ApplicationAttemptId getAppAttemptId();
public long getStartTime();
public ContainerId getContainerId();
public String getNodeManagerHost();
public int getNodeManagerPort();
public int getNodeManagerHttpPort();
public void setAppAttemptId(ApplicationAttemptId appAttemptId);
public void setStartTime(long startTime);
public void setContainerId(ContainerId containerId);
public void setNodeManagerHost(String nmHost);
public void setNodeManagerPort(int nmPort);
  public void setNodeManagerHttpPort(int nmHttpPort);
}
| 1,553 | 39.894737 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskAttemptId.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.records;
/**
* <p>
* <code>TaskAttemptId</code> represents the unique identifier for a task
* attempt. Each task attempt is one particular instance of a Map or Reduce Task
* identified by its TaskId.
* </p>
*
* <p>
 * TaskAttemptId consists of 2 parts. The first part is the <code>TaskId</code>
 * that this <code>TaskAttemptId</code> belongs to. The second part is the task
 * attempt number.
* </p>
*/
public abstract class TaskAttemptId implements Comparable<TaskAttemptId> {
/**
* @return the associated TaskId.
*/
public abstract TaskId getTaskId();
/**
* @return the attempt id.
*/
public abstract int getId();
public abstract void setTaskId(TaskId taskId);
public abstract void setId(int id);
protected static final String TASKATTEMPT = "attempt";
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + getId();
result =
prime * result + ((getTaskId() == null) ? 0 : getTaskId().hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
TaskAttemptId other = (TaskAttemptId) obj;
if (getId() != other.getId())
return false;
if (!getTaskId().equals(other.getTaskId()))
return false;
return true;
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder(TASKATTEMPT);
TaskId taskId = getTaskId();
builder.append("_").append(
taskId.getJobId().getAppId().getClusterTimestamp());
builder.append("_").append(
JobId.jobIdFormat.get().format(
getTaskId().getJobId().getAppId().getId()));
builder.append("_");
builder.append(taskId.getTaskType() == TaskType.MAP ? "m" : "r");
builder.append("_")
.append(TaskId.taskIdFormat.get().format(taskId.getId()));
builder.append("_");
builder.append(getId());
return builder.toString();
}
@Override
public int compareTo(TaskAttemptId other) {
int taskIdComp = this.getTaskId().compareTo(other.getTaskId());
if (taskIdComp == 0) {
return this.getId() - other.getId();
} else {
return taskIdComp;
}
}
}
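Extending the JobId sketch earlier, this self-contained snippet (not from the Hadoop sources) reproduces the full attempt_<clusterTimestamp>_<job>_<m|r>_<task>_<attempt> shape built by toString() above; all values are hypothetical.
import java.text.NumberFormat;

public class AttemptIdFormatSketch {
  public static void main(String[] args) {
    long clusterTimestamp = 1331111111111L;     // hypothetical RM start time
    int jobSeq = 7, taskSeq = 42, attempt = 0;  // hypothetical counters
    NumberFormat jobFmt = NumberFormat.getInstance();
    jobFmt.setGroupingUsed(false);
    jobFmt.setMinimumIntegerDigits(4);          // JobId.jobIdFormat pads to 4 digits
    NumberFormat taskFmt = NumberFormat.getInstance();
    taskFmt.setGroupingUsed(false);
    taskFmt.setMinimumIntegerDigits(6);         // TaskId.taskIdFormat pads to 6 digits
    // Prints "attempt_1331111111111_0007_m_000042_0"
    System.out.println("attempt_" + clusterTimestamp
        + "_" + jobFmt.format(jobSeq)
        + "_m_" + taskFmt.format(taskSeq)
        + "_" + attempt);
  }
}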
| 3,137 | 29.173077 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/Avataar.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.records;
public enum Avataar {
VIRGIN,
SPECULATIVE
}
| 909 | 35.4 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Private
package org.apache.hadoop.mapreduce.v2.api.records.impl.pb;
import org.apache.hadoop.classification.InterfaceAudience;
| 952 | 44.380952 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/CounterGroupPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.records.impl.pb;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.mapreduce.v2.api.records.Counter;
import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.CounterGroupProto;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.CounterGroupProtoOrBuilder;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.CounterProto;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.StringCounterMapProto;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoBase;
public class CounterGroupPBImpl extends ProtoBase<CounterGroupProto> implements CounterGroup {
CounterGroupProto proto = CounterGroupProto.getDefaultInstance();
CounterGroupProto.Builder builder = null;
boolean viaProto = false;
private Map<String, Counter> counters = null;
public CounterGroupPBImpl() {
builder = CounterGroupProto.newBuilder();
}
public CounterGroupPBImpl(CounterGroupProto proto) {
this.proto = proto;
viaProto = true;
}
public CounterGroupProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void mergeLocalToBuilder() {
if (this.counters != null) {
      addCountersToProto();
}
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = CounterGroupProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public String getName() {
CounterGroupProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasName()) {
return null;
}
return (p.getName());
}
@Override
public void setName(String name) {
maybeInitBuilder();
if (name == null) {
builder.clearName();
return;
}
builder.setName((name));
}
@Override
public String getDisplayName() {
CounterGroupProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasDisplayName()) {
return null;
}
return (p.getDisplayName());
}
@Override
public void setDisplayName(String displayName) {
maybeInitBuilder();
if (displayName == null) {
builder.clearDisplayName();
return;
}
builder.setDisplayName((displayName));
}
@Override
public Map<String, Counter> getAllCounters() {
initCounters();
return this.counters;
}
@Override
public Counter getCounter(String key) {
initCounters();
return this.counters.get(key);
}
private void initCounters() {
if (this.counters != null) {
return;
}
CounterGroupProtoOrBuilder p = viaProto ? proto : builder;
List<StringCounterMapProto> list = p.getCountersList();
this.counters = new HashMap<String, Counter>();
for (StringCounterMapProto c : list) {
this.counters.put(c.getKey(), convertFromProtoFormat(c.getValue()));
}
}
@Override
public void addAllCounters(final Map<String, Counter> counters) {
if (counters == null)
return;
initCounters();
this.counters.putAll(counters);
}
  private void addCountersToProto() {
maybeInitBuilder();
builder.clearCounters();
if (counters == null)
return;
Iterable<StringCounterMapProto> iterable = new Iterable<StringCounterMapProto>() {
@Override
public Iterator<StringCounterMapProto> iterator() {
return new Iterator<StringCounterMapProto>() {
Iterator<String> keyIter = counters.keySet().iterator();
@Override
public void remove() {
throw new UnsupportedOperationException();
}
@Override
public StringCounterMapProto next() {
String key = keyIter.next();
return StringCounterMapProto.newBuilder().setKey(key).setValue(convertToProtoFormat(counters.get(key))).build();
}
@Override
public boolean hasNext() {
return keyIter.hasNext();
}
};
}
};
builder.addAllCounters(iterable);
}
@Override
public void setCounter(String key, Counter val) {
initCounters();
this.counters.put(key, val);
}
@Override
public void removeCounter(String key) {
initCounters();
this.counters.remove(key);
}
@Override
public void clearCounters() {
initCounters();
this.counters.clear();
}
private CounterPBImpl convertFromProtoFormat(CounterProto p) {
return new CounterPBImpl(p);
}
private CounterProto convertToProtoFormat(Counter t) {
return ((CounterPBImpl)t).getProto();
}
}
| 5,665 | 26.240385 | 124 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/TaskAttemptIdPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.records.impl.pb;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskAttemptIdProto;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskAttemptIdProtoOrBuilder;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskIdProto;
public class TaskAttemptIdPBImpl extends TaskAttemptId {
TaskAttemptIdProto proto = TaskAttemptIdProto.getDefaultInstance();
TaskAttemptIdProto.Builder builder = null;
boolean viaProto = false;
private TaskId taskId = null;
public TaskAttemptIdPBImpl() {
builder = TaskAttemptIdProto.newBuilder();
}
public TaskAttemptIdPBImpl(TaskAttemptIdProto proto) {
this.proto = proto;
viaProto = true;
}
public synchronized TaskAttemptIdProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private synchronized void mergeLocalToBuilder() {
if (this.taskId != null
&& !((TaskIdPBImpl) this.taskId).getProto().equals(builder.getTaskId())) {
builder.setTaskId(convertToProtoFormat(this.taskId));
}
}
private synchronized void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private synchronized void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = TaskAttemptIdProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public synchronized int getId() {
TaskAttemptIdProtoOrBuilder p = viaProto ? proto : builder;
return (p.getId());
}
@Override
public synchronized void setId(int id) {
maybeInitBuilder();
builder.setId((id));
}
@Override
public synchronized TaskId getTaskId() {
TaskAttemptIdProtoOrBuilder p = viaProto ? proto : builder;
if (this.taskId != null) {
return this.taskId;
}
if (!p.hasTaskId()) {
return null;
}
taskId = convertFromProtoFormat(p.getTaskId());
return taskId;
}
@Override
public synchronized void setTaskId(TaskId taskId) {
maybeInitBuilder();
if (taskId == null)
builder.clearTaskId();
this.taskId = taskId;
}
private TaskIdPBImpl convertFromProtoFormat(TaskIdProto p) {
return new TaskIdPBImpl(p);
}
private TaskIdProto convertToProtoFormat(TaskId t) {
return ((TaskIdPBImpl)t).getProto();
}
}
| 3,355 | 28.438596 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/JobReportPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.records.impl.pb;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.AMInfoProto;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.JobIdProto;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.JobReportProto;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.JobReportProtoOrBuilder;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.JobStateProto;
import org.apache.hadoop.mapreduce.v2.util.MRProtoUtils;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoBase;
public class JobReportPBImpl extends ProtoBase<JobReportProto> implements
JobReport {
JobReportProto proto = JobReportProto.getDefaultInstance();
JobReportProto.Builder builder = null;
boolean viaProto = false;
private JobId jobId = null;
private List<AMInfo> amInfos = null;
public JobReportPBImpl() {
builder = JobReportProto.newBuilder();
}
public JobReportPBImpl(JobReportProto proto) {
this.proto = proto;
viaProto = true;
}
public synchronized JobReportProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private synchronized void mergeLocalToBuilder() {
if (this.jobId != null) {
builder.setJobId(convertToProtoFormat(this.jobId));
}
if (this.amInfos != null) {
addAMInfosToProto();
}
}
private synchronized void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private synchronized void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = JobReportProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public synchronized JobId getJobId() {
JobReportProtoOrBuilder p = viaProto ? proto : builder;
if (this.jobId != null) {
return this.jobId;
}
if (!p.hasJobId()) {
return null;
}
this.jobId = convertFromProtoFormat(p.getJobId());
return this.jobId;
}
@Override
public synchronized void setJobId(JobId jobId) {
maybeInitBuilder();
if (jobId == null)
builder.clearJobId();
this.jobId = jobId;
}
@Override
public synchronized JobState getJobState() {
JobReportProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasJobState()) {
return null;
}
return convertFromProtoFormat(p.getJobState());
}
@Override
public synchronized void setJobState(JobState jobState) {
maybeInitBuilder();
if (jobState == null) {
builder.clearJobState();
return;
}
builder.setJobState(convertToProtoFormat(jobState));
}
@Override
public synchronized float getMapProgress() {
JobReportProtoOrBuilder p = viaProto ? proto : builder;
return (p.getMapProgress());
}
@Override
public synchronized void setMapProgress(float mapProgress) {
maybeInitBuilder();
builder.setMapProgress((mapProgress));
}
@Override
public synchronized float getReduceProgress() {
JobReportProtoOrBuilder p = viaProto ? proto : builder;
return (p.getReduceProgress());
}
@Override
public synchronized void setReduceProgress(float reduceProgress) {
maybeInitBuilder();
builder.setReduceProgress((reduceProgress));
}
@Override
public synchronized float getCleanupProgress() {
JobReportProtoOrBuilder p = viaProto ? proto : builder;
return (p.getCleanupProgress());
}
@Override
public synchronized void setCleanupProgress(float cleanupProgress) {
maybeInitBuilder();
builder.setCleanupProgress((cleanupProgress));
}
@Override
public synchronized float getSetupProgress() {
JobReportProtoOrBuilder p = viaProto ? proto : builder;
return (p.getSetupProgress());
}
@Override
public synchronized void setSetupProgress(float setupProgress) {
maybeInitBuilder();
builder.setSetupProgress((setupProgress));
}
@Override
public synchronized long getSubmitTime() {
JobReportProtoOrBuilder p = viaProto ? proto : builder;
return (p.getSubmitTime());
}
@Override
public synchronized void setSubmitTime(long submitTime) {
maybeInitBuilder();
builder.setSubmitTime((submitTime));
}
@Override
public synchronized long getStartTime() {
JobReportProtoOrBuilder p = viaProto ? proto : builder;
return (p.getStartTime());
}
@Override
public synchronized void setStartTime(long startTime) {
maybeInitBuilder();
builder.setStartTime((startTime));
}
@Override
public synchronized long getFinishTime() {
JobReportProtoOrBuilder p = viaProto ? proto : builder;
return (p.getFinishTime());
}
@Override
public synchronized void setFinishTime(long finishTime) {
maybeInitBuilder();
builder.setFinishTime((finishTime));
}
@Override
public synchronized String getUser() {
JobReportProtoOrBuilder p = viaProto ? proto : builder;
return (p.getUser());
}
@Override
public synchronized void setUser(String user) {
maybeInitBuilder();
builder.setUser((user));
}
@Override
public synchronized String getJobName() {
JobReportProtoOrBuilder p = viaProto ? proto : builder;
return (p.getJobName());
}
@Override
public synchronized void setJobName(String jobName) {
maybeInitBuilder();
builder.setJobName((jobName));
}
@Override
public synchronized String getTrackingUrl() {
JobReportProtoOrBuilder p = viaProto ? proto : builder;
return (p.getTrackingUrl());
}
@Override
public synchronized void setTrackingUrl(String trackingUrl) {
maybeInitBuilder();
builder.setTrackingUrl(trackingUrl);
}
@Override
public synchronized String getDiagnostics() {
JobReportProtoOrBuilder p = viaProto ? proto : builder;
return p.getDiagnostics();
}
@Override
public synchronized void setDiagnostics(String diagnostics) {
maybeInitBuilder();
builder.setDiagnostics(diagnostics);
}
@Override
public synchronized String getJobFile() {
JobReportProtoOrBuilder p = viaProto ? proto : builder;
return p.getJobFile();
}
@Override
public synchronized void setJobFile(String jobFile) {
maybeInitBuilder();
builder.setJobFile(jobFile);
}
@Override
public synchronized List<AMInfo> getAMInfos() {
initAMInfos();
return this.amInfos;
}
@Override
public synchronized void setAMInfos(List<AMInfo> amInfos) {
maybeInitBuilder();
if (amInfos == null) {
this.builder.clearAmInfos();
this.amInfos = null;
return;
}
initAMInfos();
this.amInfos.clear();
this.amInfos.addAll(amInfos);
}
private synchronized void initAMInfos() {
if (this.amInfos != null) {
return;
}
JobReportProtoOrBuilder p = viaProto ? proto : builder;
List<AMInfoProto> list = p.getAmInfosList();
this.amInfos = new ArrayList<AMInfo>();
for (AMInfoProto amInfoProto : list) {
this.amInfos.add(convertFromProtoFormat(amInfoProto));
}
}
private synchronized void addAMInfosToProto() {
maybeInitBuilder();
builder.clearAmInfos();
if (this.amInfos == null)
return;
for (AMInfo amInfo : this.amInfos) {
builder.addAmInfos(convertToProtoFormat(amInfo));
}
}
private AMInfoPBImpl convertFromProtoFormat(AMInfoProto p) {
return new AMInfoPBImpl(p);
}
private AMInfoProto convertToProtoFormat(AMInfo t) {
return ((AMInfoPBImpl)t).getProto();
}
private JobIdPBImpl convertFromProtoFormat(JobIdProto p) {
return new JobIdPBImpl(p);
}
private JobIdProto convertToProtoFormat(JobId t) {
return ((JobIdPBImpl)t).getProto();
}
private JobStateProto convertToProtoFormat(JobState e) {
return MRProtoUtils.convertToProtoFormat(e);
}
private JobState convertFromProtoFormat(JobStateProto e) {
return MRProtoUtils.convertFromProtoFormat(e);
}
@Override
public synchronized boolean isUber() {
JobReportProtoOrBuilder p = viaProto ? proto : builder;
return p.getIsUber();
}
@Override
public synchronized void setIsUber(boolean isUber) {
maybeInitBuilder();
builder.setIsUber(isUber);
}
}
| 9,300 | 25.727011 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/JobIdPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.records.impl.pb;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.JobIdProto;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.JobIdProtoOrBuilder;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto;
public class JobIdPBImpl extends JobId {
JobIdProto proto = JobIdProto.getDefaultInstance();
JobIdProto.Builder builder = null;
boolean viaProto = false;
private ApplicationId applicationId = null;
public JobIdPBImpl() {
builder = JobIdProto.newBuilder();
}
public JobIdPBImpl(JobIdProto proto) {
this.proto = proto;
viaProto = true;
}
public synchronized JobIdProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private synchronized void mergeLocalToBuilder() {
if (this.applicationId != null
&& !((ApplicationIdPBImpl) this.applicationId).getProto().equals(
builder.getAppId())) {
builder.setAppId(convertToProtoFormat(this.applicationId));
}
}
private synchronized void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private synchronized void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = JobIdProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public synchronized ApplicationId getAppId() {
JobIdProtoOrBuilder p = viaProto ? proto : builder;
if (applicationId != null) {
return applicationId;
} // Else via proto
if (!p.hasAppId()) {
return null;
}
applicationId = convertFromProtoFormat(p.getAppId());
return applicationId;
}
@Override
public synchronized void setAppId(ApplicationId appId) {
maybeInitBuilder();
if (appId == null) {
builder.clearAppId();
}
this.applicationId = appId;
}
@Override
public synchronized int getId() {
JobIdProtoOrBuilder p = viaProto ? proto : builder;
return (p.getId());
}
@Override
public synchronized void setId(int id) {
maybeInitBuilder();
builder.setId((id));
}
private ApplicationIdPBImpl convertFromProtoFormat(
ApplicationIdProto p) {
return new ApplicationIdPBImpl(p);
}
private ApplicationIdProto convertToProtoFormat(ApplicationId t) {
return ((ApplicationIdPBImpl) t).getProto();
}
}
| 3,431 | 28.333333 | 74 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/CounterPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.records.impl.pb;
import org.apache.hadoop.mapreduce.v2.api.records.Counter;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.CounterProto;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.CounterProtoOrBuilder;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoBase;
public class CounterPBImpl extends ProtoBase<CounterProto> implements Counter {
CounterProto proto = CounterProto.getDefaultInstance();
CounterProto.Builder builder = null;
boolean viaProto = false;
public CounterPBImpl() {
builder = CounterProto.newBuilder();
}
public CounterPBImpl(CounterProto proto) {
this.proto = proto;
viaProto = true;
}
public CounterProto getProto() {
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = CounterProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public String getName() {
CounterProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasName()) {
return null;
}
return (p.getName());
}
@Override
public void setName(String name) {
maybeInitBuilder();
if (name == null) {
builder.clearName();
return;
}
builder.setName((name));
}
@Override
public long getValue() {
CounterProtoOrBuilder p = viaProto ? proto : builder;
return (p.getValue());
}
@Override
public void setValue(long value) {
maybeInitBuilder();
builder.setValue((value));
}
@Override
public String getDisplayName() {
CounterProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasDisplayName()) {
return null;
}
return (p.getDisplayName());
}
@Override
public void setDisplayName(String displayName) {
maybeInitBuilder();
if (displayName == null) {
builder.clearDisplayName();
return;
}
builder.setDisplayName((displayName));
}
}
| 2,825 | 25.166667 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/TaskReportPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.records.impl.pb;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.api.records.Counters;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.CountersProto;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskAttemptIdProto;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskIdProto;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskReportProto;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskReportProtoOrBuilder;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskStateProto;
import org.apache.hadoop.mapreduce.v2.util.MRProtoUtils;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoBase;
public class TaskReportPBImpl extends ProtoBase<TaskReportProto> implements TaskReport {
TaskReportProto proto = TaskReportProto.getDefaultInstance();
TaskReportProto.Builder builder = null;
boolean viaProto = false;
private TaskId taskId = null;
private Counters counters = null;
private org.apache.hadoop.mapreduce.Counters rawCounters = null;
private List<TaskAttemptId> runningAttempts = null;
private TaskAttemptId successfulAttemptId = null;
private List<String> diagnostics = null;
private String status;
public TaskReportPBImpl() {
builder = TaskReportProto.newBuilder();
}
public TaskReportPBImpl(TaskReportProto proto) {
this.proto = proto;
viaProto = true;
}
public TaskReportProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void mergeLocalToBuilder() {
if (this.taskId != null) {
builder.setTaskId(convertToProtoFormat(this.taskId));
}
convertRawCountersToCounters();
if (this.counters != null) {
builder.setCounters(convertToProtoFormat(this.counters));
}
if (this.runningAttempts != null) {
addRunningAttemptsToProto();
}
if (this.successfulAttemptId != null) {
builder.setSuccessfulAttempt(convertToProtoFormat(this.successfulAttemptId));
}
if (this.diagnostics != null) {
addDiagnosticsToProto();
}
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = TaskReportProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public Counters getCounters() {
TaskReportProtoOrBuilder p = viaProto ? proto : builder;
convertRawCountersToCounters();
if (this.counters != null) {
return this.counters;
}
if (!p.hasCounters()) {
return null;
}
this.counters = convertFromProtoFormat(p.getCounters());
return this.counters;
}
@Override
public void setCounters(Counters counters) {
maybeInitBuilder();
if (counters == null) {
builder.clearCounters();
}
this.counters = counters;
this.rawCounters = null;
}
@Override
public org.apache.hadoop.mapreduce.Counters
getRawCounters() {
return this.rawCounters;
}
@Override
public void setRawCounters(org.apache.hadoop.mapreduce.Counters rCounters) {
setCounters(null);
this.rawCounters = rCounters;
}
private void convertRawCountersToCounters() {
if (this.counters == null && this.rawCounters != null) {
this.counters = TypeConverter.toYarn(rawCounters);
this.rawCounters = null;
}
}
@Override
public long getStartTime() {
TaskReportProtoOrBuilder p = viaProto ? proto : builder;
return (p.getStartTime());
}
@Override
public void setStartTime(long startTime) {
maybeInitBuilder();
builder.setStartTime((startTime));
}
@Override
public long getFinishTime() {
TaskReportProtoOrBuilder p = viaProto ? proto : builder;
return (p.getFinishTime());
}
@Override
public void setFinishTime(long finishTime) {
maybeInitBuilder();
builder.setFinishTime((finishTime));
}
@Override
public TaskId getTaskId() {
TaskReportProtoOrBuilder p = viaProto ? proto : builder;
if (this.taskId != null) {
return this.taskId;
}
if (!p.hasTaskId()) {
return null;
}
this.taskId = convertFromProtoFormat(p.getTaskId());
return this.taskId;
}
@Override
public void setTaskId(TaskId taskId) {
maybeInitBuilder();
if (taskId == null)
builder.clearTaskId();
this.taskId = taskId;
}
@Override
public float getProgress() {
TaskReportProtoOrBuilder p = viaProto ? proto : builder;
return (p.getProgress());
}
@Override
public String getStatus() {
return status;
}
@Override
public void setProgress(float progress) {
maybeInitBuilder();
builder.setProgress((progress));
}
@Override
public void setStatus(String status) {
this.status = status;
}
@Override
public TaskState getTaskState() {
TaskReportProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasTaskState()) {
return null;
}
return convertFromProtoFormat(p.getTaskState());
}
@Override
public void setTaskState(TaskState taskState) {
maybeInitBuilder();
if (taskState == null) {
builder.clearTaskState();
return;
}
builder.setTaskState(convertToProtoFormat(taskState));
}
@Override
public List<TaskAttemptId> getRunningAttemptsList() {
initRunningAttempts();
return this.runningAttempts;
}
@Override
public TaskAttemptId getRunningAttempt(int index) {
initRunningAttempts();
return this.runningAttempts.get(index);
}
@Override
public int getRunningAttemptsCount() {
initRunningAttempts();
return this.runningAttempts.size();
}
private void initRunningAttempts() {
if (this.runningAttempts != null) {
return;
}
TaskReportProtoOrBuilder p = viaProto ? proto : builder;
List<TaskAttemptIdProto> list = p.getRunningAttemptsList();
this.runningAttempts = new ArrayList<TaskAttemptId>();
for (TaskAttemptIdProto c : list) {
this.runningAttempts.add(convertFromProtoFormat(c));
}
}
@Override
public void addAllRunningAttempts(final List<TaskAttemptId> runningAttempts) {
if (runningAttempts == null)
return;
initRunningAttempts();
this.runningAttempts.addAll(runningAttempts);
}
private void addRunningAttemptsToProto() {
maybeInitBuilder();
builder.clearRunningAttempts();
if (runningAttempts == null)
return;
Iterable<TaskAttemptIdProto> iterable = new Iterable<TaskAttemptIdProto>() {
@Override
public Iterator<TaskAttemptIdProto> iterator() {
return new Iterator<TaskAttemptIdProto>() {
Iterator<TaskAttemptId> iter = runningAttempts.iterator();
@Override
public boolean hasNext() {
return iter.hasNext();
}
@Override
public TaskAttemptIdProto next() {
return convertToProtoFormat(iter.next());
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
};
}
};
builder.addAllRunningAttempts(iterable);
}
@Override
  public void addRunningAttempt(TaskAttemptId runningAttempt) {
    initRunningAttempts();
    this.runningAttempts.add(runningAttempt);
}
@Override
public void removeRunningAttempt(int index) {
initRunningAttempts();
this.runningAttempts.remove(index);
}
@Override
public void clearRunningAttempts() {
initRunningAttempts();
this.runningAttempts.clear();
}
@Override
public TaskAttemptId getSuccessfulAttempt() {
TaskReportProtoOrBuilder p = viaProto ? proto : builder;
if (this.successfulAttemptId != null) {
return this.successfulAttemptId;
}
if (!p.hasSuccessfulAttempt()) {
return null;
}
this.successfulAttemptId = convertFromProtoFormat(p.getSuccessfulAttempt());
return this.successfulAttemptId;
}
@Override
public void setSuccessfulAttempt(TaskAttemptId successfulAttempt) {
maybeInitBuilder();
if (successfulAttempt == null)
builder.clearSuccessfulAttempt();
this.successfulAttemptId = successfulAttempt;
}
@Override
public List<String> getDiagnosticsList() {
initDiagnostics();
return this.diagnostics;
}
@Override
public String getDiagnostics(int index) {
initDiagnostics();
return this.diagnostics.get(index);
}
@Override
public int getDiagnosticsCount() {
initDiagnostics();
return this.diagnostics.size();
}
private void initDiagnostics() {
if (this.diagnostics != null) {
return;
}
TaskReportProtoOrBuilder p = viaProto ? proto : builder;
List<String> list = p.getDiagnosticsList();
this.diagnostics = new ArrayList<String>();
for (String c : list) {
this.diagnostics.add(c);
}
}
@Override
public void addAllDiagnostics(final List<String> diagnostics) {
if (diagnostics == null)
return;
initDiagnostics();
this.diagnostics.addAll(diagnostics);
}
private void addDiagnosticsToProto() {
maybeInitBuilder();
builder.clearDiagnostics();
if (diagnostics == null)
return;
builder.addAllDiagnostics(diagnostics);
}
@Override
public void addDiagnostics(String diagnostics) {
initDiagnostics();
this.diagnostics.add(diagnostics);
}
@Override
public void removeDiagnostics(int index) {
initDiagnostics();
this.diagnostics.remove(index);
}
@Override
public void clearDiagnostics() {
initDiagnostics();
this.diagnostics.clear();
}
private CountersPBImpl convertFromProtoFormat(CountersProto p) {
return new CountersPBImpl(p);
}
private CountersProto convertToProtoFormat(Counters t) {
return ((CountersPBImpl)t).getProto();
}
private TaskIdPBImpl convertFromProtoFormat(TaskIdProto p) {
return new TaskIdPBImpl(p);
}
private TaskIdProto convertToProtoFormat(TaskId t) {
return ((TaskIdPBImpl)t).getProto();
}
private TaskStateProto convertToProtoFormat(TaskState e) {
return MRProtoUtils.convertToProtoFormat(e);
}
private TaskState convertFromProtoFormat(TaskStateProto e) {
return MRProtoUtils.convertFromProtoFormat(e);
}
private TaskAttemptIdPBImpl convertFromProtoFormat(TaskAttemptIdProto p) {
return new TaskAttemptIdPBImpl(p);
}
private TaskAttemptIdProto convertToProtoFormat(TaskAttemptId t) {
return ((TaskAttemptIdPBImpl)t).getProto();
}
}
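A hypothetical usage sketch (not from the Hadoop sources; the class and group names are invented) of the lazy counter conversion above: raw framework Counters handed in through setRawCounters() are only translated to the YARN record form, via TypeConverter.toYarn(), when getCounters() or getProto() is first called. It assumes the standard org.apache.hadoop.mapreduce.Counters API (findCounter/increment) and that the converted groups stay keyed by their original names.
import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.TaskReportPBImpl;

public class RawCountersSketch {
  public static void main(String[] args) {
    org.apache.hadoop.mapreduce.Counters raw =
        new org.apache.hadoop.mapreduce.Counters();
    raw.findCounter("my.group", "RECORDS").increment(5);

    TaskReport report = new TaskReportPBImpl();
    report.setRawCounters(raw);                  // cheap: no conversion happens yet
    // The first read converts to the YARN Counters record and drops the raw copy.
    long value = report.getCounters()
        .getCounterGroup("my.group").getCounter("RECORDS").getValue();
    System.out.println(value);                   // expected: 5
  }
}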
| 11,743 | 26.24826 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/TaskIdPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.records.impl.pb;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.JobIdProto;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskIdProto;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskIdProtoOrBuilder;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskTypeProto;
import org.apache.hadoop.mapreduce.v2.util.MRProtoUtils;
public class TaskIdPBImpl extends TaskId {
TaskIdProto proto = TaskIdProto.getDefaultInstance();
TaskIdProto.Builder builder = null;
boolean viaProto = false;
private JobId jobId = null;
public TaskIdPBImpl() {
builder = TaskIdProto.newBuilder(proto);
}
public TaskIdPBImpl(TaskIdProto proto) {
this.proto = proto;
viaProto = true;
}
public synchronized TaskIdProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private synchronized void mergeLocalToBuilder() {
if (this.jobId != null
&& !((JobIdPBImpl) this.jobId).getProto().equals(builder.getJobId())) {
builder.setJobId(convertToProtoFormat(this.jobId));
}
}
private synchronized void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private synchronized void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = TaskIdProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public synchronized int getId() {
TaskIdProtoOrBuilder p = viaProto ? proto : builder;
return (p.getId());
}
@Override
public synchronized void setId(int id) {
maybeInitBuilder();
builder.setId((id));
}
@Override
public synchronized JobId getJobId() {
TaskIdProtoOrBuilder p = viaProto ? proto : builder;
if (this.jobId != null) {
return this.jobId;
}
if (!p.hasJobId()) {
return null;
}
jobId = convertFromProtoFormat(p.getJobId());
return jobId;
}
@Override
public synchronized void setJobId(JobId jobId) {
maybeInitBuilder();
if (jobId == null)
builder.clearJobId();
this.jobId = jobId;
}
@Override
public synchronized TaskType getTaskType() {
TaskIdProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasTaskType()) {
return null;
}
return convertFromProtoFormat(p.getTaskType());
}
@Override
public synchronized void setTaskType(TaskType taskType) {
maybeInitBuilder();
if (taskType == null) {
builder.clearTaskType();
return;
}
builder.setTaskType(convertToProtoFormat(taskType));
}
private JobIdPBImpl convertFromProtoFormat(JobIdProto p) {
return new JobIdPBImpl(p);
}
private JobIdProto convertToProtoFormat(JobId t) {
return ((JobIdPBImpl)t).getProto();
}
private TaskTypeProto convertToProtoFormat(TaskType e) {
return MRProtoUtils.convertToProtoFormat(e);
}
private TaskType convertFromProtoFormat(TaskTypeProto e) {
return MRProtoUtils.convertFromProtoFormat(e);
}
}
| 4,059 | 27.591549 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/CountersPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.records.impl.pb;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.mapreduce.v2.api.records.Counter;
import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup;
import org.apache.hadoop.mapreduce.v2.api.records.Counters;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.CounterGroupProto;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.CountersProto;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.CountersProtoOrBuilder;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.StringCounterGroupMapProto;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoBase;
public class CountersPBImpl extends ProtoBase<CountersProto> implements Counters {
CountersProto proto = CountersProto.getDefaultInstance();
CountersProto.Builder builder = null;
boolean viaProto = false;
private Map<String, CounterGroup> counterGroups = null;
public CountersPBImpl() {
builder = CountersProto.newBuilder();
}
public CountersPBImpl(CountersProto proto) {
this.proto = proto;
viaProto = true;
}
public CountersProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void mergeLocalToBuilder() {
if (this.counterGroups != null) {
addCounterGroupsToProto();
}
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = CountersProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public Map<String, CounterGroup> getAllCounterGroups() {
initCounterGroups();
return this.counterGroups;
}
@Override
public CounterGroup getCounterGroup(String key) {
initCounterGroups();
return this.counterGroups.get(key);
}
@Override
public Counter getCounter(Enum<?> key) {
CounterGroup group = getCounterGroup(key.getDeclaringClass().getName());
return group == null ? null : group.getCounter(key.name());
}
@Override
public void incrCounter(Enum<?> key, long amount) {
String groupName = key.getDeclaringClass().getName();
if (getCounterGroup(groupName) == null) {
CounterGroup cGrp = new CounterGroupPBImpl();
cGrp.setName(groupName);
cGrp.setDisplayName(groupName);
setCounterGroup(groupName, cGrp);
}
if (getCounterGroup(groupName).getCounter(key.name()) == null) {
Counter c = new CounterPBImpl();
c.setName(key.name());
c.setDisplayName(key.name());
      c.setValue(0L);
getCounterGroup(groupName).setCounter(key.name(), c);
}
Counter counter = getCounterGroup(groupName).getCounter(key.name());
counter.setValue(counter.getValue() + amount);
}
private void initCounterGroups() {
if (this.counterGroups != null) {
return;
}
CountersProtoOrBuilder p = viaProto ? proto : builder;
List<StringCounterGroupMapProto> list = p.getCounterGroupsList();
this.counterGroups = new HashMap<String, CounterGroup>();
for (StringCounterGroupMapProto c : list) {
this.counterGroups.put(c.getKey(), convertFromProtoFormat(c.getValue()));
}
}
@Override
public void addAllCounterGroups(final Map<String, CounterGroup> counterGroups) {
if (counterGroups == null)
return;
initCounterGroups();
this.counterGroups.putAll(counterGroups);
}
private void addCounterGroupsToProto() {
maybeInitBuilder();
builder.clearCounterGroups();
if (counterGroups == null)
return;
Iterable<StringCounterGroupMapProto> iterable = new Iterable<StringCounterGroupMapProto>() {
@Override
public Iterator<StringCounterGroupMapProto> iterator() {
return new Iterator<StringCounterGroupMapProto>() {
Iterator<String> keyIter = counterGroups.keySet().iterator();
@Override
public void remove() {
throw new UnsupportedOperationException();
}
@Override
public StringCounterGroupMapProto next() {
String key = keyIter.next();
return StringCounterGroupMapProto.newBuilder().setKey(key).setValue(convertToProtoFormat(counterGroups.get(key))).build();
}
@Override
public boolean hasNext() {
return keyIter.hasNext();
}
};
}
};
builder.addAllCounterGroups(iterable);
}
@Override
public void setCounterGroup(String key, CounterGroup val) {
initCounterGroups();
this.counterGroups.put(key, val);
}
@Override
public void removeCounterGroup(String key) {
initCounterGroups();
this.counterGroups.remove(key);
}
@Override
public void clearCounterGroups() {
initCounterGroups();
this.counterGroups.clear();
}
private CounterGroupPBImpl convertFromProtoFormat(CounterGroupProto p) {
return new CounterGroupPBImpl(p);
}
private CounterGroupProto convertToProtoFormat(CounterGroup t) {
return ((CounterGroupPBImpl)t).getProto();
}
}
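// Illustrative usage sketch (editor's addition, not part of the original
// Hadoop file): shows the lazy record/proto round trip implemented above.
// Counters are mutated through the record interface and only folded back
// into a CountersProto when getProto() is called. The counter enum below is
// hypothetical.
class CountersPBImplUsageSketch {
  // Any Enum<?> can key incrCounter(); its declaring class name becomes the group name.
  enum SketchCounter { RECORDS_SEEN }
  static CountersProto buildProto() {
    CountersPBImpl counters = new CountersPBImpl();
    counters.incrCounter(SketchCounter.RECORDS_SEEN, 42L);
    // Reads are served from the in-memory map, not from the proto.
    long value = counters.getCounter(SketchCounter.RECORDS_SEEN).getValue(); // == 42
    // getProto() merges the local counter groups into the builder and freezes it.
    return counters.getProto();
  }
}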
| 6,091 | 29.767677 | 134 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/TaskAttemptReportPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.records.impl.pb;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.api.records.Counters;
import org.apache.hadoop.mapreduce.v2.api.records.Phase;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.CountersProto;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.PhaseProto;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskAttemptIdProto;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskAttemptReportProto;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskAttemptReportProtoOrBuilder;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskAttemptStateProto;
import org.apache.hadoop.mapreduce.v2.util.MRProtoUtils;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.impl.pb.ContainerIdPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoBase;
import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto;
public class TaskAttemptReportPBImpl extends ProtoBase<TaskAttemptReportProto> implements TaskAttemptReport {
TaskAttemptReportProto proto = TaskAttemptReportProto.getDefaultInstance();
TaskAttemptReportProto.Builder builder = null;
boolean viaProto = false;
private TaskAttemptId taskAttemptId = null;
private Counters counters = null;
private org.apache.hadoop.mapreduce.Counters rawCounters = null;
private ContainerId containerId = null;
public TaskAttemptReportPBImpl() {
builder = TaskAttemptReportProto.newBuilder();
}
public TaskAttemptReportPBImpl(TaskAttemptReportProto proto) {
this.proto = proto;
viaProto = true;
}
public TaskAttemptReportProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void mergeLocalToBuilder() {
if (this.taskAttemptId != null) {
builder.setTaskAttemptId(convertToProtoFormat(this.taskAttemptId));
}
convertRawCountersToCounters();
if (this.counters != null) {
builder.setCounters(convertToProtoFormat(this.counters));
}
if (this.containerId != null) {
builder.setContainerId(convertToProtoFormat(this.containerId));
}
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = TaskAttemptReportProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public Counters getCounters() {
TaskAttemptReportProtoOrBuilder p = viaProto ? proto : builder;
convertRawCountersToCounters();
if (this.counters != null) {
return this.counters;
}
if (!p.hasCounters()) {
return null;
}
this.counters = convertFromProtoFormat(p.getCounters());
return this.counters;
}
@Override
public void setCounters(Counters counters) {
maybeInitBuilder();
if (counters == null) {
builder.clearCounters();
}
this.counters = counters;
this.rawCounters = null;
}
@Override
public org.apache.hadoop.mapreduce.Counters
getRawCounters() {
return this.rawCounters;
}
@Override
public void setRawCounters(org.apache.hadoop.mapreduce.Counters rCounters) {
setCounters(null);
this.rawCounters = rCounters;
}
private void convertRawCountersToCounters() {
if (this.counters == null && this.rawCounters != null) {
this.counters = TypeConverter.toYarn(rawCounters);
this.rawCounters = null;
}
}
@Override
public long getStartTime() {
TaskAttemptReportProtoOrBuilder p = viaProto ? proto : builder;
return (p.getStartTime());
}
@Override
public void setStartTime(long startTime) {
maybeInitBuilder();
builder.setStartTime((startTime));
}
@Override
public long getFinishTime() {
TaskAttemptReportProtoOrBuilder p = viaProto ? proto : builder;
return (p.getFinishTime());
}
@Override
public void setFinishTime(long finishTime) {
maybeInitBuilder();
builder.setFinishTime((finishTime));
}
@Override
public long getShuffleFinishTime() {
TaskAttemptReportProtoOrBuilder p = viaProto ? proto : builder;
return (p.getShuffleFinishTime());
}
@Override
public void setShuffleFinishTime(long time) {
maybeInitBuilder();
builder.setShuffleFinishTime(time);
}
@Override
public long getSortFinishTime() {
TaskAttemptReportProtoOrBuilder p = viaProto ? proto : builder;
return (p.getSortFinishTime());
}
@Override
public void setSortFinishTime(long time) {
maybeInitBuilder();
builder.setSortFinishTime(time);
}
@Override
public TaskAttemptId getTaskAttemptId() {
TaskAttemptReportProtoOrBuilder p = viaProto ? proto : builder;
if (this.taskAttemptId != null) {
return this.taskAttemptId;
}
if (!p.hasTaskAttemptId()) {
return null;
}
this.taskAttemptId = convertFromProtoFormat(p.getTaskAttemptId());
return this.taskAttemptId;
}
@Override
public void setTaskAttemptId(TaskAttemptId taskAttemptId) {
maybeInitBuilder();
if (taskAttemptId == null)
builder.clearTaskAttemptId();
this.taskAttemptId = taskAttemptId;
}
@Override
public TaskAttemptState getTaskAttemptState() {
TaskAttemptReportProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasTaskAttemptState()) {
return null;
}
return convertFromProtoFormat(p.getTaskAttemptState());
}
@Override
public void setTaskAttemptState(TaskAttemptState taskAttemptState) {
maybeInitBuilder();
if (taskAttemptState == null) {
builder.clearTaskAttemptState();
return;
}
builder.setTaskAttemptState(convertToProtoFormat(taskAttemptState));
}
@Override
public float getProgress() {
TaskAttemptReportProtoOrBuilder p = viaProto ? proto : builder;
return (p.getProgress());
}
@Override
public void setProgress(float progress) {
maybeInitBuilder();
builder.setProgress((progress));
}
@Override
public String getDiagnosticInfo() {
TaskAttemptReportProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasDiagnosticInfo()) {
return null;
}
return (p.getDiagnosticInfo());
}
@Override
public void setDiagnosticInfo(String diagnosticInfo) {
maybeInitBuilder();
if (diagnosticInfo == null) {
builder.clearDiagnosticInfo();
return;
}
builder.setDiagnosticInfo((diagnosticInfo));
}
@Override
public String getStateString() {
TaskAttemptReportProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasStateString()) {
return null;
}
return (p.getStateString());
}
@Override
public void setStateString(String stateString) {
maybeInitBuilder();
if (stateString == null) {
builder.clearStateString();
return;
}
builder.setStateString((stateString));
}
@Override
public Phase getPhase() {
TaskAttemptReportProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasPhase()) {
return null;
}
return convertFromProtoFormat(p.getPhase());
}
@Override
public void setPhase(Phase phase) {
maybeInitBuilder();
if (phase == null) {
builder.clearPhase();
return;
}
builder.setPhase(convertToProtoFormat(phase));
}
@Override
public String getNodeManagerHost() {
TaskAttemptReportProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasNodeManagerHost()) {
return null;
}
return p.getNodeManagerHost();
}
@Override
public void setNodeManagerHost(String nmHost) {
maybeInitBuilder();
if (nmHost == null) {
builder.clearNodeManagerHost();
return;
}
builder.setNodeManagerHost(nmHost);
}
@Override
public int getNodeManagerPort() {
TaskAttemptReportProtoOrBuilder p = viaProto ? proto : builder;
return (p.getNodeManagerPort());
}
@Override
public void setNodeManagerPort(int nmPort) {
maybeInitBuilder();
builder.setNodeManagerPort(nmPort);
}
@Override
public int getNodeManagerHttpPort() {
TaskAttemptReportProtoOrBuilder p = viaProto ? proto : builder;
return (p.getNodeManagerHttpPort());
}
@Override
public void setNodeManagerHttpPort(int nmHttpPort) {
maybeInitBuilder();
builder.setNodeManagerHttpPort(nmHttpPort);
}
@Override
public ContainerId getContainerId() {
TaskAttemptReportProtoOrBuilder p = viaProto ? proto : builder;
if (containerId != null) {
return containerId;
} // Else via proto
if (!p.hasContainerId()) {
return null;
}
containerId = convertFromProtoFormat(p.getContainerId());
return containerId;
}
@Override
public void setContainerId(ContainerId containerId) {
maybeInitBuilder();
if (containerId == null) {
builder.clearContainerId();
}
this.containerId = containerId;
}
private ContainerIdProto convertToProtoFormat(ContainerId t) {
return ((ContainerIdPBImpl)t).getProto();
}
private ContainerIdPBImpl convertFromProtoFormat(ContainerIdProto p) {
return new ContainerIdPBImpl(p);
}
private CountersPBImpl convertFromProtoFormat(CountersProto p) {
return new CountersPBImpl(p);
}
private CountersProto convertToProtoFormat(Counters t) {
return ((CountersPBImpl)t).getProto();
}
private TaskAttemptIdPBImpl convertFromProtoFormat(TaskAttemptIdProto p) {
return new TaskAttemptIdPBImpl(p);
}
private TaskAttemptIdProto convertToProtoFormat(TaskAttemptId t) {
return ((TaskAttemptIdPBImpl)t).getProto();
}
private TaskAttemptStateProto convertToProtoFormat(TaskAttemptState e) {
return MRProtoUtils.convertToProtoFormat(e);
}
private TaskAttemptState convertFromProtoFormat(TaskAttemptStateProto e) {
return MRProtoUtils.convertFromProtoFormat(e);
}
private PhaseProto convertToProtoFormat(Phase e) {
return MRProtoUtils.convertToProtoFormat(e);
}
private Phase convertFromProtoFormat(PhaseProto e) {
return MRProtoUtils.convertFromProtoFormat(e);
}
}
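// Illustrative usage sketch (editor's addition, not part of the original
// Hadoop file): populates a report through the record interface and
// serializes it. setRawCounters() defers the TypeConverter.toYarn()
// conversion until mergeLocalToBuilder() needs the proto. The attempt id and
// raw counters are supplied by the caller here purely for illustration.
class TaskAttemptReportPBImplUsageSketch {
  static TaskAttemptReportProto buildProto(TaskAttemptId attemptId,
      org.apache.hadoop.mapreduce.Counters rawCounters) {
    TaskAttemptReportPBImpl report = new TaskAttemptReportPBImpl();
    report.setTaskAttemptId(attemptId);
    report.setTaskAttemptState(TaskAttemptState.RUNNING);
    report.setProgress(0.5f);
    report.setStartTime(System.currentTimeMillis());
    report.setRawCounters(rawCounters); // converted lazily when getProto() runs
    return report.getProto();
  }
}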
| 11,196 | 27.346835 | 109 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/TaskAttemptCompletionEventPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.records.impl.pb;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEventStatus;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskAttemptCompletionEventProto;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskAttemptCompletionEventProtoOrBuilder;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskAttemptCompletionEventStatusProto;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskAttemptIdProto;
import org.apache.hadoop.mapreduce.v2.util.MRProtoUtils;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoBase;
public class TaskAttemptCompletionEventPBImpl extends ProtoBase<TaskAttemptCompletionEventProto> implements TaskAttemptCompletionEvent {
TaskAttemptCompletionEventProto proto = TaskAttemptCompletionEventProto.getDefaultInstance();
TaskAttemptCompletionEventProto.Builder builder = null;
boolean viaProto = false;
private TaskAttemptId taskAttemptId = null;
public TaskAttemptCompletionEventPBImpl() {
builder = TaskAttemptCompletionEventProto.newBuilder();
}
public TaskAttemptCompletionEventPBImpl(TaskAttemptCompletionEventProto proto) {
this.proto = proto;
viaProto = true;
}
public TaskAttemptCompletionEventProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void mergeLocalToBuilder() {
if (this.taskAttemptId != null) {
builder.setAttemptId(convertToProtoFormat(this.taskAttemptId));
}
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = TaskAttemptCompletionEventProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public TaskAttemptId getAttemptId() {
TaskAttemptCompletionEventProtoOrBuilder p = viaProto ? proto : builder;
if (this.taskAttemptId != null) {
return this.taskAttemptId;
}
if (!p.hasAttemptId()) {
return null;
}
this.taskAttemptId = convertFromProtoFormat(p.getAttemptId());
return this.taskAttemptId;
}
@Override
public void setAttemptId(TaskAttemptId attemptId) {
maybeInitBuilder();
if (attemptId == null)
builder.clearAttemptId();
this.taskAttemptId = attemptId;
}
@Override
public TaskAttemptCompletionEventStatus getStatus() {
TaskAttemptCompletionEventProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasStatus()) {
return null;
}
return convertFromProtoFormat(p.getStatus());
}
@Override
public void setStatus(TaskAttemptCompletionEventStatus status) {
maybeInitBuilder();
if (status == null) {
builder.clearStatus();
return;
}
builder.setStatus(convertToProtoFormat(status));
}
@Override
public String getMapOutputServerAddress() {
TaskAttemptCompletionEventProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasMapOutputServerAddress()) {
return null;
}
return (p.getMapOutputServerAddress());
}
@Override
public void setMapOutputServerAddress(String mapOutputServerAddress) {
maybeInitBuilder();
if (mapOutputServerAddress == null) {
builder.clearMapOutputServerAddress();
return;
}
builder.setMapOutputServerAddress((mapOutputServerAddress));
}
@Override
public int getAttemptRunTime() {
TaskAttemptCompletionEventProtoOrBuilder p = viaProto ? proto : builder;
return (p.getAttemptRunTime());
}
@Override
public void setAttemptRunTime(int attemptRunTime) {
maybeInitBuilder();
builder.setAttemptRunTime((attemptRunTime));
}
@Override
public int getEventId() {
TaskAttemptCompletionEventProtoOrBuilder p = viaProto ? proto : builder;
return (p.getEventId());
}
@Override
public void setEventId(int eventId) {
maybeInitBuilder();
builder.setEventId((eventId));
}
private TaskAttemptIdPBImpl convertFromProtoFormat(TaskAttemptIdProto p) {
return new TaskAttemptIdPBImpl(p);
}
private TaskAttemptIdProto convertToProtoFormat(TaskAttemptId t) {
return ((TaskAttemptIdPBImpl)t).getProto();
}
private TaskAttemptCompletionEventStatusProto convertToProtoFormat(TaskAttemptCompletionEventStatus e) {
return MRProtoUtils.convertToProtoFormat(e);
}
private TaskAttemptCompletionEventStatus convertFromProtoFormat(TaskAttemptCompletionEventStatusProto e) {
return MRProtoUtils.convertFromProtoFormat(e);
}
}
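// Illustrative usage sketch (editor's addition, not part of the original
// Hadoop file): builds a completion event and serializes it. The map output
// server address and timing values below are placeholders.
class TaskAttemptCompletionEventPBImplUsageSketch {
  static TaskAttemptCompletionEventProto buildProto(TaskAttemptId attemptId) {
    TaskAttemptCompletionEventPBImpl event = new TaskAttemptCompletionEventPBImpl();
    event.setEventId(1);
    event.setAttemptId(attemptId);
    event.setStatus(TaskAttemptCompletionEventStatus.SUCCEEDED);
    event.setMapOutputServerAddress("http://nm-host:13562"); // placeholder address
    event.setAttemptRunTime(1000);
    return event.getProto();
  }
}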
| 5,590 | 30.410112 | 136 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/AMInfoPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.records.impl.pb;
import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.AMInfoProto;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.AMInfoProtoOrBuilder;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationAttemptIdPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ContainerIdPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoBase;
import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAttemptIdProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto;
public class AMInfoPBImpl extends ProtoBase<AMInfoProto> implements AMInfo {
AMInfoProto proto = AMInfoProto.getDefaultInstance();
AMInfoProto.Builder builder = null;
boolean viaProto = false;
private ApplicationAttemptId appAttemptId;
private ContainerId containerId;
public AMInfoPBImpl() {
builder = AMInfoProto.newBuilder();
}
public AMInfoPBImpl(AMInfoProto proto) {
this.proto = proto;
viaProto = true;
}
public synchronized AMInfoProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private synchronized void mergeLocalToBuilder() {
if (this.appAttemptId != null
&& !((ApplicationAttemptIdPBImpl) this.appAttemptId).getProto().equals(
builder.getApplicationAttemptId())) {
builder.setApplicationAttemptId(convertToProtoFormat(this.appAttemptId));
}
if (this.getContainerId() != null
&& !((ContainerIdPBImpl) this.containerId).getProto().equals(
builder.getContainerId())) {
builder.setContainerId(convertToProtoFormat(this.containerId));
}
}
private synchronized void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private synchronized void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = AMInfoProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public synchronized ApplicationAttemptId getAppAttemptId() {
AMInfoProtoOrBuilder p = viaProto ? proto : builder;
if (appAttemptId != null) {
return appAttemptId;
} // Else via proto
if (!p.hasApplicationAttemptId()) {
return null;
}
appAttemptId = convertFromProtoFormat(p.getApplicationAttemptId());
return appAttemptId;
}
@Override
public synchronized void setAppAttemptId(ApplicationAttemptId appAttemptId) {
maybeInitBuilder();
if (appAttemptId == null) {
builder.clearApplicationAttemptId();
}
this.appAttemptId = appAttemptId;
}
@Override
public synchronized long getStartTime() {
AMInfoProtoOrBuilder p = viaProto ? proto : builder;
return (p.getStartTime());
}
@Override
public synchronized void setStartTime(long startTime) {
maybeInitBuilder();
builder.setStartTime(startTime);
}
@Override
public synchronized ContainerId getContainerId() {
AMInfoProtoOrBuilder p = viaProto ? proto : builder;
if (containerId != null) {
return containerId;
} // Else via proto
if (!p.hasContainerId()) {
return null;
}
containerId = convertFromProtoFormat(p.getContainerId());
return containerId;
}
@Override
public synchronized void setContainerId(ContainerId containerId) {
maybeInitBuilder();
if (containerId == null) {
builder.clearContainerId();
}
this.containerId = containerId;
}
@Override
public synchronized String getNodeManagerHost() {
AMInfoProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasNodeManagerHost()) {
return null;
}
return p.getNodeManagerHost();
}
@Override
public synchronized void setNodeManagerHost(String nmHost) {
maybeInitBuilder();
if (nmHost == null) {
builder.clearNodeManagerHost();
return;
}
builder.setNodeManagerHost(nmHost);
}
@Override
public synchronized int getNodeManagerPort() {
AMInfoProtoOrBuilder p = viaProto ? proto : builder;
return (p.getNodeManagerPort());
}
@Override
public synchronized void setNodeManagerPort(int nmPort) {
maybeInitBuilder();
builder.setNodeManagerPort(nmPort);
}
@Override
public synchronized int getNodeManagerHttpPort() {
AMInfoProtoOrBuilder p = viaProto ? proto : builder;
return p.getNodeManagerHttpPort();
}
@Override
public synchronized void setNodeManagerHttpPort(int httpPort) {
maybeInitBuilder();
builder.setNodeManagerHttpPort(httpPort);
}
private ApplicationAttemptIdPBImpl convertFromProtoFormat(
ApplicationAttemptIdProto p) {
return new ApplicationAttemptIdPBImpl(p);
}
private ContainerIdPBImpl convertFromProtoFormat(ContainerIdProto p) {
return new ContainerIdPBImpl(p);
}
private
ApplicationAttemptIdProto convertToProtoFormat(ApplicationAttemptId t) {
return ((ApplicationAttemptIdPBImpl) t).getProto();
}
private ContainerIdProto convertToProtoFormat(ContainerId t) {
return ((ContainerIdPBImpl) t).getProto();
}
}
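// Illustrative usage sketch (editor's addition, not part of the original
// Hadoop file): fills in an AMInfo record and serializes it. The host and
// port values are placeholders.
class AMInfoPBImplUsageSketch {
  static AMInfoProto buildProto(ApplicationAttemptId appAttemptId,
      ContainerId containerId) {
    AMInfoPBImpl amInfo = new AMInfoPBImpl();
    amInfo.setAppAttemptId(appAttemptId);
    amInfo.setContainerId(containerId);
    amInfo.setStartTime(System.currentTimeMillis());
    amInfo.setNodeManagerHost("nm-host.example.com"); // placeholder host
    amInfo.setNodeManagerPort(45454);                 // placeholder port
    amInfo.setNodeManagerHttpPort(8042);              // placeholder port
    return amInfo.getProto();
  }
}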
| 6,090 | 29.153465 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/service/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Private
package org.apache.hadoop.mapreduce.v2.api.impl.pb.service;
import org.apache.hadoop.classification.InterfaceAudience;
| 952 | 44.380952 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/service/HSClientProtocolPBServiceImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.impl.pb.service;
import org.apache.hadoop.mapreduce.v2.api.HSClientProtocol;
import org.apache.hadoop.mapreduce.v2.api.HSClientProtocolPB;
public class HSClientProtocolPBServiceImpl extends MRClientProtocolPBServiceImpl
implements HSClientProtocolPB {
public HSClientProtocolPBServiceImpl(HSClientProtocol impl) {
super(impl);
}
}
| 1,196 | 38.9 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/service/MRClientProtocolPBServiceImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.impl.pb.service;
import java.io.IOException;
import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
import org.apache.hadoop.mapreduce.v2.api.MRClientProtocolPB;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.CancelDelegationTokenResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.RenewDelegationTokenResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.CancelDelegationTokenRequestPBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.CancelDelegationTokenResponsePBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.FailTaskAttemptRequestPBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.FailTaskAttemptResponsePBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetCountersRequestPBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetCountersResponsePBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetDelegationTokenRequestPBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetDelegationTokenResponsePBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetDiagnosticsRequestPBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetDiagnosticsResponsePBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetJobReportRequestPBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetJobReportResponsePBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetTaskAttemptCompletionEventsRequestPBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetTaskAttemptCompletionEventsResponsePBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetTaskAttemptReportRequestPBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetTaskAttemptReportResponsePBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetTaskReportRequestPBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetTaskReportResponsePBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetTaskReportsRequestPBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetTaskReportsResponsePBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.KillJobRequestPBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.KillJobResponsePBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.KillTaskAttemptRequestPBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.KillTaskAttemptResponsePBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.KillTaskRequestPBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.KillTaskResponsePBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.RenewDelegationTokenRequestPBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.RenewDelegationTokenResponsePBImpl;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptResponseProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersResponseProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsResponseProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportResponseProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptCompletionEventsRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptCompletionEventsResponseProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptReportRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptReportResponseProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportResponseProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportsRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportsResponseProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillJobRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillJobResponseProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskAttemptRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskAttemptResponseProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskResponseProto;
import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto;
import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto;
import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto;
import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProto;
import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto;
import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
public class MRClientProtocolPBServiceImpl implements MRClientProtocolPB {
private MRClientProtocol real;
public MRClientProtocolPBServiceImpl(MRClientProtocol impl) {
this.real = impl;
}
@Override
public GetJobReportResponseProto getJobReport(RpcController controller,
GetJobReportRequestProto proto) throws ServiceException {
GetJobReportRequestPBImpl request = new GetJobReportRequestPBImpl(proto);
try {
GetJobReportResponse response = real.getJobReport(request);
return ((GetJobReportResponsePBImpl)response).getProto();
} catch (IOException e) {
throw new ServiceException(e);
}
}
@Override
public GetTaskReportResponseProto getTaskReport(RpcController controller,
GetTaskReportRequestProto proto) throws ServiceException {
GetTaskReportRequest request = new GetTaskReportRequestPBImpl(proto);
try {
GetTaskReportResponse response = real.getTaskReport(request);
return ((GetTaskReportResponsePBImpl)response).getProto();
} catch (IOException e) {
throw new ServiceException(e);
}
}
@Override
public GetTaskAttemptReportResponseProto getTaskAttemptReport(
RpcController controller, GetTaskAttemptReportRequestProto proto)
throws ServiceException {
GetTaskAttemptReportRequest request = new GetTaskAttemptReportRequestPBImpl(proto);
try {
GetTaskAttemptReportResponse response = real.getTaskAttemptReport(request);
return ((GetTaskAttemptReportResponsePBImpl)response).getProto();
} catch (IOException e) {
throw new ServiceException(e);
}
}
@Override
public GetCountersResponseProto getCounters(RpcController controller,
GetCountersRequestProto proto) throws ServiceException {
GetCountersRequest request = new GetCountersRequestPBImpl(proto);
try {
GetCountersResponse response = real.getCounters(request);
return ((GetCountersResponsePBImpl)response).getProto();
} catch (IOException e) {
throw new ServiceException(e);
}
}
@Override
public GetTaskAttemptCompletionEventsResponseProto getTaskAttemptCompletionEvents(
RpcController controller,
GetTaskAttemptCompletionEventsRequestProto proto)
throws ServiceException {
GetTaskAttemptCompletionEventsRequest request = new GetTaskAttemptCompletionEventsRequestPBImpl(proto);
try {
GetTaskAttemptCompletionEventsResponse response = real.getTaskAttemptCompletionEvents(request);
return ((GetTaskAttemptCompletionEventsResponsePBImpl)response).getProto();
} catch (IOException e) {
throw new ServiceException(e);
}
}
@Override
public GetTaskReportsResponseProto getTaskReports(RpcController controller,
GetTaskReportsRequestProto proto) throws ServiceException {
GetTaskReportsRequest request = new GetTaskReportsRequestPBImpl(proto);
try {
GetTaskReportsResponse response = real.getTaskReports(request);
return ((GetTaskReportsResponsePBImpl)response).getProto();
} catch (IOException e) {
throw new ServiceException(e);
}
}
@Override
public GetDiagnosticsResponseProto getDiagnostics(RpcController controller,
GetDiagnosticsRequestProto proto) throws ServiceException {
GetDiagnosticsRequest request = new GetDiagnosticsRequestPBImpl(proto);
try {
GetDiagnosticsResponse response = real.getDiagnostics(request);
return ((GetDiagnosticsResponsePBImpl)response).getProto();
} catch (IOException e) {
throw new ServiceException(e);
}
}
@Override
public GetDelegationTokenResponseProto getDelegationToken(
RpcController controller, GetDelegationTokenRequestProto proto)
throws ServiceException {
GetDelegationTokenRequest request = new GetDelegationTokenRequestPBImpl(proto);
try {
GetDelegationTokenResponse response = real.getDelegationToken(request);
return ((GetDelegationTokenResponsePBImpl)response).getProto();
} catch (IOException e) {
throw new ServiceException(e);
}
}
@Override
public KillJobResponseProto killJob(RpcController controller,
KillJobRequestProto proto) throws ServiceException {
KillJobRequest request = new KillJobRequestPBImpl(proto);
try {
KillJobResponse response = real.killJob(request);
return ((KillJobResponsePBImpl)response).getProto();
} catch (IOException e) {
throw new ServiceException(e);
}
}
@Override
public KillTaskResponseProto killTask(RpcController controller,
KillTaskRequestProto proto) throws ServiceException {
KillTaskRequest request = new KillTaskRequestPBImpl(proto);
try {
KillTaskResponse response = real.killTask(request);
return ((KillTaskResponsePBImpl)response).getProto();
} catch (IOException e) {
throw new ServiceException(e);
}
}
@Override
public KillTaskAttemptResponseProto killTaskAttempt(RpcController controller,
KillTaskAttemptRequestProto proto) throws ServiceException {
KillTaskAttemptRequest request = new KillTaskAttemptRequestPBImpl(proto);
try {
KillTaskAttemptResponse response = real.killTaskAttempt(request);
return ((KillTaskAttemptResponsePBImpl)response).getProto();
} catch (IOException e) {
throw new ServiceException(e);
}
}
@Override
public FailTaskAttemptResponseProto failTaskAttempt(RpcController controller,
FailTaskAttemptRequestProto proto) throws ServiceException {
FailTaskAttemptRequest request = new FailTaskAttemptRequestPBImpl(proto);
try {
FailTaskAttemptResponse response = real.failTaskAttempt(request);
return ((FailTaskAttemptResponsePBImpl)response).getProto();
} catch (IOException e) {
throw new ServiceException(e);
}
}
@Override
public RenewDelegationTokenResponseProto renewDelegationToken(
RpcController controller, RenewDelegationTokenRequestProto proto)
throws ServiceException {
RenewDelegationTokenRequestPBImpl request =
new RenewDelegationTokenRequestPBImpl(proto);
try {
RenewDelegationTokenResponse response = real.renewDelegationToken(request);
return ((RenewDelegationTokenResponsePBImpl)response).getProto();
} catch (IOException e) {
throw new ServiceException(e);
}
}
@Override
public CancelDelegationTokenResponseProto cancelDelegationToken(
RpcController controller, CancelDelegationTokenRequestProto proto)
throws ServiceException {
CancelDelegationTokenRequestPBImpl request =
new CancelDelegationTokenRequestPBImpl(proto);
try {
CancelDelegationTokenResponse response = real.cancelDelegationToken(request);
return ((CancelDelegationTokenResponsePBImpl)response).getProto();
} catch (IOException e) {
throw new ServiceException(e);
}
}
}
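// Illustrative usage sketch (editor's addition, not part of the original
// Hadoop file): shows the translation role of the adapter above. A proto
// request is converted into a record request, delegated to the real
// MRClientProtocol, and the record response is converted back into a proto.
class MRClientProtocolPBServiceImplUsageSketch {
  static GetJobReportResponseProto callThroughPbLayer(MRClientProtocol directImpl,
      GetJobReportRequestProto requestProto) throws ServiceException {
    MRClientProtocolPBServiceImpl pbService =
        new MRClientProtocolPBServiceImpl(directImpl);
    return pbService.getJobReport(null, requestProto); // RpcController is unused here
  }
}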
| 15,264 | 50.921769 | 111 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Private
package org.apache.hadoop.mapreduce.v2.api.impl.pb.client;
import org.apache.hadoop.classification.InterfaceAudience;
| 951 | 44.333333 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/MRClientProtocolPBClientImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.impl.pb.client;
import java.io.Closeable;
import java.io.IOException;
import java.lang.reflect.UndeclaredThrowableException;
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
import org.apache.hadoop.mapreduce.v2.api.MRClientProtocolPB;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.CancelDelegationTokenRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.CancelDelegationTokenResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.RenewDelegationTokenRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.RenewDelegationTokenResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.CancelDelegationTokenRequestPBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.CancelDelegationTokenResponsePBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.FailTaskAttemptRequestPBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.FailTaskAttemptResponsePBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetCountersRequestPBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetCountersResponsePBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetDelegationTokenRequestPBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetDelegationTokenResponsePBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetDiagnosticsRequestPBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetDiagnosticsResponsePBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetJobReportRequestPBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetJobReportResponsePBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetTaskAttemptCompletionEventsRequestPBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetTaskAttemptCompletionEventsResponsePBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetTaskAttemptReportRequestPBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetTaskAttemptReportResponsePBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetTaskReportRequestPBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetTaskReportResponsePBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetTaskReportsRequestPBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetTaskReportsResponsePBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.KillJobRequestPBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.KillJobResponsePBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.KillTaskAttemptRequestPBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.KillTaskAttemptResponsePBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.KillTaskRequestPBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.KillTaskResponsePBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.RenewDelegationTokenRequestPBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.RenewDelegationTokenResponsePBImpl;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptCompletionEventsRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptReportRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportsRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillJobRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskAttemptRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskRequestProto;
import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto;
import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto;
import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto;
import com.google.protobuf.ServiceException;
public class MRClientProtocolPBClientImpl implements MRClientProtocol,
Closeable {
protected MRClientProtocolPB proxy;
  public MRClientProtocolPBClientImpl() {}
public MRClientProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException {
RPC.setProtocolEngine(conf, MRClientProtocolPB.class, ProtobufRpcEngine.class);
proxy = RPC.getProxy(MRClientProtocolPB.class, clientVersion, addr, conf);
}
@Override
public InetSocketAddress getConnectAddress() {
return RPC.getServerAddress(proxy);
}
@Override
public void close() {
if (this.proxy != null) {
RPC.stopProxy(this.proxy);
}
}
@Override
public GetJobReportResponse getJobReport(GetJobReportRequest request)
throws IOException {
GetJobReportRequestProto requestProto = ((GetJobReportRequestPBImpl)request).getProto();
try {
return new GetJobReportResponsePBImpl(proxy.getJobReport(null, requestProto));
} catch (ServiceException e) {
throw unwrapAndThrowException(e);
}
}
@Override
public GetTaskReportResponse getTaskReport(GetTaskReportRequest request)
throws IOException {
GetTaskReportRequestProto requestProto = ((GetTaskReportRequestPBImpl)request).getProto();
try {
return new GetTaskReportResponsePBImpl(proxy.getTaskReport(null, requestProto));
} catch (ServiceException e) {
throw unwrapAndThrowException(e);
}
}
@Override
public GetTaskAttemptReportResponse getTaskAttemptReport(
GetTaskAttemptReportRequest request) throws IOException {
GetTaskAttemptReportRequestProto requestProto = ((GetTaskAttemptReportRequestPBImpl)request).getProto();
try {
return new GetTaskAttemptReportResponsePBImpl(proxy.getTaskAttemptReport(null, requestProto));
} catch (ServiceException e) {
throw unwrapAndThrowException(e);
}
}
@Override
public GetCountersResponse getCounters(GetCountersRequest request)
throws IOException {
GetCountersRequestProto requestProto = ((GetCountersRequestPBImpl)request).getProto();
try {
return new GetCountersResponsePBImpl(proxy.getCounters(null, requestProto));
} catch (ServiceException e) {
throw unwrapAndThrowException(e);
}
}
@Override
public GetTaskAttemptCompletionEventsResponse getTaskAttemptCompletionEvents(
GetTaskAttemptCompletionEventsRequest request) throws IOException {
GetTaskAttemptCompletionEventsRequestProto requestProto = ((GetTaskAttemptCompletionEventsRequestPBImpl)request).getProto();
try {
return new GetTaskAttemptCompletionEventsResponsePBImpl(proxy.getTaskAttemptCompletionEvents(null, requestProto));
} catch (ServiceException e) {
throw unwrapAndThrowException(e);
}
}
@Override
public GetTaskReportsResponse getTaskReports(GetTaskReportsRequest request)
throws IOException {
GetTaskReportsRequestProto requestProto = ((GetTaskReportsRequestPBImpl)request).getProto();
try {
return new GetTaskReportsResponsePBImpl(proxy.getTaskReports(null, requestProto));
} catch (ServiceException e) {
throw unwrapAndThrowException(e);
}
}
@Override
public GetDiagnosticsResponse getDiagnostics(GetDiagnosticsRequest request)
throws IOException {
GetDiagnosticsRequestProto requestProto = ((GetDiagnosticsRequestPBImpl)request).getProto();
try {
return new GetDiagnosticsResponsePBImpl(proxy.getDiagnostics(null, requestProto));
} catch (ServiceException e) {
throw unwrapAndThrowException(e);
}
}
@Override
public GetDelegationTokenResponse getDelegationToken(
GetDelegationTokenRequest request) throws IOException {
GetDelegationTokenRequestProto requestProto = ((GetDelegationTokenRequestPBImpl)
request).getProto();
try {
return new GetDelegationTokenResponsePBImpl(proxy.getDelegationToken(
null, requestProto));
} catch (ServiceException e) {
throw unwrapAndThrowException(e);
}
}
@Override
public KillJobResponse killJob(KillJobRequest request)
throws IOException {
KillJobRequestProto requestProto = ((KillJobRequestPBImpl)request).getProto();
try {
return new KillJobResponsePBImpl(proxy.killJob(null, requestProto));
} catch (ServiceException e) {
throw unwrapAndThrowException(e);
}
}
@Override
public KillTaskResponse killTask(KillTaskRequest request)
throws IOException {
KillTaskRequestProto requestProto = ((KillTaskRequestPBImpl)request).getProto();
try {
return new KillTaskResponsePBImpl(proxy.killTask(null, requestProto));
} catch (ServiceException e) {
throw unwrapAndThrowException(e);
}
}
@Override
public KillTaskAttemptResponse killTaskAttempt(KillTaskAttemptRequest request)
throws IOException {
KillTaskAttemptRequestProto requestProto = ((KillTaskAttemptRequestPBImpl)request).getProto();
try {
return new KillTaskAttemptResponsePBImpl(proxy.killTaskAttempt(null, requestProto));
} catch (ServiceException e) {
throw unwrapAndThrowException(e);
}
}
@Override
public FailTaskAttemptResponse failTaskAttempt(FailTaskAttemptRequest request)
throws IOException {
FailTaskAttemptRequestProto requestProto = ((FailTaskAttemptRequestPBImpl)request).getProto();
try {
return new FailTaskAttemptResponsePBImpl(proxy.failTaskAttempt(null, requestProto));
} catch (ServiceException e) {
throw unwrapAndThrowException(e);
}
}
@Override
public RenewDelegationTokenResponse renewDelegationToken(
RenewDelegationTokenRequest request) throws IOException {
RenewDelegationTokenRequestProto requestProto =
((RenewDelegationTokenRequestPBImpl) request).getProto();
try {
return new RenewDelegationTokenResponsePBImpl(proxy.renewDelegationToken(
null, requestProto));
} catch (ServiceException e) {
throw unwrapAndThrowException(e);
}
}
@Override
public CancelDelegationTokenResponse cancelDelegationToken(
CancelDelegationTokenRequest request) throws IOException {
CancelDelegationTokenRequestProto requestProto =
((CancelDelegationTokenRequestPBImpl) request).getProto();
try {
return new CancelDelegationTokenResponsePBImpl(
proxy.cancelDelegationToken(null, requestProto));
} catch (ServiceException e) {
throw unwrapAndThrowException(e);
}
}
private IOException unwrapAndThrowException(ServiceException se) {
if (se.getCause() instanceof RemoteException) {
return ((RemoteException) se.getCause()).unwrapRemoteException();
} else if (se.getCause() instanceof IOException) {
return (IOException)se.getCause();
} else {
throw new UndeclaredThrowableException(se.getCause());
}
}
}
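// Illustrative usage sketch (editor's addition, not part of the original
// Hadoop file): opens a proxy to an MR application master, issues one call,
// and closes it. The protocol version constant below is illustrative only.
class MRClientProtocolPBClientImplUsageSketch {
  static GetJobReportResponse fetchJobReport(InetSocketAddress amAddress,
      Configuration conf, GetJobReportRequest request) throws IOException {
    MRClientProtocolPBClientImpl client =
        new MRClientProtocolPBClientImpl(1L, amAddress, conf);
    try {
      return client.getJobReport(request);
    } finally {
      client.close(); // stops the underlying RPC proxy
    }
  }
}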
| 14,509 | 47.528428 | 128 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/HSClientProtocolPBClientImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.impl.pb.client;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.mapreduce.v2.api.HSClientProtocol;
import org.apache.hadoop.mapreduce.v2.api.HSClientProtocolPB;
public class HSClientProtocolPBClientImpl extends MRClientProtocolPBClientImpl
implements HSClientProtocol {
public HSClientProtocolPBClientImpl(long clientVersion,
InetSocketAddress addr, Configuration conf) throws IOException {
super();
RPC.setProtocolEngine(conf, HSClientProtocolPB.class,
ProtobufRpcEngine.class);
proxy = (HSClientProtocolPB)RPC.getProxy(
HSClientProtocolPB.class, clientVersion, addr, conf);
}
}
| 1,641 | 39.04878 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Private
package org.apache.hadoop.mapreduce.v2.util;
import org.apache.hadoop.classification.InterfaceAudience;
| 937 | 43.666667 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.util;
import java.io.File;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URI;
import java.net.URISyntaxException;
import java.security.AccessController;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.InvalidJobConfException;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Task;
import org.apache.hadoop.mapred.TaskLog;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.filecache.DistributedCache;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.util.ApplicationClassLoader;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.ContainerLogAppender;
import org.apache.hadoop.yarn.ContainerRollingLogAppender;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.util.Apps;
import org.apache.hadoop.yarn.util.ConverterUtils;
/**
* Helper class for MR applications
*/
@Private
@Unstable
public class MRApps extends Apps {
public static final Log LOG = LogFactory.getLog(MRApps.class);
public static String toString(JobId jid) {
return jid.toString();
}
public static JobId toJobID(String jid) {
return TypeConverter.toYarn(JobID.forName(jid));
}
public static String toString(TaskId tid) {
return tid.toString();
}
public static TaskId toTaskID(String tid) {
return TypeConverter.toYarn(TaskID.forName(tid));
}
public static String toString(TaskAttemptId taid) {
return taid.toString();
}
public static TaskAttemptId toTaskAttemptID(String taid) {
return TypeConverter.toYarn(TaskAttemptID.forName(taid));
}
public static String taskSymbol(TaskType type) {
switch (type) {
case MAP: return "m";
case REDUCE: return "r";
}
throw new YarnRuntimeException("Unknown task type: "+ type.toString());
}
public static enum TaskAttemptStateUI {
NEW(
new TaskAttemptState[] { TaskAttemptState.NEW,
TaskAttemptState.STARTING }),
RUNNING(
new TaskAttemptState[] { TaskAttemptState.RUNNING,
TaskAttemptState.COMMIT_PENDING }),
SUCCESSFUL(new TaskAttemptState[] { TaskAttemptState.SUCCEEDED}),
FAILED(new TaskAttemptState[] { TaskAttemptState.FAILED}),
KILLED(new TaskAttemptState[] { TaskAttemptState.KILLED});
private final List<TaskAttemptState> correspondingStates;
private TaskAttemptStateUI(TaskAttemptState[] correspondingStates) {
this.correspondingStates = Arrays.asList(correspondingStates);
}
public boolean correspondsTo(TaskAttemptState state) {
return this.correspondingStates.contains(state);
}
}
public static enum TaskStateUI {
RUNNING(
new TaskState[]{TaskState.RUNNING}),
PENDING(new TaskState[]{TaskState.SCHEDULED}),
COMPLETED(new TaskState[]{TaskState.SUCCEEDED, TaskState.FAILED, TaskState.KILLED});
private final List<TaskState> correspondingStates;
private TaskStateUI(TaskState[] correspondingStates) {
this.correspondingStates = Arrays.asList(correspondingStates);
}
public boolean correspondsTo(TaskState state) {
return this.correspondingStates.contains(state);
}
}
public static TaskType taskType(String symbol) {
// JDK 7 supports switch on strings
if (symbol.equals("m")) return TaskType.MAP;
if (symbol.equals("r")) return TaskType.REDUCE;
throw new YarnRuntimeException("Unknown task symbol: "+ symbol);
}
public static TaskAttemptStateUI taskAttemptState(String attemptStateStr) {
return TaskAttemptStateUI.valueOf(attemptStateStr);
}
public static TaskStateUI taskState(String taskStateStr) {
return TaskStateUI.valueOf(taskStateStr);
}
// gets the base name of the MapReduce framework or null if no
// framework was configured
private static String getMRFrameworkName(Configuration conf) {
String frameworkName = null;
String framework =
conf.get(MRJobConfig.MAPREDUCE_APPLICATION_FRAMEWORK_PATH, "");
if (!framework.isEmpty()) {
URI uri;
try {
uri = new URI(framework);
} catch (URISyntaxException e) {
throw new IllegalArgumentException("Unable to parse '" + framework
+ "' as a URI, check the setting for "
+ MRJobConfig.MAPREDUCE_APPLICATION_FRAMEWORK_PATH, e);
}
frameworkName = uri.getFragment();
if (frameworkName == null) {
frameworkName = new Path(uri).getName();
}
}
return frameworkName;
}
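  // Illustrative examples for getMRFrameworkName (not from the original source):
  // a framework path of "hdfs:///mr/framework/mr-framework.tar.gz#mrframework"
  // yields the fragment "mrframework", while the same path without a fragment
  // yields the archive's base name "mr-framework.tar.gz".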
private static void setMRFrameworkClasspath(
Map<String, String> environment, Configuration conf) throws IOException {
// Propagate the system classpath when using the mini cluster
if (conf.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
MRApps.addToEnvironment(environment, Environment.CLASSPATH.name(),
System.getProperty("java.class.path"), conf);
}
boolean crossPlatform =
conf.getBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM,
MRConfig.DEFAULT_MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM);
// if the framework is specified then only use the MR classpath
String frameworkName = getMRFrameworkName(conf);
if (frameworkName == null) {
// Add standard Hadoop classes
for (String c : conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
crossPlatform
? YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH
: YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
MRApps.addToEnvironment(environment, Environment.CLASSPATH.name(),
c.trim(), conf);
}
}
boolean foundFrameworkInClasspath = (frameworkName == null);
for (String c : conf.getStrings(MRJobConfig.MAPREDUCE_APPLICATION_CLASSPATH,
crossPlatform ?
StringUtils.getStrings(MRJobConfig.DEFAULT_MAPREDUCE_CROSS_PLATFORM_APPLICATION_CLASSPATH)
: StringUtils.getStrings(MRJobConfig.DEFAULT_MAPREDUCE_APPLICATION_CLASSPATH))) {
MRApps.addToEnvironment(environment, Environment.CLASSPATH.name(),
c.trim(), conf);
if (!foundFrameworkInClasspath) {
foundFrameworkInClasspath = c.contains(frameworkName);
}
}
if (!foundFrameworkInClasspath) {
throw new IllegalArgumentException(
"Could not locate MapReduce framework name '" + frameworkName
+ "' in " + MRJobConfig.MAPREDUCE_APPLICATION_CLASSPATH);
}
// TODO: Remove duplicates.
}
@SuppressWarnings("deprecation")
public static void setClasspath(Map<String, String> environment,
Configuration conf) throws IOException {
boolean userClassesTakesPrecedence =
conf.getBoolean(MRJobConfig.MAPREDUCE_JOB_USER_CLASSPATH_FIRST, false);
String classpathEnvVar =
conf.getBoolean(MRJobConfig.MAPREDUCE_JOB_CLASSLOADER, false)
? Environment.APP_CLASSPATH.name() : Environment.CLASSPATH.name();
MRApps.addToEnvironment(environment,
classpathEnvVar, crossPlatformifyMREnv(conf, Environment.PWD), conf);
if (!userClassesTakesPrecedence) {
MRApps.setMRFrameworkClasspath(environment, conf);
}
MRApps.addToEnvironment(
environment,
classpathEnvVar,
MRJobConfig.JOB_JAR + Path.SEPARATOR + MRJobConfig.JOB_JAR, conf);
MRApps.addToEnvironment(
environment,
classpathEnvVar,
MRJobConfig.JOB_JAR + Path.SEPARATOR + "classes" + Path.SEPARATOR, conf);
MRApps.addToEnvironment(
environment,
classpathEnvVar,
MRJobConfig.JOB_JAR + Path.SEPARATOR + "lib" + Path.SEPARATOR + "*", conf);
MRApps.addToEnvironment(
environment,
classpathEnvVar,
crossPlatformifyMREnv(conf, Environment.PWD) + Path.SEPARATOR + "*", conf);
// a * in the classpath will only find a .jar, so we need to filter out
// all .jars and add everything else
addToClasspathIfNotJar(DistributedCache.getFileClassPaths(conf),
DistributedCache.getCacheFiles(conf),
conf,
environment, classpathEnvVar);
addToClasspathIfNotJar(DistributedCache.getArchiveClassPaths(conf),
DistributedCache.getCacheArchives(conf),
conf,
environment, classpathEnvVar);
if (userClassesTakesPrecedence) {
MRApps.setMRFrameworkClasspath(environment, conf);
}
}
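  // Minimal usage sketch for setClasspath (illustrative, with an assumed
  // caller-supplied Configuration named conf):
  //   Map<String, String> env = new HashMap<String, String>();
  //   MRApps.setClasspath(env, conf);
  //   // env now holds CLASSPATH (or APP_CLASSPATH when the job classloader is
  //   // enabled) entries for the job jar, its classes/lib dirs and the framework.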
  /**
   * Add the paths to the classpath if they are not jars.
   * @param paths the paths to add to the classpath
   * @param withLinks the corresponding URIs that may carry a link name in their fragment
   * @param conf used to resolve the paths
   * @param environment the environment to update the classpath variable in
   * @param classpathEnvVar the name of the classpath environment variable to update
   * @throws IOException if there is an error resolving any of the paths.
   */
private static void addToClasspathIfNotJar(Path[] paths,
URI[] withLinks, Configuration conf,
Map<String, String> environment,
String classpathEnvVar) throws IOException {
if (paths != null) {
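      // Build a lookup from each fully-resolved cache URI to its link name
      // (the URI fragment, if present), skipping jar entries; the loop below
      // uses it so classpath entries are emitted under their symlinked names.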
HashMap<Path, String> linkLookup = new HashMap<Path, String>();
if (withLinks != null) {
for (URI u: withLinks) {
Path p = new Path(u);
FileSystem remoteFS = p.getFileSystem(conf);
p = remoteFS.resolvePath(p.makeQualified(remoteFS.getUri(),
remoteFS.getWorkingDirectory()));
String name = (null == u.getFragment())
? p.getName() : u.getFragment();
if (!StringUtils.toLowerCase(name).endsWith(".jar")) {
linkLookup.put(p, name);
}
}
}
for (Path p : paths) {
FileSystem remoteFS = p.getFileSystem(conf);
p = remoteFS.resolvePath(p.makeQualified(remoteFS.getUri(),
remoteFS.getWorkingDirectory()));
String name = linkLookup.get(p);
if (name == null) {
name = p.getName();
}
if(!StringUtils.toLowerCase(name).endsWith(".jar")) {
MRApps.addToEnvironment(
environment,
classpathEnvVar,
crossPlatformifyMREnv(conf, Environment.PWD) + Path.SEPARATOR + name, conf);
}
}
}
}
  /**
   * Creates and sets an {@link ApplicationClassLoader} on the given
   * configuration and as the thread context classloader, if
   * {@link MRJobConfig#MAPREDUCE_JOB_CLASSLOADER} is set to true, and
   * the APP_CLASSPATH environment variable is set.
   * @param conf the job configuration
   * @throws IOException if the classloader cannot be created
   */
public static void setJobClassLoader(Configuration conf)
throws IOException {
setClassLoader(createJobClassLoader(conf), conf);
}
  /**
   * Creates an {@link ApplicationClassLoader} if
   * {@link MRJobConfig#MAPREDUCE_JOB_CLASSLOADER} is set to true, and
   * the APP_CLASSPATH environment variable is set.
   * @param conf the job configuration
   * @return the created job classloader, or null if the job classloader is not
   *         enabled or the APP_CLASSPATH environment variable is not set
   * @throws IOException if the classloader cannot be created
   */
public static ClassLoader createJobClassLoader(Configuration conf)
throws IOException {
ClassLoader jobClassLoader = null;
if (conf.getBoolean(MRJobConfig.MAPREDUCE_JOB_CLASSLOADER, false)) {
String appClasspath = System.getenv(Environment.APP_CLASSPATH.key());
if (appClasspath == null) {
LOG.warn("Not creating job classloader since APP_CLASSPATH is not set.");
} else {
LOG.info("Creating job classloader");
if (LOG.isDebugEnabled()) {
LOG.debug("APP_CLASSPATH=" + appClasspath);
}
String[] systemClasses = getSystemClasses(conf);
jobClassLoader = createJobClassLoader(appClasspath,
systemClasses);
}
}
return jobClassLoader;
}
/**
* Sets the provided classloader on the given configuration and as the thread
* context classloader if the classloader is not null.
* @param classLoader
* @param conf
*/
public static void setClassLoader(ClassLoader classLoader,
Configuration conf) {
if (classLoader != null) {
LOG.info("Setting classloader " + classLoader +
" on the configuration and as the thread context classloader");
conf.setClassLoader(classLoader);
Thread.currentThread().setContextClassLoader(classLoader);
}
}
@VisibleForTesting
static String[] getSystemClasses(Configuration conf) {
return conf.getTrimmedStrings(
MRJobConfig.MAPREDUCE_JOB_CLASSLOADER_SYSTEM_CLASSES);
}
private static ClassLoader createJobClassLoader(final String appClasspath,
final String[] systemClasses) throws IOException {
try {
return AccessController.doPrivileged(
new PrivilegedExceptionAction<ClassLoader>() {
@Override
public ClassLoader run() throws MalformedURLException {
return new ApplicationClassLoader(appClasspath,
MRApps.class.getClassLoader(), Arrays.asList(systemClasses));
}
});
} catch (PrivilegedActionException e) {
Throwable t = e.getCause();
if (t instanceof MalformedURLException) {
throw (MalformedURLException) t;
}
throw new IOException(e);
}
}
private static final String STAGING_CONSTANT = ".staging";
public static Path getStagingAreaDir(Configuration conf, String user) {
return new Path(conf.get(MRJobConfig.MR_AM_STAGING_DIR,
MRJobConfig.DEFAULT_MR_AM_STAGING_DIR)
+ Path.SEPARATOR + user + Path.SEPARATOR + STAGING_CONSTANT);
}
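  // Example for getStagingAreaDir (illustrative): with user "alice" this
  // resolves to <staging-dir>/alice/.staging, where <staging-dir> is the
  // configured MRJobConfig.MR_AM_STAGING_DIR (or its default).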
public static String getJobFile(Configuration conf, String user,
org.apache.hadoop.mapreduce.JobID jobId) {
Path jobFile = new Path(MRApps.getStagingAreaDir(conf, user),
jobId.toString() + Path.SEPARATOR + MRJobConfig.JOB_CONF_FILE);
return jobFile.toString();
}
public static Path getEndJobCommitSuccessFile(Configuration conf, String user,
JobId jobId) {
Path endCommitFile = new Path(MRApps.getStagingAreaDir(conf, user),
jobId.toString() + Path.SEPARATOR + "COMMIT_SUCCESS");
return endCommitFile;
}
public static Path getEndJobCommitFailureFile(Configuration conf, String user,
JobId jobId) {
Path endCommitFile = new Path(MRApps.getStagingAreaDir(conf, user),
jobId.toString() + Path.SEPARATOR + "COMMIT_FAIL");
return endCommitFile;
}
public static Path getStartJobCommitFile(Configuration conf, String user,
JobId jobId) {
Path startCommitFile = new Path(MRApps.getStagingAreaDir(conf, user),
jobId.toString() + Path.SEPARATOR + "COMMIT_STARTED");
return startCommitFile;
}
public static void setupDistributedCache(
Configuration conf,
Map<String, LocalResource> localResources)
throws IOException {
// Cache archives
parseDistributedCacheArtifacts(conf, localResources,
LocalResourceType.ARCHIVE,
DistributedCache.getCacheArchives(conf),
DistributedCache.getArchiveTimestamps(conf),
getFileSizes(conf, MRJobConfig.CACHE_ARCHIVES_SIZES),
DistributedCache.getArchiveVisibilities(conf));
// Cache files
parseDistributedCacheArtifacts(conf,
localResources,
LocalResourceType.FILE,
DistributedCache.getCacheFiles(conf),
DistributedCache.getFileTimestamps(conf),
getFileSizes(conf, MRJobConfig.CACHE_FILES_SIZES),
DistributedCache.getFileVisibilities(conf));
}
  /**
   * Set up the DistributedCache related configs so that
   * {@link DistributedCache#getLocalCacheFiles(Configuration)}
   * and
   * {@link DistributedCache#getLocalCacheArchives(Configuration)}
   * work.
   * @param conf the configuration to update with the localized paths
   * @throws java.io.IOException
   */
public static void setupDistributedCacheLocal(Configuration conf)
throws IOException {
String localWorkDir = System.getenv("PWD");
    // all symlinks are created in the current work-dir (PWD)
// Update the configuration object with localized archives.
URI[] cacheArchives = DistributedCache.getCacheArchives(conf);
if (cacheArchives != null) {
List<String> localArchives = new ArrayList<String>();
for (int i = 0; i < cacheArchives.length; ++i) {
URI u = cacheArchives[i];
Path p = new Path(u);
Path name =
new Path((null == u.getFragment()) ? p.getName()
: u.getFragment());
String linkName = name.toUri().getPath();
localArchives.add(new Path(localWorkDir, linkName).toUri().getPath());
}
if (!localArchives.isEmpty()) {
conf.set(MRJobConfig.CACHE_LOCALARCHIVES, StringUtils
.arrayToString(localArchives.toArray(new String[localArchives
.size()])));
}
}
// Update the configuration object with localized files.
URI[] cacheFiles = DistributedCache.getCacheFiles(conf);
if (cacheFiles != null) {
List<String> localFiles = new ArrayList<String>();
for (int i = 0; i < cacheFiles.length; ++i) {
URI u = cacheFiles[i];
Path p = new Path(u);
Path name =
new Path((null == u.getFragment()) ? p.getName()
: u.getFragment());
String linkName = name.toUri().getPath();
localFiles.add(new Path(localWorkDir, linkName).toUri().getPath());
}
if (!localFiles.isEmpty()) {
conf.set(MRJobConfig.CACHE_LOCALFILES,
StringUtils.arrayToString(localFiles
.toArray(new String[localFiles.size()])));
}
}
}
private static String getResourceDescription(LocalResourceType type) {
if(type == LocalResourceType.ARCHIVE || type == LocalResourceType.PATTERN) {
return "cache archive (" + MRJobConfig.CACHE_ARCHIVES + ") ";
}
return "cache file (" + MRJobConfig.CACHE_FILES + ") ";
}
private static String toString(org.apache.hadoop.yarn.api.records.URL url) {
StringBuffer b = new StringBuffer();
b.append(url.getScheme()).append("://").append(url.getHost());
if(url.getPort() >= 0) {
b.append(":").append(url.getPort());
}
b.append(url.getFile());
return b.toString();
}
// TODO - Move this to MR!
// Use TaskDistributedCacheManager.CacheFiles.makeCacheFiles(URI[],
// long[], boolean[], Path[], FileType)
private static void parseDistributedCacheArtifacts(
Configuration conf,
Map<String, LocalResource> localResources,
LocalResourceType type,
URI[] uris, long[] timestamps, long[] sizes, boolean visibilities[])
throws IOException {
if (uris != null) {
// Sanity check
if ((uris.length != timestamps.length) || (uris.length != sizes.length) ||
(uris.length != visibilities.length)) {
        throw new IllegalArgumentException("Invalid specification for " +
            "distributed-cache artifacts of type " + type + " :" +
            " #uris=" + uris.length +
            " #timestamps=" + timestamps.length +
            " #sizes=" + sizes.length +
            " #visibilities=" + visibilities.length
            );
}
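      // For each artifact: resolve the path on its remote FileSystem, derive
      // the link name (URI fragment, or file name if no fragment), and register
      // a LocalResource, warning when a link name collides with a different
      // resource.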
for (int i = 0; i < uris.length; ++i) {
URI u = uris[i];
Path p = new Path(u);
FileSystem remoteFS = p.getFileSystem(conf);
p = remoteFS.resolvePath(p.makeQualified(remoteFS.getUri(),
remoteFS.getWorkingDirectory()));
// Add URI fragment or just the filename
Path name = new Path((null == u.getFragment())
? p.getName()
: u.getFragment());
if (name.isAbsolute()) {
throw new IllegalArgumentException("Resource name must be relative");
}
String linkName = name.toUri().getPath();
LocalResource orig = localResources.get(linkName);
org.apache.hadoop.yarn.api.records.URL url =
ConverterUtils.getYarnUrlFromURI(p.toUri());
if(orig != null && !orig.getResource().equals(url)) {
LOG.warn(
getResourceDescription(orig.getType()) +
toString(orig.getResource()) + " conflicts with " +
getResourceDescription(type) + toString(url) +
" This will be an error in Hadoop 2.0");
continue;
}
localResources.put(linkName, LocalResource.newInstance(ConverterUtils
.getYarnUrlFromURI(p.toUri()), type, visibilities[i]
? LocalResourceVisibility.PUBLIC : LocalResourceVisibility.PRIVATE,
sizes[i], timestamps[i]));
}
}
}
// TODO - Move this to MR!
private static long[] getFileSizes(Configuration conf, String key) {
String[] strs = conf.getStrings(key);
if (strs == null) {
return null;
}
long[] result = new long[strs.length];
for(int i=0; i < strs.length; ++i) {
result[i] = Long.parseLong(strs[i]);
}
return result;
}
public static String getChildLogLevel(Configuration conf, boolean isMap) {
if (isMap) {
return conf.get(
MRJobConfig.MAP_LOG_LEVEL,
JobConf.DEFAULT_LOG_LEVEL.toString()
);
} else {
return conf.get(
MRJobConfig.REDUCE_LOG_LEVEL,
JobConf.DEFAULT_LOG_LEVEL.toString()
);
}
}
/**
* Add the JVM system properties necessary to configure
* {@link ContainerLogAppender} or
* {@link ContainerRollingLogAppender}.
*
* @param task for map/reduce, or null for app master
* @param vargs the argument list to append to
* @param conf configuration of MR job
*/
public static void addLog4jSystemProperties(Task task,
List<String> vargs, Configuration conf) {
String log4jPropertyFile =
conf.get(MRJobConfig.MAPREDUCE_JOB_LOG4J_PROPERTIES_FILE, "");
if (log4jPropertyFile.isEmpty()) {
vargs.add("-Dlog4j.configuration=container-log4j.properties");
} else {
URI log4jURI = null;
try {
log4jURI = new URI(log4jPropertyFile);
} catch (URISyntaxException e) {
throw new IllegalArgumentException(e);
}
Path log4jPath = new Path(log4jURI);
vargs.add("-Dlog4j.configuration="+log4jPath.getName());
}
long logSize;
String logLevel;
int numBackups;
if (task == null) {
logSize = conf.getLong(MRJobConfig.MR_AM_LOG_KB,
MRJobConfig.DEFAULT_MR_AM_LOG_KB) << 10;
logLevel = conf.get(
MRJobConfig.MR_AM_LOG_LEVEL, MRJobConfig.DEFAULT_MR_AM_LOG_LEVEL);
numBackups = conf.getInt(MRJobConfig.MR_AM_LOG_BACKUPS,
MRJobConfig.DEFAULT_MR_AM_LOG_BACKUPS);
} else {
logSize = TaskLog.getTaskLogLimitBytes(conf);
logLevel = getChildLogLevel(conf, task.isMapTask());
numBackups = conf.getInt(MRJobConfig.TASK_LOG_BACKUPS,
MRJobConfig.DEFAULT_TASK_LOG_BACKUPS);
}
vargs.add("-D" + YarnConfiguration.YARN_APP_CONTAINER_LOG_DIR + "=" +
ApplicationConstants.LOG_DIR_EXPANSION_VAR);
vargs.add(
"-D" + YarnConfiguration.YARN_APP_CONTAINER_LOG_SIZE + "=" + logSize);
if (logSize > 0L && numBackups > 0) {
// log should be rolled
vargs.add("-D" + YarnConfiguration.YARN_APP_CONTAINER_LOG_BACKUPS + "="
+ numBackups);
vargs.add("-Dhadoop.root.logger=" + logLevel + ",CRLA");
} else {
vargs.add("-Dhadoop.root.logger=" + logLevel + ",CLA");
}
vargs.add("-Dhadoop.root.logfile=" + TaskLog.LogName.SYSLOG);
if ( task != null
&& !task.isMapTask()
&& conf.getBoolean(MRJobConfig.REDUCE_SEPARATE_SHUFFLE_LOG,
MRJobConfig.DEFAULT_REDUCE_SEPARATE_SHUFFLE_LOG)) {
final int numShuffleBackups = conf.getInt(MRJobConfig.SHUFFLE_LOG_BACKUPS,
MRJobConfig.DEFAULT_SHUFFLE_LOG_BACKUPS);
final long shuffleLogSize = conf.getLong(MRJobConfig.SHUFFLE_LOG_KB,
MRJobConfig.DEFAULT_SHUFFLE_LOG_KB) << 10;
final String shuffleLogger = logLevel
+ (shuffleLogSize > 0L && numShuffleBackups > 0
? ",shuffleCRLA"
: ",shuffleCLA");
vargs.add("-D" + MRJobConfig.MR_PREFIX
+ "shuffle.logger=" + shuffleLogger);
vargs.add("-D" + MRJobConfig.MR_PREFIX
+ "shuffle.logfile=" + TaskLog.LogName.SYSLOG + ".shuffle");
vargs.add("-D" + MRJobConfig.MR_PREFIX
+ "shuffle.log.filesize=" + shuffleLogSize);
vargs.add("-D" + MRJobConfig.MR_PREFIX
+ "shuffle.log.backups=" + numShuffleBackups);
}
}
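  // Illustrative result of addLog4jSystemProperties (assuming default
  // settings): for the app master this typically appends
  // -Dlog4j.configuration=container-log4j.properties, the container log
  // dir/size definitions, and -Dhadoop.root.logger=INFO,CLA
  // (CRLA instead of CLA when log rolling is enabled).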
public static void setEnvFromInputString(Map<String, String> env,
String envString, Configuration conf) {
String classPathSeparator =
conf.getBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM,
MRConfig.DEFAULT_MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM)
? ApplicationConstants.CLASS_PATH_SEPARATOR : File.pathSeparator;
Apps.setEnvFromInputString(env, envString, classPathSeparator);
}
@Public
@Unstable
public static void addToEnvironment(Map<String, String> environment,
String variable, String value, Configuration conf) {
String classPathSeparator =
conf.getBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM,
MRConfig.DEFAULT_MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM)
? ApplicationConstants.CLASS_PATH_SEPARATOR : File.pathSeparator;
Apps.addToEnvironment(environment, variable, value, classPathSeparator);
}
public static String crossPlatformifyMREnv(Configuration conf, Environment env) {
boolean crossPlatform =
conf.getBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM,
MRConfig.DEFAULT_MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM);
return crossPlatform ? env.$$() : env.$();
}
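  // Example for crossPlatformifyMREnv (illustrative): Environment.PWD becomes
  // the cross-platform expansion "{{PWD}}" when cross-platform submission is
  // enabled, and the platform-specific form (e.g. "$PWD" on Unix) otherwise.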
  /**
   * Return a formatted block listing the JVM system properties named by the
   * configuration, one key and value per line.
   *
   * @param conf the configuration naming the properties to log
   * @return the formatted string for the system property lines, or null if no
   *         properties are specified.
   */
public static String getSystemPropertiesToLog(Configuration conf) {
String key = conf.get(MRJobConfig.MAPREDUCE_JVM_SYSTEM_PROPERTIES_TO_LOG,
MRJobConfig.DEFAULT_MAPREDUCE_JVM_SYSTEM_PROPERTIES_TO_LOG);
if (key != null) {
key = key.trim(); // trim leading and trailing whitespace from the config
if (!key.isEmpty()) {
String[] props = key.split(",");
if (props.length > 0) {
StringBuilder sb = new StringBuilder();
sb.append("\n/************************************************************\n");
sb.append("[system properties]\n");
for (String prop: props) {
prop = prop.trim(); // trim leading and trailing whitespace
if (!prop.isEmpty()) {
sb.append(prop).append(": ").append(System.getProperty(prop)).append('\n');
}
}
sb.append("************************************************************/");
return sb.toString();
}
}
}
return null;
}
}
| 29,187 | 37.005208 | 102 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRProtoUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.util;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.Phase;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEventStatus;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.JobStateProto;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.PhaseProto;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskAttemptCompletionEventStatusProto;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskAttemptStateProto;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskStateProto;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskTypeProto;
public class MRProtoUtils {
/*
* JobState
*/
private static String JOB_STATE_PREFIX = "J_";
public static JobStateProto convertToProtoFormat(JobState e) {
return JobStateProto.valueOf(JOB_STATE_PREFIX + e.name());
}
public static JobState convertFromProtoFormat(JobStateProto e) {
return JobState.valueOf(e.name().replace(JOB_STATE_PREFIX, ""));
}
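  // For example, JobState.RUNNING maps to JobStateProto.J_RUNNING and back;
  // the converters below follow the same prefixing pattern for their enums.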
/*
* Phase
*/
private static String PHASE_PREFIX = "P_";
public static PhaseProto convertToProtoFormat(Phase e) {
return PhaseProto.valueOf(PHASE_PREFIX + e.name());
}
public static Phase convertFromProtoFormat(PhaseProto e) {
return Phase.valueOf(e.name().replace(PHASE_PREFIX, ""));
}
/*
* TaskAttemptCompletionEventStatus
*/
private static String TACE_PREFIX = "TACE_";
public static TaskAttemptCompletionEventStatusProto convertToProtoFormat(TaskAttemptCompletionEventStatus e) {
return TaskAttemptCompletionEventStatusProto.valueOf(TACE_PREFIX + e.name());
}
public static TaskAttemptCompletionEventStatus convertFromProtoFormat(TaskAttemptCompletionEventStatusProto e) {
return TaskAttemptCompletionEventStatus.valueOf(e.name().replace(TACE_PREFIX, ""));
}
/*
* TaskAttemptState
*/
private static String TASK_ATTEMPT_STATE_PREFIX = "TA_";
public static TaskAttemptStateProto convertToProtoFormat(TaskAttemptState e) {
return TaskAttemptStateProto.valueOf(TASK_ATTEMPT_STATE_PREFIX + e.name());
}
public static TaskAttemptState convertFromProtoFormat(TaskAttemptStateProto e) {
return TaskAttemptState.valueOf(e.name().replace(TASK_ATTEMPT_STATE_PREFIX, ""));
}
/*
* TaskState
*/
private static String TASK_STATE_PREFIX = "TS_";
public static TaskStateProto convertToProtoFormat(TaskState e) {
return TaskStateProto.valueOf(TASK_STATE_PREFIX + e.name());
}
public static TaskState convertFromProtoFormat(TaskStateProto e) {
return TaskState.valueOf(e.name().replace(TASK_STATE_PREFIX, ""));
}
/*
* TaskType
*/
public static TaskTypeProto convertToProtoFormat(TaskType e) {
return TaskTypeProto.valueOf(e.name());
}
public static TaskType convertFromProtoFormat(TaskTypeProto e) {
return TaskType.valueOf(e.name());
}
}
| 3,925 | 37.871287 | 114 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.util;
import com.google.common.base.Joiner;
import com.google.common.base.Splitter;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.ipc.RPCUtil;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
import java.util.Iterator;
import static org.apache.hadoop.http.HttpConfig.Policy;
@Private
@Evolving
public class MRWebAppUtil {
private static final Splitter ADDR_SPLITTER = Splitter.on(':').trimResults();
private static final Joiner JOINER = Joiner.on("");
private static Policy httpPolicyInYarn;
private static Policy httpPolicyInJHS;
public static void initialize(Configuration conf) {
setHttpPolicyInYARN(conf.get(
YarnConfiguration.YARN_HTTP_POLICY_KEY,
YarnConfiguration.YARN_HTTP_POLICY_DEFAULT));
setHttpPolicyInJHS(conf.get(JHAdminConfig.MR_HS_HTTP_POLICY,
JHAdminConfig.DEFAULT_MR_HS_HTTP_POLICY));
}
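  // Note: initialize(conf) must be invoked before the accessors below, since
  // they all read the static policy fields populated here.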
private static void setHttpPolicyInJHS(String policy) {
MRWebAppUtil.httpPolicyInJHS = Policy.fromString(policy);
}
private static void setHttpPolicyInYARN(String policy) {
MRWebAppUtil.httpPolicyInYarn = Policy.fromString(policy);
}
public static Policy getJHSHttpPolicy() {
return MRWebAppUtil.httpPolicyInJHS;
}
public static Policy getYARNHttpPolicy() {
return MRWebAppUtil.httpPolicyInYarn;
}
public static String getYARNWebappScheme() {
return httpPolicyInYarn == HttpConfig.Policy.HTTPS_ONLY ? "https://"
: "http://";
}
public static String getJHSWebappScheme() {
return httpPolicyInJHS == HttpConfig.Policy.HTTPS_ONLY ? "https://"
: "http://";
}
public static void setJHSWebappURLWithoutScheme(Configuration conf,
String hostAddress) {
if (httpPolicyInJHS == Policy.HTTPS_ONLY) {
conf.set(JHAdminConfig.MR_HISTORY_WEBAPP_HTTPS_ADDRESS, hostAddress);
} else {
conf.set(JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS, hostAddress);
}
}
public static String getJHSWebappURLWithoutScheme(Configuration conf) {
if (httpPolicyInJHS == Policy.HTTPS_ONLY) {
return conf.get(JHAdminConfig.MR_HISTORY_WEBAPP_HTTPS_ADDRESS,
JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_HTTPS_ADDRESS);
} else {
return conf.get(JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS,
JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_ADDRESS);
}
}
public static String getJHSWebappURLWithScheme(Configuration conf) {
return getJHSWebappScheme() + getJHSWebappURLWithoutScheme(conf);
}
public static InetSocketAddress getJHSWebBindAddress(Configuration conf) {
if (httpPolicyInJHS == Policy.HTTPS_ONLY) {
return conf.getSocketAddr(
JHAdminConfig.MR_HISTORY_BIND_HOST,
JHAdminConfig.MR_HISTORY_WEBAPP_HTTPS_ADDRESS,
JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_HTTPS_ADDRESS,
JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_HTTPS_PORT);
} else {
return conf.getSocketAddr(
JHAdminConfig.MR_HISTORY_BIND_HOST,
JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS,
JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_ADDRESS,
JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_PORT);
}
}
public static String getApplicationWebURLOnJHSWithoutScheme(Configuration conf,
ApplicationId appId)
throws UnknownHostException {
//construct the history url for job
String addr = getJHSWebappURLWithoutScheme(conf);
Iterator<String> it = ADDR_SPLITTER.split(addr).iterator();
it.next(); // ignore the bind host
String port = it.next();
// Use hs address to figure out the host for webapp
addr = conf.get(JHAdminConfig.MR_HISTORY_ADDRESS,
JHAdminConfig.DEFAULT_MR_HISTORY_ADDRESS);
String host = ADDR_SPLITTER.split(addr).iterator().next();
String hsAddress = JOINER.join(host, ":", port);
InetSocketAddress address = NetUtils.createSocketAddr(
hsAddress, getDefaultJHSWebappPort(),
getDefaultJHSWebappURLWithoutScheme());
StringBuffer sb = new StringBuffer();
if (address.getAddress() != null &&
(address.getAddress().isAnyLocalAddress() ||
address.getAddress().isLoopbackAddress())) {
sb.append(InetAddress.getLocalHost().getCanonicalHostName());
} else {
sb.append(address.getHostName());
}
sb.append(":").append(address.getPort());
sb.append("/jobhistory/job/");
JobID jobId = TypeConverter.fromYarn(appId);
sb.append(jobId.toString());
return sb.toString();
}
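  // Illustrative result: "<jhs-host>:<webapp-port>/jobhistory/job/<job-id>",
  // where <job-id> is the MapReduce JobID derived from the application id; the
  // scheme is prepended by getApplicationWebURLOnJHSWithScheme below.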
public static String getApplicationWebURLOnJHSWithScheme(Configuration conf,
ApplicationId appId) throws UnknownHostException {
return getJHSWebappScheme()
+ getApplicationWebURLOnJHSWithoutScheme(conf, appId);
}
private static int getDefaultJHSWebappPort() {
return httpPolicyInJHS == Policy.HTTPS_ONLY ?
JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_HTTPS_PORT:
JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_PORT;
}
private static String getDefaultJHSWebappURLWithoutScheme() {
return httpPolicyInJHS == Policy.HTTPS_ONLY ?
JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_HTTPS_ADDRESS :
JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_ADDRESS;
}
public static String getAMWebappScheme(Configuration conf) {
return "http://";
}
}
| 6,631 | 36.681818 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRBuilderUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.util;
import java.util.List;
import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.util.Records;
public class MRBuilderUtils {
public static JobId newJobId(ApplicationId appId, int id) {
JobId jobId = Records.newRecord(JobId.class);
jobId.setAppId(appId);
jobId.setId(id);
return jobId;
}
public static JobId newJobId(long clusterTs, int appIdInt, int id) {
ApplicationId appId = ApplicationId.newInstance(clusterTs, appIdInt);
return MRBuilderUtils.newJobId(appId, id);
}
public static TaskId newTaskId(JobId jobId, int id, TaskType taskType) {
TaskId taskId = Records.newRecord(TaskId.class);
taskId.setJobId(jobId);
taskId.setId(id);
taskId.setTaskType(taskType);
return taskId;
}
public static TaskAttemptId newTaskAttemptId(TaskId taskId, int attemptId) {
TaskAttemptId taskAttemptId =
Records.newRecord(TaskAttemptId.class);
taskAttemptId.setTaskId(taskId);
taskAttemptId.setId(attemptId);
return taskAttemptId;
}
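  // Illustrative usage of the id builders above (not from the original source;
  // appId is an assumed, existing ApplicationId):
  //   JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  //   TaskId taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
  //   TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);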
public static JobReport newJobReport(JobId jobId, String jobName,
String userName, JobState state, long submitTime, long startTime, long finishTime,
float setupProgress, float mapProgress, float reduceProgress,
float cleanupProgress, String jobFile, List<AMInfo> amInfos,
boolean isUber, String diagnostics) {
JobReport report = Records.newRecord(JobReport.class);
report.setJobId(jobId);
report.setJobName(jobName);
report.setUser(userName);
report.setJobState(state);
report.setSubmitTime(submitTime);
report.setStartTime(startTime);
report.setFinishTime(finishTime);
report.setSetupProgress(setupProgress);
report.setCleanupProgress(cleanupProgress);
report.setMapProgress(mapProgress);
report.setReduceProgress(reduceProgress);
report.setJobFile(jobFile);
report.setAMInfos(amInfos);
report.setIsUber(isUber);
report.setDiagnostics(diagnostics);
return report;
}
public static AMInfo newAMInfo(ApplicationAttemptId appAttemptId,
long startTime, ContainerId containerId, String nmHost, int nmPort,
int nmHttpPort) {
AMInfo amInfo = Records.newRecord(AMInfo.class);
amInfo.setAppAttemptId(appAttemptId);
amInfo.setStartTime(startTime);
amInfo.setContainerId(containerId);
amInfo.setNodeManagerHost(nmHost);
amInfo.setNodeManagerPort(nmPort);
amInfo.setNodeManagerHttpPort(nmHttpPort);
return amInfo;
}
}
| 3,880 | 37.425743 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalDistributedCacheManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.File;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URISyntaxException;
import java.net.URL;
import java.net.URLClassLoader;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.filecache.DistributedCache;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.util.FSDownload;
import com.google.common.collect.Maps;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
/**
* A helper class for managing the distributed cache for {@link LocalJobRunner}.
*/
@SuppressWarnings("deprecation")
class LocalDistributedCacheManager {
public static final Log LOG =
LogFactory.getLog(LocalDistributedCacheManager.class);
private List<String> localArchives = new ArrayList<String>();
private List<String> localFiles = new ArrayList<String>();
private List<String> localClasspaths = new ArrayList<String>();
private List<File> symlinksCreated = new ArrayList<File>();
private boolean setupCalled = false;
/**
* Set up the distributed cache by localizing the resources, and updating
* the configuration with references to the localized resources.
* @param conf
* @throws IOException
*/
public void setup(JobConf conf) throws IOException {
File workDir = new File(System.getProperty("user.dir"));
// Generate YARN local resources objects corresponding to the distributed
// cache configuration
Map<String, LocalResource> localResources =
new LinkedHashMap<String, LocalResource>();
MRApps.setupDistributedCache(conf, localResources);
// Generating unique numbers for FSDownload.
AtomicLong uniqueNumberGenerator =
new AtomicLong(System.currentTimeMillis());
// Find which resources are to be put on the local classpath
Map<String, Path> classpaths = new HashMap<String, Path>();
Path[] archiveClassPaths = DistributedCache.getArchiveClassPaths(conf);
if (archiveClassPaths != null) {
for (Path p : archiveClassPaths) {
classpaths.put(p.toUri().getPath().toString(), p);
}
}
Path[] fileClassPaths = DistributedCache.getFileClassPaths(conf);
if (fileClassPaths != null) {
for (Path p : fileClassPaths) {
classpaths.put(p.toUri().getPath().toString(), p);
}
}
// Localize the resources
LocalDirAllocator localDirAllocator =
new LocalDirAllocator(MRConfig.LOCAL_DIR);
FileContext localFSFileContext = FileContext.getLocalFSFileContext();
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
ExecutorService exec = null;
try {
ThreadFactory tf = new ThreadFactoryBuilder()
.setNameFormat("LocalDistributedCacheManager Downloader #%d")
.build();
exec = Executors.newCachedThreadPool(tf);
Path destPath = localDirAllocator.getLocalPathForWrite(".", conf);
Map<LocalResource, Future<Path>> resourcesToPaths = Maps.newHashMap();
for (LocalResource resource : localResources.values()) {
Callable<Path> download =
new FSDownload(localFSFileContext, ugi, conf, new Path(destPath,
Long.toString(uniqueNumberGenerator.incrementAndGet())),
resource);
Future<Path> future = exec.submit(download);
resourcesToPaths.put(resource, future);
}
for (Entry<String, LocalResource> entry : localResources.entrySet()) {
LocalResource resource = entry.getValue();
Path path;
try {
path = resourcesToPaths.get(resource).get();
} catch (InterruptedException e) {
throw new IOException(e);
} catch (ExecutionException e) {
throw new IOException(e);
}
String pathString = path.toUri().toString();
String link = entry.getKey();
String target = new File(path.toUri()).getPath();
symlink(workDir, target, link);
if (resource.getType() == LocalResourceType.ARCHIVE) {
localArchives.add(pathString);
} else if (resource.getType() == LocalResourceType.FILE) {
localFiles.add(pathString);
} else if (resource.getType() == LocalResourceType.PATTERN) {
//PATTERN is not currently used in local mode
throw new IllegalArgumentException("Resource type PATTERN is not " +
"implemented yet. " + resource.getResource());
}
Path resourcePath;
try {
resourcePath = ConverterUtils.getPathFromYarnURL(resource.getResource());
} catch (URISyntaxException e) {
throw new IOException(e);
}
LOG.info(String.format("Localized %s as %s", resourcePath, path));
String cp = resourcePath.toUri().getPath();
if (classpaths.keySet().contains(cp)) {
localClasspaths.add(path.toUri().getPath().toString());
}
}
} finally {
if (exec != null) {
exec.shutdown();
}
}
// Update the configuration object with localized data.
if (!localArchives.isEmpty()) {
conf.set(MRJobConfig.CACHE_LOCALARCHIVES, StringUtils
.arrayToString(localArchives.toArray(new String[localArchives
.size()])));
}
    if (!localFiles.isEmpty()) {
      // Size the target array from localFiles (not localArchives) so no
      // trailing nulls are appended when the two lists differ in length.
      conf.set(MRJobConfig.CACHE_LOCALFILES, StringUtils
          .arrayToString(localFiles.toArray(new String[localFiles
              .size()])));
    }
setupCalled = true;
}
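  // Typical usage sketch (illustrative): the local job runner creates an
  // instance, calls setup(jobConf) before launching tasks, wraps the task
  // classloader via makeClassLoader(...) when hasLocalClasspaths() returns
  // true, and calls close() once the job completes.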
/**
* Utility method for creating a symlink and warning on errors.
*
* If link is null, does nothing.
*/
private void symlink(File workDir, String target, String link)
throws IOException {
if (link != null) {
link = workDir.toString() + Path.SEPARATOR + link;
File flink = new File(link);
if (!flink.exists()) {
LOG.info(String.format("Creating symlink: %s <- %s", target, link));
if (0 != FileUtil.symLink(target, link)) {
LOG.warn(String.format("Failed to create symlink: %s <- %s", target,
link));
} else {
symlinksCreated.add(new File(link));
}
}
}
}
  /**
   * Are there resources that should be added to the classpath?
   * Should be called after setup().
   *
   * @return true if any localized resources belong on the classpath
   */
public boolean hasLocalClasspaths() {
if (!setupCalled) {
throw new IllegalStateException(
"hasLocalClasspaths() should be called after setup()");
}
return !localClasspaths.isEmpty();
}
/**
* Creates a class loader that includes the designated
* files and archives.
*/
public ClassLoader makeClassLoader(final ClassLoader parent)
throws MalformedURLException {
final URL[] urls = new URL[localClasspaths.size()];
for (int i = 0; i < localClasspaths.size(); ++i) {
urls[i] = new File(localClasspaths.get(i)).toURI().toURL();
LOG.info(urls[i]);
}
return AccessController.doPrivileged(new PrivilegedAction<ClassLoader>() {
@Override
public ClassLoader run() {
return new URLClassLoader(urls, parent);
}
});
}
public void close() throws IOException {
for (File symlink : symlinksCreated) {
if (!symlink.delete()) {
LOG.warn("Failed to delete symlink created by the local job runner: " +
symlink);
}
}
FileContext localFSFileContext = FileContext.getLocalFSFileContext();
for (String archive : localArchives) {
localFSFileContext.delete(new Path(archive), true);
}
for (String file : localFiles) {
localFSFileContext.delete(new Path(file), true);
}
}
}
| 9,589 | 35.884615 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunner.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus;
import org.apache.hadoop.mapreduce.ClusterMetrics;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.QueueInfo;
import org.apache.hadoop.mapreduce.TaskCompletionEvent;
import org.apache.hadoop.mapreduce.TaskTrackerInfo;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo;
import org.apache.hadoop.mapreduce.split.SplitMetaInfoReader;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
import org.apache.hadoop.mapreduce.v2.LogParams;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.ReflectionUtils;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
/** Implements MapReduce locally, in-process, for debugging. */
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class LocalJobRunner implements ClientProtocol {
public static final Log LOG =
LogFactory.getLog(LocalJobRunner.class);
/** The maximum number of map tasks to run in parallel in LocalJobRunner */
public static final String LOCAL_MAX_MAPS =
"mapreduce.local.map.tasks.maximum";
/** The maximum number of reduce tasks to run in parallel in LocalJobRunner */
public static final String LOCAL_MAX_REDUCES =
"mapreduce.local.reduce.tasks.maximum";
private FileSystem fs;
private HashMap<JobID, Job> jobs = new HashMap<JobID, Job>();
private JobConf conf;
private AtomicInteger map_tasks = new AtomicInteger(0);
private AtomicInteger reduce_tasks = new AtomicInteger(0);
final Random rand = new Random();
private LocalJobRunnerMetrics myMetrics = null;
private static final String jobDir = "localRunner/";
public long getProtocolVersion(String protocol, long clientVersion) {
return ClientProtocol.versionID;
}
@Override
public ProtocolSignature getProtocolSignature(String protocol,
long clientVersion, int clientMethodsHash) throws IOException {
return ProtocolSignature.getProtocolSignature(
this, protocol, clientVersion, clientMethodsHash);
}
private class Job extends Thread implements TaskUmbilicalProtocol {
// The job directory on the system: JobClient places job configurations here.
// This is analogous to JobTracker's system directory.
private Path systemJobDir;
private Path systemJobFile;
    // The job directory for the task. Analogous to a task's job directory.
private Path localJobDir;
private Path localJobFile;
private JobID id;
private JobConf job;
private int numMapTasks;
private int numReduceTasks;
private float [] partialMapProgress;
private float [] partialReduceProgress;
private Counters [] mapCounters;
private Counters [] reduceCounters;
private JobStatus status;
private List<TaskAttemptID> mapIds = Collections.synchronizedList(
new ArrayList<TaskAttemptID>());
private JobProfile profile;
private FileSystem localFs;
boolean killed = false;
private LocalDistributedCacheManager localDistributedCacheManager;
public long getProtocolVersion(String protocol, long clientVersion) {
return TaskUmbilicalProtocol.versionID;
}
@Override
public ProtocolSignature getProtocolSignature(String protocol,
long clientVersion, int clientMethodsHash) throws IOException {
return ProtocolSignature.getProtocolSignature(
this, protocol, clientVersion, clientMethodsHash);
}
public Job(JobID jobid, String jobSubmitDir) throws IOException {
this.systemJobDir = new Path(jobSubmitDir);
this.systemJobFile = new Path(systemJobDir, "job.xml");
this.id = jobid;
JobConf conf = new JobConf(systemJobFile);
this.localFs = FileSystem.getLocal(conf);
String user = UserGroupInformation.getCurrentUser().getShortUserName();
this.localJobDir = localFs.makeQualified(new Path(
new Path(conf.getLocalPath(jobDir), user), jobid.toString()));
this.localJobFile = new Path(this.localJobDir, id + ".xml");
      // Manage the distributed cache. If there are files to be copied,
      // setup() may update the configuration, so localJobFile is re-written below.
localDistributedCacheManager = new LocalDistributedCacheManager();
localDistributedCacheManager.setup(conf);
// Write out configuration file. Instead of copying it from
// systemJobFile, we re-write it, since setup(), above, may have
// updated it.
OutputStream out = localFs.create(localJobFile);
try {
conf.writeXml(out);
} finally {
out.close();
}
this.job = new JobConf(localJobFile);
// Job (the current object) is a Thread, so we wrap its class loader.
if (localDistributedCacheManager.hasLocalClasspaths()) {
setContextClassLoader(localDistributedCacheManager.makeClassLoader(
getContextClassLoader()));
}
profile = new JobProfile(job.getUser(), id, systemJobFile.toString(),
"http://localhost:8080/", job.getJobName());
status = new JobStatus(id, 0.0f, 0.0f, JobStatus.RUNNING,
profile.getUser(), profile.getJobName(), profile.getJobFile(),
profile.getURL().toString());
jobs.put(id, this);
this.start();
}
protected abstract class RunnableWithThrowable implements Runnable {
public volatile Throwable storedException;
}
/**
* A Runnable instance that handles a map task to be run by an executor.
*/
protected class MapTaskRunnable extends RunnableWithThrowable {
private final int taskId;
private final TaskSplitMetaInfo info;
private final JobID jobId;
private final JobConf localConf;
// This is a reference to a shared object passed in by the
// external context; this delivers state to the reducers regarding
// where to fetch mapper outputs.
private final Map<TaskAttemptID, MapOutputFile> mapOutputFiles;
public MapTaskRunnable(TaskSplitMetaInfo info, int taskId, JobID jobId,
Map<TaskAttemptID, MapOutputFile> mapOutputFiles) {
this.info = info;
this.taskId = taskId;
this.mapOutputFiles = mapOutputFiles;
this.jobId = jobId;
this.localConf = new JobConf(job);
}
public void run() {
try {
TaskAttemptID mapId = new TaskAttemptID(new TaskID(
jobId, TaskType.MAP, taskId), 0);
LOG.info("Starting task: " + mapId);
mapIds.add(mapId);
MapTask map = new MapTask(systemJobFile.toString(), mapId, taskId,
info.getSplitIndex(), 1);
map.setUser(UserGroupInformation.getCurrentUser().
getShortUserName());
setupChildMapredLocalDirs(map, localConf);
MapOutputFile mapOutput = new MROutputFiles();
mapOutput.setConf(localConf);
mapOutputFiles.put(mapId, mapOutput);
map.setJobFile(localJobFile.toString());
localConf.setUser(map.getUser());
map.localizeConfiguration(localConf);
map.setConf(localConf);
try {
map_tasks.getAndIncrement();
myMetrics.launchMap(mapId);
map.run(localConf, Job.this);
myMetrics.completeMap(mapId);
} finally {
map_tasks.getAndDecrement();
}
LOG.info("Finishing task: " + mapId);
} catch (Throwable e) {
this.storedException = e;
}
}
}
/**
* Create Runnables to encapsulate map tasks for use by the executor
* service.
* @param taskInfo Info about the map task splits
* @param jobId the job id
* @param mapOutputFiles a mapping from task attempts to output files
* @return a List of Runnables, one per map task.
*/
protected List<RunnableWithThrowable> getMapTaskRunnables(
TaskSplitMetaInfo [] taskInfo, JobID jobId,
Map<TaskAttemptID, MapOutputFile> mapOutputFiles) {
int numTasks = 0;
ArrayList<RunnableWithThrowable> list =
new ArrayList<RunnableWithThrowable>();
for (TaskSplitMetaInfo task : taskInfo) {
list.add(new MapTaskRunnable(task, numTasks++, jobId,
mapOutputFiles));
}
return list;
}
protected class ReduceTaskRunnable extends RunnableWithThrowable {
private final int taskId;
private final JobID jobId;
private final JobConf localConf;
// This is a reference to a shared object passed in by the
// external context; this delivers state to the reducers regarding
// where to fetch mapper outputs.
private final Map<TaskAttemptID, MapOutputFile> mapOutputFiles;
public ReduceTaskRunnable(int taskId, JobID jobId,
Map<TaskAttemptID, MapOutputFile> mapOutputFiles) {
this.taskId = taskId;
this.jobId = jobId;
this.mapOutputFiles = mapOutputFiles;
this.localConf = new JobConf(job);
this.localConf.set("mapreduce.jobtracker.address", "local");
}
public void run() {
try {
TaskAttemptID reduceId = new TaskAttemptID(new TaskID(
jobId, TaskType.REDUCE, taskId), 0);
LOG.info("Starting task: " + reduceId);
ReduceTask reduce = new ReduceTask(systemJobFile.toString(),
reduceId, taskId, mapIds.size(), 1);
reduce.setUser(UserGroupInformation.getCurrentUser().
getShortUserName());
setupChildMapredLocalDirs(reduce, localConf);
reduce.setLocalMapFiles(mapOutputFiles);
if (!Job.this.isInterrupted()) {
reduce.setJobFile(localJobFile.toString());
localConf.setUser(reduce.getUser());
reduce.localizeConfiguration(localConf);
reduce.setConf(localConf);
try {
reduce_tasks.getAndIncrement();
myMetrics.launchReduce(reduce.getTaskID());
reduce.run(localConf, Job.this);
myMetrics.completeReduce(reduce.getTaskID());
} finally {
reduce_tasks.getAndDecrement();
}
LOG.info("Finishing task: " + reduceId);
} else {
throw new InterruptedException();
}
} catch (Throwable t) {
// store this to be rethrown in the initial thread context.
this.storedException = t;
}
}
}
/**
* Create Runnables to encapsulate reduce tasks for use by the executor
* service.
* @param jobId the job id
* @param mapOutputFiles a mapping from task attempts to output files
* @return a List of Runnables, one per reduce task.
*/
protected List<RunnableWithThrowable> getReduceTaskRunnables(
JobID jobId, Map<TaskAttemptID, MapOutputFile> mapOutputFiles) {
int taskId = 0;
ArrayList<RunnableWithThrowable> list =
new ArrayList<RunnableWithThrowable>();
for (int i = 0; i < this.numReduceTasks; i++) {
list.add(new ReduceTaskRunnable(taskId++, jobId, mapOutputFiles));
}
return list;
}
/**
* Initialize the counters that will hold partial-progress from
* the various task attempts.
     * @param numMaps the number of map tasks in this job.
     * @param numReduces the number of reduce tasks in this job.
*/
private synchronized void initCounters(int numMaps, int numReduces) {
// Initialize state trackers for all map tasks.
this.partialMapProgress = new float[numMaps];
this.mapCounters = new Counters[numMaps];
for (int i = 0; i < numMaps; i++) {
this.mapCounters[i] = new Counters();
}
this.partialReduceProgress = new float[numReduces];
this.reduceCounters = new Counters[numReduces];
for (int i = 0; i < numReduces; i++) {
this.reduceCounters[i] = new Counters();
}
this.numMapTasks = numMaps;
this.numReduceTasks = numReduces;
}
/**
* Creates the executor service used to run map tasks.
*
* @return an ExecutorService instance that handles map tasks
*/
protected synchronized ExecutorService createMapExecutor() {
// Determine the size of the thread pool to use
int maxMapThreads = job.getInt(LOCAL_MAX_MAPS, 1);
if (maxMapThreads < 1) {
throw new IllegalArgumentException(
"Configured " + LOCAL_MAX_MAPS + " must be >= 1");
}
maxMapThreads = Math.min(maxMapThreads, this.numMapTasks);
maxMapThreads = Math.max(maxMapThreads, 1); // In case of no tasks.
LOG.debug("Starting mapper thread pool executor.");
LOG.debug("Max local threads: " + maxMapThreads);
LOG.debug("Map tasks to process: " + this.numMapTasks);
// Create a new executor service to drain the work queue.
ThreadFactory tf = new ThreadFactoryBuilder()
.setNameFormat("LocalJobRunner Map Task Executor #%d")
.build();
ExecutorService executor = Executors.newFixedThreadPool(maxMapThreads, tf);
return executor;
}
/**
* Creates the executor service used to run reduce tasks.
*
* @return an ExecutorService instance that handles reduce tasks
*/
protected synchronized ExecutorService createReduceExecutor() {
// Determine the size of the thread pool to use
int maxReduceThreads = job.getInt(LOCAL_MAX_REDUCES, 1);
if (maxReduceThreads < 1) {
throw new IllegalArgumentException(
"Configured " + LOCAL_MAX_REDUCES + " must be >= 1");
}
maxReduceThreads = Math.min(maxReduceThreads, this.numReduceTasks);
maxReduceThreads = Math.max(maxReduceThreads, 1); // In case of no tasks.
LOG.debug("Starting reduce thread pool executor.");
LOG.debug("Max local threads: " + maxReduceThreads);
LOG.debug("Reduce tasks to process: " + this.numReduceTasks);
// Create a new executor service to drain the work queue.
ExecutorService executor = Executors.newFixedThreadPool(maxReduceThreads);
return executor;
}
    /** Runs a set of tasks and waits for them to complete. */
private void runTasks(List<RunnableWithThrowable> runnables,
ExecutorService service, String taskType) throws Exception {
// Start populating the executor with work units.
// They may begin running immediately (in other threads).
for (Runnable r : runnables) {
service.submit(r);
}
try {
service.shutdown(); // Instructs queue to drain.
// Wait for tasks to finish; do not use a time-based timeout.
// (See http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6179024)
LOG.info("Waiting for " + taskType + " tasks");
service.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
} catch (InterruptedException ie) {
// Cancel all threads.
service.shutdownNow();
throw ie;
}
LOG.info(taskType + " task executor complete.");
// After waiting for the tasks to complete, if any of these
// have thrown an exception, rethrow it now in the main thread context.
for (RunnableWithThrowable r : runnables) {
if (r.storedException != null) {
throw new Exception(r.storedException);
}
}
}
private org.apache.hadoop.mapreduce.OutputCommitter
createOutputCommitter(boolean newApiCommitter, JobID jobId, Configuration conf) throws Exception {
org.apache.hadoop.mapreduce.OutputCommitter committer = null;
LOG.info("OutputCommitter set in config "
+ conf.get("mapred.output.committer.class"));
if (newApiCommitter) {
org.apache.hadoop.mapreduce.TaskID taskId =
new org.apache.hadoop.mapreduce.TaskID(jobId, TaskType.MAP, 0);
org.apache.hadoop.mapreduce.TaskAttemptID taskAttemptID =
new org.apache.hadoop.mapreduce.TaskAttemptID(taskId, 0);
org.apache.hadoop.mapreduce.TaskAttemptContext taskContext =
new TaskAttemptContextImpl(conf, taskAttemptID);
OutputFormat outputFormat =
ReflectionUtils.newInstance(taskContext.getOutputFormatClass(), conf);
committer = outputFormat.getOutputCommitter(taskContext);
} else {
committer = ReflectionUtils.newInstance(conf.getClass(
"mapred.output.committer.class", FileOutputCommitter.class,
org.apache.hadoop.mapred.OutputCommitter.class), conf);
}
LOG.info("OutputCommitter is " + committer.getClass().getName());
return committer;
}
@Override
public void run() {
JobID jobId = profile.getJobID();
JobContext jContext = new JobContextImpl(job, jobId);
org.apache.hadoop.mapreduce.OutputCommitter outputCommitter = null;
try {
outputCommitter = createOutputCommitter(conf.getUseNewMapper(), jobId, conf);
} catch (Exception e) {
LOG.info("Failed to createOutputCommitter", e);
return;
}
try {
TaskSplitMetaInfo[] taskSplitMetaInfos =
SplitMetaInfoReader.readSplitMetaInfo(jobId, localFs, conf, systemJobDir);
int numReduceTasks = job.getNumReduceTasks();
outputCommitter.setupJob(jContext);
status.setSetupProgress(1.0f);
Map<TaskAttemptID, MapOutputFile> mapOutputFiles =
Collections.synchronizedMap(new HashMap<TaskAttemptID, MapOutputFile>());
List<RunnableWithThrowable> mapRunnables = getMapTaskRunnables(
taskSplitMetaInfos, jobId, mapOutputFiles);
initCounters(mapRunnables.size(), numReduceTasks);
ExecutorService mapService = createMapExecutor();
runTasks(mapRunnables, mapService, "map");
try {
if (numReduceTasks > 0) {
List<RunnableWithThrowable> reduceRunnables = getReduceTaskRunnables(
jobId, mapOutputFiles);
ExecutorService reduceService = createReduceExecutor();
runTasks(reduceRunnables, reduceService, "reduce");
}
} finally {
for (MapOutputFile output : mapOutputFiles.values()) {
output.removeAll();
}
}
// delete the temporary directory in output directory
outputCommitter.commitJob(jContext);
status.setCleanupProgress(1.0f);
if (killed) {
this.status.setRunState(JobStatus.KILLED);
} else {
this.status.setRunState(JobStatus.SUCCEEDED);
}
JobEndNotifier.localRunnerNotification(job, status);
} catch (Throwable t) {
try {
outputCommitter.abortJob(jContext,
org.apache.hadoop.mapreduce.JobStatus.State.FAILED);
} catch (IOException ioe) {
LOG.info("Error cleaning up job:" + id);
}
status.setCleanupProgress(1.0f);
if (killed) {
this.status.setRunState(JobStatus.KILLED);
} else {
this.status.setRunState(JobStatus.FAILED);
}
LOG.warn(id, t);
JobEndNotifier.localRunnerNotification(job, status);
} finally {
try {
fs.delete(systemJobFile.getParent(), true); // delete submit dir
localFs.delete(localJobFile, true); // delete local copy
// Cleanup distributed cache
localDistributedCacheManager.close();
} catch (IOException e) {
LOG.warn("Error cleaning up "+id+": "+e);
}
}
}
// TaskUmbilicalProtocol methods
public JvmTask getTask(JvmContext context) { return null; }
public synchronized boolean statusUpdate(TaskAttemptID taskId,
TaskStatus taskStatus) throws IOException, InterruptedException {
// Serialize as we would if distributed in order to make deep copy
ByteArrayOutputStream baos = new ByteArrayOutputStream();
DataOutputStream dos = new DataOutputStream(baos);
taskStatus.write(dos);
dos.close();
taskStatus = TaskStatus.createTaskStatus(taskStatus.getIsMap());
taskStatus.readFields(new DataInputStream(
new ByteArrayInputStream(baos.toByteArray())));
LOG.info(taskStatus.getStateString());
int mapTaskIndex = mapIds.indexOf(taskId);
if (mapTaskIndex >= 0) {
// mapping
float numTasks = (float) this.numMapTasks;
partialMapProgress[mapTaskIndex] = taskStatus.getProgress();
mapCounters[mapTaskIndex] = taskStatus.getCounters();
float partialProgress = 0.0f;
for (float f : partialMapProgress) {
partialProgress += f;
}
status.setMapProgress(partialProgress / numTasks);
} else {
// reducing
int reduceTaskIndex = taskId.getTaskID().getId();
float numTasks = (float) this.numReduceTasks;
partialReduceProgress[reduceTaskIndex] = taskStatus.getProgress();
reduceCounters[reduceTaskIndex] = taskStatus.getCounters();
float partialProgress = 0.0f;
for (float f : partialReduceProgress) {
partialProgress += f;
}
status.setReduceProgress(partialProgress / numTasks);
}
// ignore phase
return true;
}
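    /*
     * Illustration (added; not in the original source) of the averaging above:
     * with numMapTasks = 4 and partial progresses {1.0, 1.0, 0.5, 0.0}, the
     * job-level map progress becomes (1.0 + 1.0 + 0.5 + 0.0) / 4 = 0.625.
     * Reduce progress is averaged the same way over numReduceTasks.
     */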
/** Return the current values of the counters for this job,
* including tasks that are in progress.
*/
public synchronized Counters getCurrentCounters() {
if (null == mapCounters) {
// Counters not yet initialized for job.
return new Counters();
}
Counters current = new Counters();
for (Counters c : mapCounters) {
current = Counters.sum(current, c);
}
if (null != reduceCounters && reduceCounters.length > 0) {
for (Counters c : reduceCounters) {
current = Counters.sum(current, c);
}
}
return current;
}
/**
* Task is reporting that it is in commit_pending
* and it is waiting for the commit Response
*/
public void commitPending(TaskAttemptID taskid,
TaskStatus taskStatus)
throws IOException, InterruptedException {
statusUpdate(taskid, taskStatus);
}
public void reportDiagnosticInfo(TaskAttemptID taskid, String trace) {
// Ignore for now
}
public void reportNextRecordRange(TaskAttemptID taskid,
SortedRanges.Range range) throws IOException {
LOG.info("Task " + taskid + " reportedNextRecordRange " + range);
}
public boolean ping(TaskAttemptID taskid) throws IOException {
return true;
}
public boolean canCommit(TaskAttemptID taskid)
throws IOException {
return true;
}
public void done(TaskAttemptID taskId) throws IOException {
int taskIndex = mapIds.indexOf(taskId);
if (taskIndex >= 0) { // mapping
status.setMapProgress(1.0f);
} else {
status.setReduceProgress(1.0f);
}
}
public synchronized void fsError(TaskAttemptID taskId, String message)
throws IOException {
LOG.fatal("FSError: "+ message + "from task: " + taskId);
}
public void shuffleError(TaskAttemptID taskId, String message) throws IOException {
LOG.fatal("shuffleError: "+ message + "from task: " + taskId);
}
public synchronized void fatalError(TaskAttemptID taskId, String msg)
throws IOException {
LOG.fatal("Fatal: "+ msg + "from task: " + taskId);
}
public MapTaskCompletionEventsUpdate getMapCompletionEvents(JobID jobId,
int fromEventId, int maxLocs, TaskAttemptID id) throws IOException {
return new MapTaskCompletionEventsUpdate(
org.apache.hadoop.mapred.TaskCompletionEvent.EMPTY_ARRAY, false);
}
}
public LocalJobRunner(Configuration conf) throws IOException {
this(new JobConf(conf));
}
@Deprecated
public LocalJobRunner(JobConf conf) throws IOException {
this.fs = FileSystem.getLocal(conf);
this.conf = conf;
myMetrics = new LocalJobRunnerMetrics(new JobConf(conf));
}
// JobSubmissionProtocol methods
private static int jobid = 0;
  // used for making sure that local jobs running in different JVMs don't
  // collide on staging or job directories
private int randid;
public synchronized org.apache.hadoop.mapreduce.JobID getNewJobID() {
return new org.apache.hadoop.mapreduce.JobID("local" + randid, ++jobid);
}
public org.apache.hadoop.mapreduce.JobStatus submitJob(
org.apache.hadoop.mapreduce.JobID jobid, String jobSubmitDir,
Credentials credentials) throws IOException {
Job job = new Job(JobID.downgrade(jobid), jobSubmitDir);
job.job.setCredentials(credentials);
return job.status;
}
public void killJob(org.apache.hadoop.mapreduce.JobID id) {
jobs.get(JobID.downgrade(id)).killed = true;
jobs.get(JobID.downgrade(id)).interrupt();
}
public void setJobPriority(org.apache.hadoop.mapreduce.JobID id,
String jp) throws IOException {
throw new UnsupportedOperationException("Changing job priority " +
"in LocalJobRunner is not supported.");
}
/** Throws {@link UnsupportedOperationException} */
public boolean killTask(org.apache.hadoop.mapreduce.TaskAttemptID taskId,
boolean shouldFail) throws IOException {
throw new UnsupportedOperationException("Killing tasks in " +
"LocalJobRunner is not supported");
}
public org.apache.hadoop.mapreduce.TaskReport[] getTaskReports(
org.apache.hadoop.mapreduce.JobID id, TaskType type) {
return new org.apache.hadoop.mapreduce.TaskReport[0];
}
public org.apache.hadoop.mapreduce.JobStatus getJobStatus(
org.apache.hadoop.mapreduce.JobID id) {
Job job = jobs.get(JobID.downgrade(id));
if(job != null)
return job.status;
else
return null;
}
public org.apache.hadoop.mapreduce.Counters getJobCounters(
org.apache.hadoop.mapreduce.JobID id) {
Job job = jobs.get(JobID.downgrade(id));
return new org.apache.hadoop.mapreduce.Counters(job.getCurrentCounters());
}
public String getFilesystemName() throws IOException {
return fs.getUri().toString();
}
public ClusterMetrics getClusterMetrics() {
int numMapTasks = map_tasks.get();
int numReduceTasks = reduce_tasks.get();
return new ClusterMetrics(numMapTasks, numReduceTasks, numMapTasks,
numReduceTasks, 0, 0, 1, 1, jobs.size(), 1, 0, 0);
}
public JobTrackerStatus getJobTrackerStatus() {
return JobTrackerStatus.RUNNING;
}
public long getTaskTrackerExpiryInterval() throws IOException, InterruptedException {
return 0;
}
/**
* Get all active trackers in cluster.
* @return array of TaskTrackerInfo
*/
public TaskTrackerInfo[] getActiveTrackers()
throws IOException, InterruptedException {
return new TaskTrackerInfo[0];
}
/**
* Get all blacklisted trackers in cluster.
* @return array of TaskTrackerInfo
*/
public TaskTrackerInfo[] getBlacklistedTrackers()
throws IOException, InterruptedException {
return new TaskTrackerInfo[0];
}
public TaskCompletionEvent[] getTaskCompletionEvents(
org.apache.hadoop.mapreduce.JobID jobid
, int fromEventId, int maxEvents) throws IOException {
return TaskCompletionEvent.EMPTY_ARRAY;
}
  public org.apache.hadoop.mapreduce.JobStatus[] getAllJobs() { return null; }
/**
* Returns the diagnostic information for a particular task in the given job.
* To be implemented
*/
public String[] getTaskDiagnostics(
org.apache.hadoop.mapreduce.TaskAttemptID taskid) throws IOException{
return new String [0];
}
/**
* @see org.apache.hadoop.mapreduce.protocol.ClientProtocol#getSystemDir()
*/
public String getSystemDir() {
Path sysDir = new Path(
conf.get(JTConfig.JT_SYSTEM_DIR, "/tmp/hadoop/mapred/system"));
return fs.makeQualified(sysDir).toString();
}
/**
* @see org.apache.hadoop.mapreduce.protocol.ClientProtocol#getQueueAdmins(String)
*/
public AccessControlList getQueueAdmins(String queueName) throws IOException {
return new AccessControlList(" ");// no queue admins for local job runner
}
/**
* @see org.apache.hadoop.mapreduce.protocol.ClientProtocol#getStagingAreaDir()
*/
public String getStagingAreaDir() throws IOException {
Path stagingRootDir = new Path(conf.get(JTConfig.JT_STAGING_AREA_ROOT,
"/tmp/hadoop/mapred/staging"));
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
String user;
randid = rand.nextInt(Integer.MAX_VALUE);
if (ugi != null) {
user = ugi.getShortUserName() + randid;
} else {
user = "dummy" + randid;
}
return fs.makeQualified(new Path(stagingRootDir, user+"/.staging")).toString();
}
public String getJobHistoryDir() {
return null;
}
@Override
public QueueInfo[] getChildQueues(String queueName) throws IOException {
return null;
}
@Override
public QueueInfo[] getRootQueues() throws IOException {
return null;
}
@Override
public QueueInfo[] getQueues() throws IOException {
return null;
}
@Override
public QueueInfo getQueue(String queue) throws IOException {
return null;
}
@Override
public org.apache.hadoop.mapreduce.QueueAclsInfo[]
getQueueAclsForCurrentUser() throws IOException{
return null;
}
/**
* Set the max number of map tasks to run concurrently in the LocalJobRunner.
* @param job the job to configure
* @param maxMaps the maximum number of map tasks to allow.
*/
public static void setLocalMaxRunningMaps(
org.apache.hadoop.mapreduce.JobContext job,
int maxMaps) {
job.getConfiguration().setInt(LOCAL_MAX_MAPS, maxMaps);
}
/**
* @return the max number of map tasks to run concurrently in the
* LocalJobRunner.
*/
public static int getLocalMaxRunningMaps(
org.apache.hadoop.mapreduce.JobContext job) {
return job.getConfiguration().getInt(LOCAL_MAX_MAPS, 1);
}
/**
* Set the max number of reduce tasks to run concurrently in the LocalJobRunner.
* @param job the job to configure
* @param maxReduces the maximum number of reduce tasks to allow.
*/
public static void setLocalMaxRunningReduces(
org.apache.hadoop.mapreduce.JobContext job,
int maxReduces) {
job.getConfiguration().setInt(LOCAL_MAX_REDUCES, maxReduces);
}
/**
* @return the max number of reduce tasks to run concurrently in the
* LocalJobRunner.
*/
public static int getLocalMaxRunningReduces(
org.apache.hadoop.mapreduce.JobContext job) {
return job.getConfiguration().getInt(LOCAL_MAX_REDUCES, 1);
}
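  /*
   * Usage sketch (added for illustration; not part of the original source),
   * assuming a driver wants more than one task running at a time in the local
   * runner. The job name is a hypothetical placeholder; the helpers are the
   * static setters defined above.
   *
   *   Configuration conf = new Configuration();
   *   org.apache.hadoop.mapreduce.Job job =
   *       org.apache.hadoop.mapreduce.Job.getInstance(conf, "local-demo");
   *   LocalJobRunner.setLocalMaxRunningMaps(job, 4);     // up to 4 concurrent map tasks
   *   LocalJobRunner.setLocalMaxRunningReduces(job, 2);  // up to 2 concurrent reduce tasks
   *   // equivalent to setting mapreduce.local.map.tasks.maximum=4 and
   *   // mapreduce.local.reduce.tasks.maximum=2 directly in the configuration
   */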
@Override
public void cancelDelegationToken(Token<DelegationTokenIdentifier> token
) throws IOException,
InterruptedException {
}
@Override
public Token<DelegationTokenIdentifier>
getDelegationToken(Text renewer) throws IOException, InterruptedException {
return null;
}
@Override
public long renewDelegationToken(Token<DelegationTokenIdentifier> token
) throws IOException,InterruptedException{
return 0;
}
@Override
public LogParams getLogFileParams(org.apache.hadoop.mapreduce.JobID jobID,
org.apache.hadoop.mapreduce.TaskAttemptID taskAttemptID)
throws IOException, InterruptedException {
throw new UnsupportedOperationException("Not supported");
}
static void setupChildMapredLocalDirs(Task t, JobConf conf) {
String[] localDirs = conf.getTrimmedStrings(MRConfig.LOCAL_DIR);
String jobId = t.getJobID().toString();
String taskId = t.getTaskID().toString();
boolean isCleanup = t.isTaskCleanupTask();
String user = t.getUser();
StringBuffer childMapredLocalDir =
new StringBuffer(localDirs[0] + Path.SEPARATOR
+ getLocalTaskDir(user, jobId, taskId, isCleanup));
for (int i = 1; i < localDirs.length; i++) {
childMapredLocalDir.append("," + localDirs[i] + Path.SEPARATOR
+ getLocalTaskDir(user, jobId, taskId, isCleanup));
}
LOG.debug(MRConfig.LOCAL_DIR + " for child : " + childMapredLocalDir);
conf.set(MRConfig.LOCAL_DIR, childMapredLocalDir.toString());
}
static final String TASK_CLEANUP_SUFFIX = ".cleanup";
static final String JOBCACHE = "jobcache";
static String getLocalTaskDir(String user, String jobid, String taskid,
boolean isCleanupAttempt) {
String taskDir = jobDir + Path.SEPARATOR + user + Path.SEPARATOR + JOBCACHE
+ Path.SEPARATOR + jobid + Path.SEPARATOR + taskid;
if (isCleanupAttempt) {
taskDir = taskDir + TASK_CLEANUP_SUFFIX;
}
return taskDir;
}
}
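/*
 * End-to-end sketch (added for illustration; not part of the original file):
 * one way a client might route a job through the local runner by selecting the
 * "local" framework name before submission. The job name and the input/output
 * paths are hypothetical assumptions; only public Hadoop APIs are used.
 */
class LocalJobRunnerUsageSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // With the local framework name, LocalClientProtocolProvider hands back a
    // LocalJobRunner instead of a YARN-backed client.
    conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
    org.apache.hadoop.mapreduce.Job job =
        org.apache.hadoop.mapreduce.Job.getInstance(conf, "local-sketch"); // hypothetical name
    job.setNumReduceTasks(1);
    org.apache.hadoop.mapreduce.lib.input.FileInputFormat.addInputPath(
        job, new Path("input"));   // hypothetical input directory
    org.apache.hadoop.mapreduce.lib.output.FileOutputFormat.setOutputPath(
        job, new Path("output"));  // hypothetical output directory
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}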
| 35,110 | 34.35851 | 102 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalClientProtocolProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
import org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider;
@InterfaceAudience.Private
public class LocalClientProtocolProvider extends ClientProtocolProvider {
@Override
public ClientProtocol create(Configuration conf) throws IOException {
String framework =
conf.get(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
if (!MRConfig.LOCAL_FRAMEWORK_NAME.equals(framework)) {
return null;
}
conf.setInt(JobContext.NUM_MAPS, 1);
return new LocalJobRunner(conf);
}
@Override
public ClientProtocol create(InetSocketAddress addr, Configuration conf) {
return null; // LocalJobRunner doesn't use a socket
}
@Override
public void close(ClientProtocol clientProtocol) {
// no clean up required
}
}
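/*
 * Selection sketch (added for illustration; not part of the original file):
 * create(conf) only yields a client when mapreduce.framework.name resolves to
 * "local"; for any other framework it returns null so another provider can
 * claim the configuration. Uses only the classes imported above.
 */
class LocalClientProtocolProviderSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
    ClientProtocolProvider provider = new LocalClientProtocolProvider();
    ClientProtocol local = provider.create(conf);
    System.out.println("handled locally: " + (local != null));                  // expected: true
    conf.set(MRConfig.FRAMEWORK_NAME, "yarn");
    System.out.println("handled locally: " + (provider.create(conf) != null));  // expected: false
  }
}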
| 1,887 | 32.714286 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunnerMetrics.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.apache.hadoop.metrics.MetricsContext;
import org.apache.hadoop.metrics.MetricsRecord;
import org.apache.hadoop.metrics.MetricsUtil;
import org.apache.hadoop.metrics.Updater;
import org.apache.hadoop.metrics.jvm.JvmMetrics;
@SuppressWarnings("deprecation")
class LocalJobRunnerMetrics implements Updater {
private final MetricsRecord metricsRecord;
private int numMapTasksLaunched = 0;
private int numMapTasksCompleted = 0;
private int numReduceTasksLaunched = 0;
private int numReduceTasksCompleted = 0;
private int numWaitingMaps = 0;
private int numWaitingReduces = 0;
public LocalJobRunnerMetrics(JobConf conf) {
String sessionId = conf.getSessionId();
// Initiate JVM Metrics
JvmMetrics.init("JobTracker", sessionId);
// Create a record for map-reduce metrics
MetricsContext context = MetricsUtil.getContext("mapred");
// record name is jobtracker for compatibility
metricsRecord = MetricsUtil.createRecord(context, "jobtracker");
metricsRecord.setTag("sessionId", sessionId);
context.registerUpdater(this);
}
/**
* Since this object is a registered updater, this method will be called
* periodically, e.g. every 5 seconds.
*/
public void doUpdates(MetricsContext unused) {
synchronized (this) {
metricsRecord.incrMetric("maps_launched", numMapTasksLaunched);
metricsRecord.incrMetric("maps_completed", numMapTasksCompleted);
metricsRecord.incrMetric("reduces_launched", numReduceTasksLaunched);
metricsRecord.incrMetric("reduces_completed", numReduceTasksCompleted);
metricsRecord.incrMetric("waiting_maps", numWaitingMaps);
metricsRecord.incrMetric("waiting_reduces", numWaitingReduces);
numMapTasksLaunched = 0;
numMapTasksCompleted = 0;
numReduceTasksLaunched = 0;
numReduceTasksCompleted = 0;
numWaitingMaps = 0;
numWaitingReduces = 0;
}
metricsRecord.update();
}
public synchronized void launchMap(TaskAttemptID taskAttemptID) {
++numMapTasksLaunched;
decWaitingMaps(taskAttemptID.getJobID(), 1);
}
public synchronized void completeMap(TaskAttemptID taskAttemptID) {
++numMapTasksCompleted;
}
public synchronized void launchReduce(TaskAttemptID taskAttemptID) {
++numReduceTasksLaunched;
decWaitingReduces(taskAttemptID.getJobID(), 1);
}
public synchronized void completeReduce(TaskAttemptID taskAttemptID) {
++numReduceTasksCompleted;
}
private synchronized void decWaitingMaps(JobID id, int task) {
numWaitingMaps -= task;
}
private synchronized void decWaitingReduces(JobID id, int task){
numWaitingReduces -= task;
}
}
| 3,516 | 34.525253 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/yarn/proto/HSClientProtocol.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.proto;
import org.apache.hadoop.mapreduce.v2.api.MRClientProtocolPB;
import org.apache.hadoop.yarn.proto.MRClientProtocol.MRClientProtocolService;
/**
* Fake protocol to differentiate the blocking interfaces in the
* security info class loaders.
*/
public interface HSClientProtocol {
public abstract class HSClientProtocolService {
public interface BlockingInterface extends MRClientProtocolPB {
}
public static com.google.protobuf.BlockingService newReflectiveBlockingService(
final HSClientProtocolService.BlockingInterface impl) {
// The cast is safe
return MRClientProtocolService
.newReflectiveBlockingService((MRClientProtocolService.BlockingInterface) impl);
}
}
}
| 1,568 | 38.225 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/testjar/ExternalMapperReducer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package testjar;
import java.io.IOException;
import java.util.Iterator;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
public class ExternalMapperReducer
implements Mapper<WritableComparable, Writable,
ExternalWritable, IntWritable>,
Reducer<WritableComparable, Writable,
WritableComparable, IntWritable> {
public void configure(JobConf job) {
}
public void close()
throws IOException {
}
public void map(WritableComparable key, Writable value,
OutputCollector<ExternalWritable, IntWritable> output,
Reporter reporter)
throws IOException {
if (value instanceof Text) {
Text text = (Text)value;
ExternalWritable ext = new ExternalWritable(text.toString());
output.collect(ext, new IntWritable(1));
}
}
public void reduce(WritableComparable key, Iterator<Writable> values,
OutputCollector<WritableComparable, IntWritable> output,
Reporter reporter)
throws IOException {
int count = 0;
while (values.hasNext()) {
count++;
values.next();
}
output.collect(key, new IntWritable(count));
}
}
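/*
 * Wiring sketch (added for illustration; not part of the original test class),
 * assuming an old-API driver plugs this class in as both mapper and reducer.
 * The job name and paths are hypothetical placeholders.
 *
 *   JobConf job = new JobConf();
 *   job.setJobName("external-mr");                      // hypothetical name
 *   job.setMapperClass(ExternalMapperReducer.class);
 *   job.setReducerClass(ExternalMapperReducer.class);
 *   job.setMapOutputKeyClass(ExternalWritable.class);   // custom key type emitted by map()
 *   job.setMapOutputValueClass(IntWritable.class);
 *   org.apache.hadoop.mapred.FileInputFormat.setInputPaths(job, "in");   // hypothetical path
 *   org.apache.hadoop.mapred.FileOutputFormat.setOutputPath(
 *       job, new org.apache.hadoop.fs.Path("out"));                      // hypothetical path
 *   org.apache.hadoop.mapred.JobClient.runJob(job);
 */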
| 2,355 | 30.837838 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/testjar/ExternalWritable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package testjar;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.WritableComparable;
/**
 * This is a simple example Writable class. It is used as a class external
 * to the Hadoop IO classes for testing user-defined Writable implementations.
*
*/
public class ExternalWritable
implements WritableComparable {
private String message = null;
public ExternalWritable() {
}
public ExternalWritable(String message) {
this.message = message;
}
public String getMessage() {
return message;
}
public void setMessage(String message) {
this.message = message;
}
public void readFields(DataInput in)
throws IOException {
message = null;
boolean hasMessage = in.readBoolean();
if (hasMessage) {
message = in.readUTF();
}
}
public void write(DataOutput out)
throws IOException {
boolean hasMessage = (message != null && message.length() > 0);
out.writeBoolean(hasMessage);
if (hasMessage) {
out.writeUTF(message);
}
}
public int compareTo(Object o) {
if (!(o instanceof ExternalWritable)) {
throw new IllegalArgumentException("Input not an ExternalWritable");
}
ExternalWritable that = (ExternalWritable)o;
return this.message.compareTo(that.message);
}
public String toString() {
return this.message;
}
}
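/*
 * Round-trip sketch (added for illustration; not part of the original file):
 * demonstrates the Writable contract implemented above, i.e. write() followed
 * by readFields() reproduces the message. Only java.io and the class above are
 * used; the message text is arbitrary.
 */
class ExternalWritableRoundTrip {
  public static void main(String[] args) throws IOException {
    ExternalWritable before = new ExternalWritable("hello external writable");
    java.io.ByteArrayOutputStream bytes = new java.io.ByteArrayOutputStream();
    before.write(new java.io.DataOutputStream(bytes));              // serialize
    ExternalWritable after = new ExternalWritable();
    after.readFields(new java.io.DataInputStream(
        new java.io.ByteArrayInputStream(bytes.toByteArray())));    // deserialize
    // compareTo() of equal messages is 0, so this prints "hello external writable / 0"
    System.out.println(after.getMessage() + " / " + before.compareTo(after));
  }
}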
| 2,232 | 24.666667 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/testjar/CustomOutputCommitter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package testjar;
import org.apache.hadoop.mapred.FileOutputCommitter;
public class CustomOutputCommitter extends FileOutputCommitter {
// custom output committer with default implementation
}
| 1,004 | 37.653846 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/testjar/ClassWordCount.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package testjar;
import java.io.*;
import java.util.*;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.WordCount;
/**
 * This is an example Hadoop Map/Reduce application used by
 * TestMiniMRClasspath. It reuses the WordCount example classes in Hadoop.
*/
public class ClassWordCount {
/**
* Counts the words in each line.
* For each line of input, break the line into words and emit them as
* (<b>word</b>, <b>1</b>).
*/
public static class MapClass extends WordCount.MapClass
implements Mapper<LongWritable, Text, Text, IntWritable> {
}
/**
* A reducer class that just emits the sum of the input values.
*/
public static class Reduce extends WordCount.Reduce
implements Reducer<Text, IntWritable, Text, IntWritable> {
}
}
| 2,100 | 33.442623 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/testjar/ExternalIdentityReducer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package testjar;
import java.io.IOException;
import java.util.Iterator;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
public class ExternalIdentityReducer implements
Reducer<WritableComparable, Writable,
WritableComparable, Writable> {
public void configure(JobConf job) {
}
public void close()
throws IOException {
}
public void reduce(WritableComparable key, Iterator<Writable> values,
OutputCollector<WritableComparable, Writable> output,
Reporter reporter)
throws IOException {
while (values.hasNext()) {
output.collect(key, values.next());
}
}
}
| 1,695 | 31 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/testjar/UserNamePermission.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package testjar;
import java.io.IOException;
import java.util.Iterator;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
public class UserNamePermission
{
private static final Log LOG = LogFactory.getLog(UserNamePermission.class);
  // This mapper reads the user name and passes it to the reducer
public static class UserNameMapper extends Mapper<LongWritable,Text,Text,Text>
{
Text key1 = new Text("UserName");
public void map(LongWritable key, Text value, Context context)
throws IOException,InterruptedException {
Text val = new Text(System.getProperty("user.name").toString());
context.write(key1, val);
}
}
//The reducer is responsible for writing the user name to the file
//which will be validated by the testcase
public static class UserNameReducer extends Reducer<Text,Text,Text,Text>
{
public void reduce(Text key, Iterator<Text> values,
Context context) throws IOException,InterruptedException {
LOG.info("The key "+key);
if(values.hasNext())
{
Text val = values.next();
LOG.info("The value "+val);
context.write(key,new Text(System.getProperty("user.name")));
}
}
}
public static void main(String [] args) throws Exception
{
Path outDir = new Path("output");
Configuration conf = new Configuration();
Job job = Job.getInstance(conf, "user name check");
job.setJarByClass(UserNamePermission.class);
job.setMapperClass(UserNamePermission.UserNameMapper.class);
job.setCombinerClass(UserNamePermission.UserNameReducer.class);
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(Text.class);
job.setReducerClass(UserNamePermission.UserNameReducer.class);
job.setNumReduceTasks(1);
job.setInputFormatClass(TextInputFormat.class);
TextInputFormat.addInputPath(job, new Path("input"));
FileOutputFormat.setOutputPath(job, outDir);
System.exit(job.waitForCompletion(true) ? 0 : 1);
}
}
| 3,530 | 34.31 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/testjar/JobKillCommitter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package testjar;
import java.io.IOException;
import java.util.Iterator;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileOutputCommitter;
import org.apache.hadoop.mapred.JobContext;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
public class JobKillCommitter {
/**
   * The class provides an overridden implementation of the output committer
   * setup method, which causes the job to fail during setup.
*/
public static class CommitterWithFailSetup extends FileOutputCommitter {
@Override
public void setupJob(JobContext context) throws IOException {
throw new IOException();
}
}
/**
   * The class provides a dummy implementation of OutputCommitter
   * which does nothing.
*/
public static class CommitterWithNoError extends FileOutputCommitter {
@Override
public void setupJob(JobContext context) throws IOException {
}
@Override
public void commitJob(JobContext context) throws IOException {
}
}
/**
   * The class provides an overridden implementation of commitJob which
   * causes the cleanup step to fail.
*/
public static class CommitterWithFailCleanup extends FileOutputCommitter {
@Override
public void commitJob(JobContext context) throws IOException {
throw new IOException();
}
}
/**
   * The class provides a dummy implementation of the map method which
   * does nothing.
*/
public static class MapperPass extends Mapper<LongWritable, Text, Text, Text> {
public void map(LongWritable key, Text value, Context context)
throws IOException, InterruptedException {
}
}
/**
   * The class provides a map implementation that sleeps.
*/
public static class MapperPassSleep extends
Mapper<LongWritable, Text, Text, Text> {
public void map(LongWritable key, Text value, Context context)
throws IOException, InterruptedException {
Thread.sleep(10000);
}
}
/**
* The class provides a way for the mapper function to fail by
* intentionally throwing an IOException
*/
public static class MapperFail extends Mapper<LongWritable, Text, Text, Text> {
public void map(LongWritable key, Text value, Context context)
throws IOException, InterruptedException {
throw new IOException();
}
}
/**
* The class provides a way for the reduce function to fail by
* intentionally throwing an IOException
*/
public static class ReducerFail extends Reducer<Text, Text, Text, Text> {
public void reduce(Text key, Iterator<Text> values, Context context)
throws IOException, InterruptedException {
throw new IOException();
}
}
/**
   * The class provides an empty implementation of the reduce method that
   * does nothing.
*/
public static class ReducerPass extends Reducer<Text, Text, Text, Text> {
public void reduce(Text key, Iterator<Text> values, Context context)
throws IOException, InterruptedException {
}
}
}
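/*
 * Wiring sketch (added for illustration; not part of the original helpers),
 * assuming a test wants to force a failure during job setup. Plugging
 * CommitterWithFailSetup into an old-API JobConf makes setupJob() throw, so
 * the submitted job is expected to fail before any task output is committed.
 * The driver code below is hypothetical.
 *
 *   org.apache.hadoop.mapred.JobConf conf = new org.apache.hadoop.mapred.JobConf();
 *   conf.setOutputCommitter(JobKillCommitter.CommitterWithFailSetup.class);
 *   // ... set mapper, reducer, input and output paths as the test requires ...
 *   // submitting this configuration should end with the job in a FAILED state
 */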
| 3,872 | 31.275 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/RandomTextWriterJob.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
public class RandomTextWriterJob extends Configured implements Tool {
public static final String TOTAL_BYTES =
"mapreduce.randomtextwriter.totalbytes";
public static final String BYTES_PER_MAP =
"mapreduce.randomtextwriter.bytespermap";
public static final String MAX_VALUE = "mapreduce.randomtextwriter.maxwordsvalue";
public static final String MIN_VALUE = "mapreduce.randomtextwriter.minwordsvalue";
public static final String MIN_KEY = "mapreduce.randomtextwriter.minwordskey";
public static final String MAX_KEY = "mapreduce.randomtextwriter.maxwordskey";
static enum Counters { RECORDS_WRITTEN, BYTES_WRITTEN }
public Job createJob(Configuration conf) throws IOException {
long numBytesToWritePerMap = conf.getLong(BYTES_PER_MAP, 10 * 1024);
long totalBytesToWrite = conf.getLong(TOTAL_BYTES, numBytesToWritePerMap);
int numMaps = (int) (totalBytesToWrite / numBytesToWritePerMap);
if (numMaps == 0 && totalBytesToWrite > 0) {
numMaps = 1;
conf.setLong(BYTES_PER_MAP, totalBytesToWrite);
}
conf.setInt(MRJobConfig.NUM_MAPS, numMaps);
Job job = Job.getInstance(conf);
job.setJarByClass(RandomTextWriterJob.class);
job.setJobName("random-text-writer");
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Text.class);
job.setInputFormatClass(RandomInputFormat.class);
job.setMapperClass(RandomTextMapper.class);
job.setOutputFormatClass(SequenceFileOutputFormat.class);
//FileOutputFormat.setOutputPath(job, new Path("random-output"));
job.setNumReduceTasks(0);
return job;
}
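  /*
   * Worked example (added for illustration; not in the original source) of the
   * split math in createJob() above, with hypothetical settings: a total of
   * 1,048,576 bytes and 10,240 bytes per map give
   * numMaps = 1,048,576 / 10,240 = 102 map tasks (integer division), each
   * writing roughly 10 KB. If the total were smaller than bytesPerMap, numMaps
   * would round down to 0 and the guard above resets it to a single map that
   * writes everything.
   */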
public static class RandomInputFormat extends InputFormat<Text, Text> {
/**
* Generate the requested number of file splits, with the filename
* set to the filename of the output file.
*/
public List<InputSplit> getSplits(JobContext job) throws IOException {
List<InputSplit> result = new ArrayList<InputSplit>();
Path outDir = FileOutputFormat.getOutputPath(job);
int numSplits =
job.getConfiguration().getInt(MRJobConfig.NUM_MAPS, 1);
for(int i=0; i < numSplits; ++i) {
result.add(new FileSplit(new Path(outDir, "dummy-split-" + i), 0, 1,
(String[])null));
}
return result;
}
/**
* Return a single record (filename, "") where the filename is taken from
* the file split.
*/
public static class RandomRecordReader extends RecordReader<Text, Text> {
Path name;
Text key = null;
Text value = new Text();
public RandomRecordReader(Path p) {
name = p;
}
public void initialize(InputSplit split,
TaskAttemptContext context)
throws IOException, InterruptedException {
}
public boolean nextKeyValue() {
if (name != null) {
key = new Text();
key.set(name.getName());
name = null;
return true;
}
return false;
}
public Text getCurrentKey() {
return key;
}
public Text getCurrentValue() {
return value;
}
public void close() {}
public float getProgress() {
return 0.0f;
}
}
public RecordReader<Text, Text> createRecordReader(InputSplit split,
TaskAttemptContext context) throws IOException, InterruptedException {
return new RandomRecordReader(((FileSplit) split).getPath());
}
}
public static class RandomTextMapper extends Mapper<Text, Text, Text, Text> {
private long numBytesToWrite;
private int minWordsInKey;
private int wordsInKeyRange;
private int minWordsInValue;
private int wordsInValueRange;
private Random random = new Random();
/**
* Save the configuration value that we need to write the data.
*/
public void setup(Context context) {
Configuration conf = context.getConfiguration();
numBytesToWrite = conf.getLong(BYTES_PER_MAP,
1*1024*1024*1024);
minWordsInKey = conf.getInt(MIN_KEY, 5);
wordsInKeyRange = (conf.getInt(MAX_KEY, 10) - minWordsInKey);
minWordsInValue = conf.getInt(MIN_VALUE, 10);
wordsInValueRange = (conf.getInt(MAX_VALUE, 100) - minWordsInValue);
}
/**
* Given an output filename, write a bunch of random records to it.
*/
public void map(Text key, Text value,
Context context) throws IOException,InterruptedException {
int itemCount = 0;
while (numBytesToWrite > 0) {
// Generate the key/value
int noWordsKey = minWordsInKey +
(wordsInKeyRange != 0 ? random.nextInt(wordsInKeyRange) : 0);
int noWordsValue = minWordsInValue +
(wordsInValueRange != 0 ? random.nextInt(wordsInValueRange) : 0);
Text keyWords = generateSentence(noWordsKey);
Text valueWords = generateSentence(noWordsValue);
// Write the sentence
context.write(keyWords, valueWords);
numBytesToWrite -= (keyWords.getLength() + valueWords.getLength());
// Update counters, progress etc.
context.getCounter(Counters.BYTES_WRITTEN).increment(
keyWords.getLength() + valueWords.getLength());
context.getCounter(Counters.RECORDS_WRITTEN).increment(1);
if (++itemCount % 200 == 0) {
context.setStatus("wrote record " + itemCount + ". " +
numBytesToWrite + " bytes left.");
}
}
context.setStatus("done with " + itemCount + " records.");
}
private Text generateSentence(int noWords) {
StringBuffer sentence = new StringBuffer();
String space = " ";
for (int i=0; i < noWords; ++i) {
sentence.append(words[random.nextInt(words.length)]);
sentence.append(space);
}
return new Text(sentence.toString());
}
private static String[] words = {
"diurnalness", "Homoiousian",
"spiranthic", "tetragynian",
"silverhead", "ungreat",
"lithograph", "exploiter",
"physiologian", "by",
"hellbender", "Filipendula",
"undeterring", "antiscolic",
"pentagamist", "hypoid",
"cacuminal", "sertularian",
"schoolmasterism", "nonuple",
"gallybeggar", "phytonic",
"swearingly", "nebular",
"Confervales", "thermochemically",
"characinoid", "cocksuredom",
"fallacious", "feasibleness",
"debromination", "playfellowship",
"tramplike", "testa",
"participatingly", "unaccessible",
"bromate", "experientialist",
"roughcast", "docimastical",
"choralcelo", "blightbird",
"peptonate", "sombreroed",
"unschematized", "antiabolitionist",
"besagne", "mastication",
"bromic", "sviatonosite",
"cattimandoo", "metaphrastical",
"endotheliomyoma", "hysterolysis",
"unfulminated", "Hester",
"oblongly", "blurredness",
"authorling", "chasmy",
"Scorpaenidae", "toxihaemia",
"Dictograph", "Quakerishly",
"deaf", "timbermonger",
"strammel", "Thraupidae",
"seditious", "plerome",
"Arneb", "eristically",
"serpentinic", "glaumrie",
"socioromantic", "apocalypst",
"tartrous", "Bassaris",
"angiolymphoma", "horsefly",
"kenno", "astronomize",
"euphemious", "arsenide",
"untongued", "parabolicness",
"uvanite", "helpless",
"gemmeous", "stormy",
"templar", "erythrodextrin",
"comism", "interfraternal",
"preparative", "parastas",
"frontoorbital", "Ophiosaurus",
"diopside", "serosanguineous",
"ununiformly", "karyological",
"collegian", "allotropic",
"depravity", "amylogenesis",
"reformatory", "epidymides",
"pleurotropous", "trillium",
"dastardliness", "coadvice",
"embryotic", "benthonic",
"pomiferous", "figureheadship",
"Megaluridae", "Harpa",
"frenal", "commotion",
"abthainry", "cobeliever",
"manilla", "spiciferous",
"nativeness", "obispo",
"monilioid", "biopsic",
"valvula", "enterostomy",
"planosubulate", "pterostigma",
"lifter", "triradiated",
"venialness", "tum",
"archistome", "tautness",
"unswanlike", "antivenin",
"Lentibulariaceae", "Triphora",
"angiopathy", "anta",
"Dawsonia", "becomma",
"Yannigan", "winterproof",
"antalgol", "harr",
"underogating", "ineunt",
"cornberry", "flippantness",
"scyphostoma", "approbation",
"Ghent", "Macraucheniidae",
"scabbiness", "unanatomized",
"photoelasticity", "eurythermal",
"enation", "prepavement",
"flushgate", "subsequentially",
"Edo", "antihero",
"Isokontae", "unforkedness",
"porriginous", "daytime",
"nonexecutive", "trisilicic",
"morphiomania", "paranephros",
"botchedly", "impugnation",
"Dodecatheon", "obolus",
"unburnt", "provedore",
"Aktistetae", "superindifference",
"Alethea", "Joachimite",
"cyanophilous", "chorograph",
"brooky", "figured",
"periclitation", "quintette",
"hondo", "ornithodelphous",
"unefficient", "pondside",
"bogydom", "laurinoxylon",
"Shiah", "unharmed",
"cartful", "noncrystallized",
"abusiveness", "cromlech",
"japanned", "rizzomed",
"underskin", "adscendent",
"allectory", "gelatinousness",
"volcano", "uncompromisingly",
"cubit", "idiotize",
"unfurbelowed", "undinted",
"magnetooptics", "Savitar",
"diwata", "ramosopalmate",
"Pishquow", "tomorn",
"apopenptic", "Haversian",
"Hysterocarpus", "ten",
"outhue", "Bertat",
"mechanist", "asparaginic",
"velaric", "tonsure",
"bubble", "Pyrales",
"regardful", "glyphography",
"calabazilla", "shellworker",
"stradametrical", "havoc",
"theologicopolitical", "sawdust",
"diatomaceous", "jajman",
"temporomastoid", "Serrifera",
"Ochnaceae", "aspersor",
"trailmaking", "Bishareen",
"digitule", "octogynous",
"epididymitis", "smokefarthings",
"bacillite", "overcrown",
"mangonism", "sirrah",
"undecorated", "psychofugal",
"bismuthiferous", "rechar",
"Lemuridae", "frameable",
"thiodiazole", "Scanic",
"sportswomanship", "interruptedness",
"admissory", "osteopaedion",
"tingly", "tomorrowness",
"ethnocracy", "trabecular",
"vitally", "fossilism",
"adz", "metopon",
"prefatorial", "expiscate",
"diathermacy", "chronist",
"nigh", "generalizable",
"hysterogen", "aurothiosulphuric",
"whitlowwort", "downthrust",
"Protestantize", "monander",
"Itea", "chronographic",
"silicize", "Dunlop",
"eer", "componental",
"spot", "pamphlet",
"antineuritic", "paradisean",
"interruptor", "debellator",
"overcultured", "Florissant",
"hyocholic", "pneumatotherapy",
"tailoress", "rave",
"unpeople", "Sebastian",
"thermanesthesia", "Coniferae",
"swacking", "posterishness",
"ethmopalatal", "whittle",
"analgize", "scabbardless",
"naught", "symbiogenetically",
"trip", "parodist",
"columniform", "trunnel",
"yawler", "goodwill",
"pseudohalogen", "swangy",
"cervisial", "mediateness",
"genii", "imprescribable",
"pony", "consumptional",
"carposporangial", "poleax",
"bestill", "subfebrile",
"sapphiric", "arrowworm",
"qualminess", "ultraobscure",
"thorite", "Fouquieria",
"Bermudian", "prescriber",
"elemicin", "warlike",
"semiangle", "rotular",
"misthread", "returnability",
"seraphism", "precostal",
"quarried", "Babylonism",
"sangaree", "seelful",
"placatory", "pachydermous",
"bozal", "galbulus",
"spermaphyte", "cumbrousness",
"pope", "signifier",
"Endomycetaceae", "shallowish",
"sequacity", "periarthritis",
"bathysphere", "pentosuria",
"Dadaism", "spookdom",
"Consolamentum", "afterpressure",
"mutter", "louse",
"ovoviviparous", "corbel",
"metastoma", "biventer",
"Hydrangea", "hogmace",
"seizing", "nonsuppressed",
"oratorize", "uncarefully",
"benzothiofuran", "penult",
"balanocele", "macropterous",
"dishpan", "marten",
"absvolt", "jirble",
"parmelioid", "airfreighter",
"acocotl", "archesporial",
"hypoplastral", "preoral",
"quailberry", "cinque",
"terrestrially", "stroking",
"limpet", "moodishness",
"canicule", "archididascalian",
"pompiloid", "overstaid",
"introducer", "Italical",
"Christianopaganism", "prescriptible",
"subofficer", "danseuse",
"cloy", "saguran",
"frictionlessly", "deindividualization",
"Bulanda", "ventricous",
"subfoliar", "basto",
"scapuloradial", "suspend",
"stiffish", "Sphenodontidae",
"eternal", "verbid",
"mammonish", "upcushion",
"barkometer", "concretion",
"preagitate", "incomprehensible",
"tristich", "visceral",
"hemimelus", "patroller",
"stentorophonic", "pinulus",
"kerykeion", "brutism",
"monstership", "merciful",
"overinstruct", "defensibly",
"bettermost", "splenauxe",
"Mormyrus", "unreprimanded",
"taver", "ell",
"proacquittal", "infestation",
"overwoven", "Lincolnlike",
"chacona", "Tamil",
"classificational", "lebensraum",
"reeveland", "intuition",
"Whilkut", "focaloid",
"Eleusinian", "micromembrane",
"byroad", "nonrepetition",
"bacterioblast", "brag",
"ribaldrous", "phytoma",
"counteralliance", "pelvimetry",
"pelf", "relaster",
"thermoresistant", "aneurism",
"molossic", "euphonym",
"upswell", "ladhood",
"phallaceous", "inertly",
"gunshop", "stereotypography",
"laryngic", "refasten",
"twinling", "oflete",
"hepatorrhaphy", "electrotechnics",
"cockal", "guitarist",
"topsail", "Cimmerianism",
"larklike", "Llandovery",
"pyrocatechol", "immatchable",
"chooser", "metrocratic",
"craglike", "quadrennial",
"nonpoisonous", "undercolored",
"knob", "ultratense",
"balladmonger", "slait",
"sialadenitis", "bucketer",
"magnificently", "unstipulated",
"unscourged", "unsupercilious",
"packsack", "pansophism",
"soorkee", "percent",
"subirrigate", "champer",
"metapolitics", "spherulitic",
"involatile", "metaphonical",
"stachyuraceous", "speckedness",
"bespin", "proboscidiform",
"gul", "squit",
"yeelaman", "peristeropode",
"opacousness", "shibuichi",
"retinize", "yote",
"misexposition", "devilwise",
"pumpkinification", "vinny",
"bonze", "glossing",
"decardinalize", "transcortical",
"serphoid", "deepmost",
"guanajuatite", "wemless",
"arval", "lammy",
"Effie", "Saponaria",
"tetrahedral", "prolificy",
"excerpt", "dunkadoo",
"Spencerism", "insatiately",
"Gilaki", "oratorship",
"arduousness", "unbashfulness",
"Pithecolobium", "unisexuality",
"veterinarian", "detractive",
"liquidity", "acidophile",
"proauction", "sural",
"totaquina", "Vichyite",
"uninhabitedness", "allegedly",
"Gothish", "manny",
"Inger", "flutist",
"ticktick", "Ludgatian",
"homotransplant", "orthopedical",
"diminutively", "monogoneutic",
"Kenipsim", "sarcologist",
"drome", "stronghearted",
"Fameuse", "Swaziland",
"alen", "chilblain",
"beatable", "agglomeratic",
"constitutor", "tendomucoid",
"porencephalous", "arteriasis",
"boser", "tantivy",
"rede", "lineamental",
"uncontradictableness", "homeotypical",
"masa", "folious",
"dosseret", "neurodegenerative",
"subtransverse", "Chiasmodontidae",
"palaeotheriodont", "unstressedly",
"chalcites", "piquantness",
"lampyrine", "Aplacentalia",
"projecting", "elastivity",
"isopelletierin", "bladderwort",
"strander", "almud",
"iniquitously", "theologal",
"bugre", "chargeably",
"imperceptivity", "meriquinoidal",
"mesophyte", "divinator",
"perfunctory", "counterappellant",
"synovial", "charioteer",
"crystallographical", "comprovincial",
"infrastapedial", "pleasurehood",
"inventurous", "ultrasystematic",
"subangulated", "supraoesophageal",
"Vaishnavism", "transude",
"chrysochrous", "ungrave",
"reconciliable", "uninterpleaded",
"erlking", "wherefrom",
"aprosopia", "antiadiaphorist",
"metoxazine", "incalculable",
"umbellic", "predebit",
"foursquare", "unimmortal",
"nonmanufacture", "slangy",
"predisputant", "familist",
"preaffiliate", "friarhood",
"corelysis", "zoonitic",
"halloo", "paunchy",
"neuromimesis", "aconitine",
"hackneyed", "unfeeble",
"cubby", "autoschediastical",
"naprapath", "lyrebird",
"inexistency", "leucophoenicite",
"ferrogoslarite", "reperuse",
"uncombable", "tambo",
"propodiale", "diplomatize",
"Russifier", "clanned",
"corona", "michigan",
"nonutilitarian", "transcorporeal",
"bought", "Cercosporella",
"stapedius", "glandularly",
"pictorially", "weism",
"disilane", "rainproof",
"Caphtor", "scrubbed",
"oinomancy", "pseudoxanthine",
"nonlustrous", "redesertion",
"Oryzorictinae", "gala",
"Mycogone", "reappreciate",
"cyanoguanidine", "seeingness",
"breadwinner", "noreast",
"furacious", "epauliere",
"omniscribent", "Passiflorales",
"uninductive", "inductivity",
"Orbitolina", "Semecarpus",
"migrainoid", "steprelationship",
"phlogisticate", "mesymnion",
"sloped", "edificator",
"beneficent", "culm",
"paleornithology", "unurban",
"throbless", "amplexifoliate",
"sesquiquintile", "sapience",
"astucious", "dithery",
"boor", "ambitus",
"scotching", "uloid",
"uncompromisingness", "hoove",
"waird", "marshiness",
"Jerusalem", "mericarp",
"unevoked", "benzoperoxide",
"outguess", "pyxie",
"hymnic", "euphemize",
"mendacity", "erythremia",
"rosaniline", "unchatteled",
"lienteria", "Bushongo",
"dialoguer", "unrepealably",
"rivethead", "antideflation",
"vinegarish", "manganosiderite",
"doubtingness", "ovopyriform",
"Cephalodiscus", "Muscicapa",
"Animalivora", "angina",
"planispheric", "ipomoein",
"cuproiodargyrite", "sandbox",
"scrat", "Munnopsidae",
"shola", "pentafid",
"overstudiousness", "times",
"nonprofession", "appetible",
"valvulotomy", "goladar",
"uniarticular", "oxyterpene",
"unlapsing", "omega",
"trophonema", "seminonflammable",
"circumzenithal", "starer",
"depthwise", "liberatress",
"unleavened", "unrevolting",
"groundneedle", "topline",
"wandoo", "umangite",
"ordinant", "unachievable",
"oversand", "snare",
"avengeful", "unexplicit",
"mustafina", "sonable",
"rehabilitative", "eulogization",
"papery", "technopsychology",
"impressor", "cresylite",
"entame", "transudatory",
"scotale", "pachydermatoid",
"imaginary", "yeat",
"slipped", "stewardship",
"adatom", "cockstone",
"skyshine", "heavenful",
"comparability", "exprobratory",
"dermorhynchous", "parquet",
"cretaceous", "vesperal",
"raphis", "undangered",
"Glecoma", "engrain",
"counteractively", "Zuludom",
"orchiocatabasis", "Auriculariales",
"warriorwise", "extraorganismal",
"overbuilt", "alveolite",
"tetchy", "terrificness",
"widdle", "unpremonished",
"rebilling", "sequestrum",
"equiconvex", "heliocentricism",
"catabaptist", "okonite",
"propheticism", "helminthagogic",
"calycular", "giantly",
"wingable", "golem",
"unprovided", "commandingness",
"greave", "haply",
"doina", "depressingly",
"subdentate", "impairment",
"decidable", "neurotrophic",
"unpredict", "bicorporeal",
"pendulant", "flatman",
"intrabred", "toplike",
"Prosobranchiata", "farrantly",
"toxoplasmosis", "gorilloid",
"dipsomaniacal", "aquiline",
"atlantite", "ascitic",
"perculsive", "prospectiveness",
"saponaceous", "centrifugalization",
"dinical", "infravaginal",
"beadroll", "affaite",
"Helvidian", "tickleproof",
"abstractionism", "enhedge",
"outwealth", "overcontribute",
"coldfinch", "gymnastic",
"Pincian", "Munychian",
"codisjunct", "quad",
"coracomandibular", "phoenicochroite",
"amender", "selectivity",
"putative", "semantician",
"lophotrichic", "Spatangoidea",
"saccharogenic", "inferent",
"Triconodonta", "arrendation",
"sheepskin", "taurocolla",
"bunghole", "Machiavel",
"triakistetrahedral", "dehairer",
"prezygapophysial", "cylindric",
"pneumonalgia", "sleigher",
"emir", "Socraticism",
"licitness", "massedly",
"instructiveness", "sturdied",
"redecrease", "starosta",
"evictor", "orgiastic",
"squdge", "meloplasty",
"Tsonecan", "repealableness",
"swoony", "myesthesia",
"molecule", "autobiographist",
"reciprocation", "refective",
"unobservantness", "tricae",
"ungouged", "floatability",
"Mesua", "fetlocked",
"chordacentrum", "sedentariness",
"various", "laubanite",
"nectopod", "zenick",
"sequentially", "analgic",
"biodynamics", "posttraumatic",
"nummi", "pyroacetic",
"bot", "redescend",
"dispermy", "undiffusive",
"circular", "trillion",
"Uraniidae", "ploration",
"discipular", "potentness",
"sud", "Hu",
"Eryon", "plugger",
"subdrainage", "jharal",
"abscission", "supermarket",
"countergabion", "glacierist",
"lithotresis", "minniebush",
"zanyism", "eucalypteol",
"sterilely", "unrealize",
"unpatched", "hypochondriacism",
"critically", "cheesecutter",
};
}
/**
* This is the main routine for launching a distributed random write job.
* It runs 10 maps/node and each node writes 1 gig of data to a DFS file.
* The reduce doesn't do anything.
*
* @throws IOException
*/
public int run(String[] args) throws Exception {
if (args.length == 0) {
return printUsage();
}
Job job = createJob(getConf());
FileOutputFormat.setOutputPath(job, new Path(args[0]));
Date startTime = new Date();
System.out.println("Job started: " + startTime);
int ret = job.waitForCompletion(true) ? 0 : 1;
Date endTime = new Date();
System.out.println("Job ended: " + endTime);
System.out.println("The job took " +
(endTime.getTime() - startTime.getTime()) /1000 +
" seconds.");
return ret;
}
static int printUsage() {
System.out.println("randomtextwriter " +
"[-outFormat <output format class>] " +
"<output>");
ToolRunner.printGenericCommandUsage(System.out);
return 2;
}
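  // Example invocation (illustrative): run() treats args[0] as the output
  // directory, e.g. "randomtextwriter /tmp/random-text-out"; the -outFormat
  // flag advertised above is not handled in run() itself.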
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new Configuration(), new RandomTextWriterJob(),
args);
System.exit(res);
}
}
| 25,737 | 32.910408 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/FailMapper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop;
import java.io.IOException;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
// Mapper that fails
public class FailMapper extends MapReduceBase implements
Mapper<WritableComparable, Writable, WritableComparable, Writable> {
public void map(WritableComparable key, Writable value,
OutputCollector<WritableComparable, Writable> out, Reporter reporter)
throws IOException {
// NOTE- the next line is required for the TestDebugScript test to succeed
System.err.println("failing map");
throw new RuntimeException("failing map");
}
}
| 1,614 | 37.452381 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/FailingMapper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop;
import java.io.IOException;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
/**
 * Fails the Mapper. The first attempt throws an exception; subsequent
 * attempts call System.exit(-1).
 */
public class FailingMapper extends Mapper<Text, Text, Text, Text> {
public void map(Text key, Text value,
Context context) throws IOException,InterruptedException {
// Just create a non-daemon thread which hangs forever. MR AM should not be
// hung by this.
new Thread() {
@Override
public void run() {
synchronized (this) {
try {
wait();
} catch (InterruptedException e) {
//
}
}
}
}.start();
if (context.getTaskAttemptID().getId() == 0) {
System.out.println("Attempt:" + context.getTaskAttemptID() +
" Failing mapper throwing exception");
throw new IOException("Attempt:" + context.getTaskAttemptID() +
" Failing mapper throwing exception");
} else {
System.out.println("Attempt:" + context.getTaskAttemptID() +
" Exiting");
System.exit(-1);
}
}
}
| 1,941 | 30.836066 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/cli/CLITestCmdMR.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cli;
import org.apache.hadoop.cli.util.CLICommandTypes;
import org.apache.hadoop.cli.util.CLITestCmd;
import org.apache.hadoop.cli.util.CommandExecutor;
public class CLITestCmdMR extends CLITestCmd {
public CLITestCmdMR(String str, CLICommandTypes type) {
super(str, type);
}
/**
   * This is not implemented because the HadoopArchive constructor requires a
   * JobConf to create an archive object. Because TestMRCLI uses the setup
   * method from TestHDFSCLI, the initialization of executor objects happens
   * before a config is created and updated. Thus, actual calls to executors
   * happen in the body of the test method.
*/
@Override
public CommandExecutor getExecutor(String tag)
throws IllegalArgumentException {
throw new IllegalArgumentException("Method isn't supported");
}
}
| 1,651 | 38.333333 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/cli/util/CLICommandArchive.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cli.util;
public class CLICommandArchive implements CLICommandTypes {
}
| 912 | 40.5 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/cli/util/CLICommandMRAdmin.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cli.util;
public class CLICommandMRAdmin implements CLICommandTypes {
}
| 912 | 40.5 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/io/TestSequenceFileMergeProgress.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator;
import org.apache.hadoop.io.SequenceFile.Sorter.SegmentDescriptor;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.DefaultCodec;
import org.apache.hadoop.mapred.*;
import junit.framework.TestCase;
import org.apache.commons.logging.*;
public class TestSequenceFileMergeProgress extends TestCase {
private static final Log LOG = FileInputFormat.LOG;
private static final int RECORDS = 10000;
public void testMergeProgressWithNoCompression() throws IOException {
runTest(SequenceFile.CompressionType.NONE);
}
public void testMergeProgressWithRecordCompression() throws IOException {
runTest(SequenceFile.CompressionType.RECORD);
}
public void testMergeProgressWithBlockCompression() throws IOException {
runTest(SequenceFile.CompressionType.BLOCK);
}
public void runTest(CompressionType compressionType) throws IOException {
JobConf job = new JobConf();
FileSystem fs = FileSystem.getLocal(job);
Path dir = new Path(System.getProperty("test.build.data",".") + "/mapred");
Path file = new Path(dir, "test.seq");
Path tempDir = new Path(dir, "tmp");
fs.delete(dir, true);
FileInputFormat.setInputPaths(job, dir);
fs.mkdirs(tempDir);
LongWritable tkey = new LongWritable();
Text tval = new Text();
SequenceFile.Writer writer =
SequenceFile.createWriter(fs, job, file, LongWritable.class, Text.class,
compressionType, new DefaultCodec());
try {
for (int i = 0; i < RECORDS; ++i) {
tkey.set(1234);
tval.set("valuevaluevaluevaluevaluevaluevaluevaluevaluevaluevalue");
writer.append(tkey, tval);
}
} finally {
writer.close();
}
long fileLength = fs.getFileStatus(file).getLen();
LOG.info("With compression = " + compressionType + ": "
+ "compressed length = " + fileLength);
SequenceFile.Sorter sorter = new SequenceFile.Sorter(fs,
job.getOutputKeyComparator(), job.getMapOutputKeyClass(),
job.getMapOutputValueClass(), job);
Path[] paths = new Path[] {file};
RawKeyValueIterator rIter = sorter.merge(paths, tempDir, false);
int count = 0;
while (rIter.next()) {
count++;
}
assertEquals(RECORDS, count);
assertEquals(1.0f, rIter.getProgress().get());
}
}
| 3,459 | 33.949495 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/io/FileBench.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Date;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.Map;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
public class FileBench extends Configured implements Tool {
static int printUsage() {
ToolRunner.printGenericCommandUsage(System.out);
System.out.println(
"Usage: Task list: -[no]r -[no]w\n" +
" Format: -[no]seq -[no]txt\n" +
" CompressionCodec: -[no]zip -[no]pln\n" +
" CompressionType: -[no]blk -[no]rec\n" +
" Required: -dir <working dir>\n" +
"All valid combinations are implicitly enabled, unless an option is enabled\n" +
"explicitly. For example, specifying \"-zip\", excludes -pln,\n" +
"unless they are also explicitly included, as in \"-pln -zip\"\n" +
"Note that CompressionType params only apply to SequenceFiles\n\n" +
"Useful options to set:\n" +
"-D fs.defaultFS=\"file:///\" \\\n" +
"-D fs.file.impl=org.apache.hadoop.fs.RawLocalFileSystem \\\n" +
"-D filebench.file.bytes=$((10*1024*1024*1024)) \\\n" +
"-D filebench.key.words=5 \\\n" +
"-D filebench.val.words=20\n");
return -1;
}
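  // Example invocation (illustrative; the flags map onto the enums below):
  //   FileBench -dir file:///tmp/bench -w -seq -zip -blk
  // times writing a block-compressed, gzip-coded SequenceFile under /tmp/bench.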
static String[] keys;
static String[] values;
static StringBuilder sentence = new StringBuilder();
private static String generateSentence(Random r, int noWords) {
sentence.setLength(0);
for (int i=0; i < noWords; ++i) {
sentence.append(words[r.nextInt(words.length)]);
sentence.append(" ");
}
return sentence.toString();
}
// fill keys, values with ~1.5 blocks for block-compressed seq fill
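  // Here "~1.5 blocks" means the combined length of the generated strings is
  // at least 1.5 x io.seqfile.compress.blocksize, so cycling through them
  // feeds a block-compressed SequenceFile writer more than one block of data.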
private static void fillBlocks(JobConf conf) {
Random r = new Random();
long seed = conf.getLong("filebench.seed", -1);
if (seed > 0) {
r.setSeed(seed);
}
int keylen = conf.getInt("filebench.key.words", 5);
int vallen = conf.getInt("filebench.val.words", 20);
int acc = (3 * conf.getInt("io.seqfile.compress.blocksize", 1000000)) >> 1;
ArrayList<String> k = new ArrayList<String>();
ArrayList<String> v = new ArrayList<String>();
for (int i = 0; acc > 0; ++i) {
String s = generateSentence(r, keylen);
acc -= s.length();
k.add(s);
s = generateSentence(r, vallen);
acc -= s.length();
v.add(s);
}
keys = k.toArray(new String[0]);
values = v.toArray(new String[0]);
}
@SuppressWarnings("unchecked") // OutputFormat instantiation
static long writeBench(JobConf conf) throws IOException {
    // use a long literal so the 5 GB default does not overflow int arithmetic
    long filelen = conf.getLong("filebench.file.bytes",
        5L * 1024 * 1024 * 1024);
Text key = new Text();
Text val = new Text();
final String fn = conf.get("test.filebench.name", "");
final Path outd = FileOutputFormat.getOutputPath(conf);
conf.set("mapred.work.output.dir", outd.toString());
OutputFormat outf = conf.getOutputFormat();
RecordWriter<Text,Text> rw =
outf.getRecordWriter(outd.getFileSystem(conf), conf, fn,
Reporter.NULL);
try {
long acc = 0L;
Date start = new Date();
for (int i = 0; acc < filelen; ++i) {
i %= keys.length;
key.set(keys[i]);
val.set(values[i]);
rw.write(key, val);
acc += keys[i].length();
acc += values[i].length();
}
Date end = new Date();
return end.getTime() - start.getTime();
} finally {
rw.close(Reporter.NULL);
}
}
@SuppressWarnings("unchecked") // InputFormat instantiation
static long readBench(JobConf conf) throws IOException {
InputFormat inf = conf.getInputFormat();
final String fn = conf.get("test.filebench.name", "");
Path pin = new Path(FileInputFormat.getInputPaths(conf)[0], fn);
FileStatus in = pin.getFileSystem(conf).getFileStatus(pin);
RecordReader rr = inf.getRecordReader(new FileSplit(pin, 0, in.getLen(),
(String[])null), conf, Reporter.NULL);
try {
Object key = rr.createKey();
Object val = rr.createValue();
Date start = new Date();
while (rr.next(key, val));
Date end = new Date();
return end.getTime() - start.getTime();
} finally {
rr.close();
}
}
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new Configuration(), new FileBench(), args);
System.exit(res);
}
/**
* Process params from command line and run set of benchmarks specified.
*/
public int run(String[] argv) throws IOException {
JobConf job = new JobConf(getConf());
EnumSet<CCodec> cc = null;
EnumSet<CType> ct = null;
EnumSet<Format> f = null;
EnumSet<RW> rw = null;
Path root = null;
FileSystem fs = FileSystem.get(job);
for(int i = 0; i < argv.length; ++i) {
try {
if ("-dir".equals(argv[i])) {
root = new Path(argv[++i]).makeQualified(fs);
System.out.println("DIR: " + root.toString());
} else if ("-seed".equals(argv[i])) {
job.setLong("filebench.seed", Long.valueOf(argv[++i]));
} else if (argv[i].startsWith("-no")) {
String arg = argv[i].substring(3);
cc = rem(CCodec.class, cc, arg);
ct = rem(CType.class, ct, arg);
f = rem(Format.class, f, arg);
rw = rem(RW.class, rw, arg);
} else {
String arg = argv[i].substring(1);
cc = add(CCodec.class, cc, arg);
ct = add(CType.class, ct, arg);
f = add(Format.class, f, arg);
rw = add(RW.class, rw, arg);
}
} catch (Exception e) {
throw (IOException)new IOException().initCause(e);
}
}
if (null == root) {
System.out.println("Missing -dir param");
printUsage();
return -1;
}
fillBlocks(job);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Text.class);
FileInputFormat.setInputPaths(job, root);
FileOutputFormat.setOutputPath(job, root);
if (null == cc) cc = EnumSet.allOf(CCodec.class);
if (null == ct) ct = EnumSet.allOf(CType.class);
if (null == f) f = EnumSet.allOf(Format.class);
if (null == rw) rw = EnumSet.allOf(RW.class);
for (RW rwop : rw) {
for (Format fmt : f) {
fmt.configure(job);
for (CCodec cod : cc) {
cod.configure(job);
if (!(fmt == Format.txt || cod == CCodec.pln)) {
for (CType typ : ct) {
String fn =
StringUtils.toUpperCase(fmt.name()) + "_" +
StringUtils.toUpperCase(cod.name()) + "_" +
StringUtils.toUpperCase(typ.name());
typ.configure(job);
System.out.print(
StringUtils.toUpperCase(rwop.name()) + " " + fn + ": ");
System.out.println(rwop.exec(fn, job) / 1000 +
" seconds");
}
} else {
String fn =
StringUtils.toUpperCase(fmt.name()) + "_" +
StringUtils.toUpperCase(cod.name());
Path p = new Path(root, fn);
if (rwop == RW.r && !fs.exists(p)) {
fn += cod.getExt();
}
System.out.print(
StringUtils.toUpperCase(rwop.name()) + " " + fn + ": ");
System.out.println(rwop.exec(fn, job) / 1000 +
" seconds");
}
}
}
}
return 0;
}
// overwrought argument processing and wordlist follow
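  // For example (illustrative): "-noseq" prunes Format.seq from an implicitly
  // full format set via rem(), while a bare "-zip" narrows the codec set to
  // exactly {CCodec.zip} via add().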
enum CCodec {
zip(GzipCodec.class, ".gz"), pln(null, "");
Class<? extends CompressionCodec> inf;
String ext;
CCodec(Class<? extends CompressionCodec> inf, String ext) {
this.inf = inf;
this.ext = ext;
}
public void configure(JobConf job) {
if (inf != null) {
job.setBoolean("mapred.output.compress", true);
job.setClass("mapred.output.compression.codec", inf,
CompressionCodec.class);
} else {
job.setBoolean("mapred.output.compress", false);
}
}
public String getExt() { return ext; }
}
enum CType {
blk("BLOCK"),
rec("RECORD");
String typ;
CType(String typ) { this.typ = typ; }
public void configure(JobConf job) {
job.set("mapred.map.output.compression.type", typ);
job.set("mapred.output.compression.type", typ);
}
}
enum Format {
seq(SequenceFileInputFormat.class, SequenceFileOutputFormat.class),
txt(TextInputFormat.class, TextOutputFormat.class);
Class<? extends InputFormat> inf;
Class<? extends OutputFormat> of;
Format(Class<? extends InputFormat> inf, Class<? extends OutputFormat> of) {
this.inf = inf;
this.of = of;
}
public void configure(JobConf job) {
if (null != inf) job.setInputFormat(inf);
if (null != of) job.setOutputFormat(of);
}
}
enum RW {
w() {
public long exec(String fn, JobConf job) throws IOException {
job.set("test.filebench.name", fn);
return writeBench(job);
}
},
r() {
public long exec(String fn, JobConf job) throws IOException {
job.set("test.filebench.name", fn);
return readBench(job);
}
};
public abstract long exec(String fn, JobConf job) throws IOException;
}
static Map<Class<? extends Enum>, Map<String,? extends Enum>> fullmap
= new HashMap<Class<? extends Enum>, Map<String,? extends Enum>>();
static {
// can't effectively use Enum::valueOf
Map<String,CCodec> m1 = new HashMap<String,CCodec>();
for (CCodec v : CCodec.values()) m1.put(v.name(), v);
fullmap.put(CCodec.class, m1);
Map<String,CType> m2 = new HashMap<String,CType>();
for (CType v : CType.values()) m2.put(v.name(), v);
fullmap.put(CType.class, m2);
Map<String,Format> m3 = new HashMap<String,Format>();
for (Format v : Format.values()) m3.put(v.name(), v);
fullmap.put(Format.class, m3);
Map<String,RW> m4 = new HashMap<String,RW>();
for (RW v : RW.values()) m4.put(v.name(), v);
fullmap.put(RW.class, m4);
}
public static <T extends Enum<T>> EnumSet<T> rem(Class<T> c,
EnumSet<T> set, String s) {
if (null != fullmap.get(c) && fullmap.get(c).get(s) != null) {
if (null == set) {
set = EnumSet.allOf(c);
}
set.remove(fullmap.get(c).get(s));
}
return set;
}
@SuppressWarnings("unchecked")
public static <T extends Enum<T>> EnumSet<T> add(Class<T> c,
EnumSet<T> set, String s) {
if (null != fullmap.get(c) && fullmap.get(c).get(s) != null) {
if (null == set) {
set = EnumSet.noneOf(c);
}
set.add((T)fullmap.get(c).get(s));
}
return set;
}
/**
* A random list of 1000 words from /usr/share/dict/words
*/
private static final String[] words = {
"diurnalness", "Homoiousian", "spiranthic", "tetragynian",
"silverhead", "ungreat", "lithograph", "exploiter",
"physiologian", "by", "hellbender", "Filipendula",
"undeterring", "antiscolic", "pentagamist", "hypoid",
"cacuminal", "sertularian", "schoolmasterism", "nonuple",
"gallybeggar", "phytonic", "swearingly", "nebular",
"Confervales", "thermochemically", "characinoid", "cocksuredom",
"fallacious", "feasibleness", "debromination", "playfellowship",
"tramplike", "testa", "participatingly", "unaccessible",
"bromate", "experientialist", "roughcast", "docimastical",
"choralcelo", "blightbird", "peptonate", "sombreroed",
"unschematized", "antiabolitionist", "besagne", "mastication",
"bromic", "sviatonosite", "cattimandoo", "metaphrastical",
"endotheliomyoma", "hysterolysis", "unfulminated", "Hester",
"oblongly", "blurredness", "authorling", "chasmy",
"Scorpaenidae", "toxihaemia", "Dictograph", "Quakerishly",
"deaf", "timbermonger", "strammel", "Thraupidae",
"seditious", "plerome", "Arneb", "eristically",
"serpentinic", "glaumrie", "socioromantic", "apocalypst",
"tartrous", "Bassaris", "angiolymphoma", "horsefly",
"kenno", "astronomize", "euphemious", "arsenide",
"untongued", "parabolicness", "uvanite", "helpless",
"gemmeous", "stormy", "templar", "erythrodextrin",
"comism", "interfraternal", "preparative", "parastas",
"frontoorbital", "Ophiosaurus", "diopside", "serosanguineous",
"ununiformly", "karyological", "collegian", "allotropic",
"depravity", "amylogenesis", "reformatory", "epidymides",
"pleurotropous", "trillium", "dastardliness", "coadvice",
"embryotic", "benthonic", "pomiferous", "figureheadship",
"Megaluridae", "Harpa", "frenal", "commotion",
"abthainry", "cobeliever", "manilla", "spiciferous",
"nativeness", "obispo", "monilioid", "biopsic",
"valvula", "enterostomy", "planosubulate", "pterostigma",
"lifter", "triradiated", "venialness", "tum",
"archistome", "tautness", "unswanlike", "antivenin",
"Lentibulariaceae", "Triphora", "angiopathy", "anta",
"Dawsonia", "becomma", "Yannigan", "winterproof",
"antalgol", "harr", "underogating", "ineunt",
"cornberry", "flippantness", "scyphostoma", "approbation",
"Ghent", "Macraucheniidae", "scabbiness", "unanatomized",
"photoelasticity", "eurythermal", "enation", "prepavement",
"flushgate", "subsequentially", "Edo", "antihero",
"Isokontae", "unforkedness", "porriginous", "daytime",
"nonexecutive", "trisilicic", "morphiomania", "paranephros",
"botchedly", "impugnation", "Dodecatheon", "obolus",
"unburnt", "provedore", "Aktistetae", "superindifference",
"Alethea", "Joachimite", "cyanophilous", "chorograph",
"brooky", "figured", "periclitation", "quintette",
"hondo", "ornithodelphous", "unefficient", "pondside",
"bogydom", "laurinoxylon", "Shiah", "unharmed",
"cartful", "noncrystallized", "abusiveness", "cromlech",
"japanned", "rizzomed", "underskin", "adscendent",
"allectory", "gelatinousness", "volcano", "uncompromisingly",
"cubit", "idiotize", "unfurbelowed", "undinted",
"magnetooptics", "Savitar", "diwata", "ramosopalmate",
"Pishquow", "tomorn", "apopenptic", "Haversian",
"Hysterocarpus", "ten", "outhue", "Bertat",
"mechanist", "asparaginic", "velaric", "tonsure",
"bubble", "Pyrales", "regardful", "glyphography",
"calabazilla", "shellworker", "stradametrical", "havoc",
"theologicopolitical", "sawdust", "diatomaceous", "jajman",
"temporomastoid", "Serrifera", "Ochnaceae", "aspersor",
"trailmaking", "Bishareen", "digitule", "octogynous",
"epididymitis", "smokefarthings", "bacillite", "overcrown",
"mangonism", "sirrah", "undecorated", "psychofugal",
"bismuthiferous", "rechar", "Lemuridae", "frameable",
"thiodiazole", "Scanic", "sportswomanship", "interruptedness",
"admissory", "osteopaedion", "tingly", "tomorrowness",
"ethnocracy", "trabecular", "vitally", "fossilism",
"adz", "metopon", "prefatorial", "expiscate",
"diathermacy", "chronist", "nigh", "generalizable",
"hysterogen", "aurothiosulphuric", "whitlowwort", "downthrust",
"Protestantize", "monander", "Itea", "chronographic",
"silicize", "Dunlop", "eer", "componental",
"spot", "pamphlet", "antineuritic", "paradisean",
"interruptor", "debellator", "overcultured", "Florissant",
"hyocholic", "pneumatotherapy", "tailoress", "rave",
"unpeople", "Sebastian", "thermanesthesia", "Coniferae",
"swacking", "posterishness", "ethmopalatal", "whittle",
"analgize", "scabbardless", "naught", "symbiogenetically",
"trip", "parodist", "columniform", "trunnel",
"yawler", "goodwill", "pseudohalogen", "swangy",
"cervisial", "mediateness", "genii", "imprescribable",
"pony", "consumptional", "carposporangial", "poleax",
"bestill", "subfebrile", "sapphiric", "arrowworm",
"qualminess", "ultraobscure", "thorite", "Fouquieria",
"Bermudian", "prescriber", "elemicin", "warlike",
"semiangle", "rotular", "misthread", "returnability",
"seraphism", "precostal", "quarried", "Babylonism",
"sangaree", "seelful", "placatory", "pachydermous",
"bozal", "galbulus", "spermaphyte", "cumbrousness",
"pope", "signifier", "Endomycetaceae", "shallowish",
"sequacity", "periarthritis", "bathysphere", "pentosuria",
"Dadaism", "spookdom", "Consolamentum", "afterpressure",
"mutter", "louse", "ovoviviparous", "corbel",
"metastoma", "biventer", "Hydrangea", "hogmace",
"seizing", "nonsuppressed", "oratorize", "uncarefully",
"benzothiofuran", "penult", "balanocele", "macropterous",
"dishpan", "marten", "absvolt", "jirble",
"parmelioid", "airfreighter", "acocotl", "archesporial",
"hypoplastral", "preoral", "quailberry", "cinque",
"terrestrially", "stroking", "limpet", "moodishness",
"canicule", "archididascalian", "pompiloid", "overstaid",
"introducer", "Italical", "Christianopaganism", "prescriptible",
"subofficer", "danseuse", "cloy", "saguran",
"frictionlessly", "deindividualization", "Bulanda", "ventricous",
"subfoliar", "basto", "scapuloradial", "suspend",
"stiffish", "Sphenodontidae", "eternal", "verbid",
"mammonish", "upcushion", "barkometer", "concretion",
"preagitate", "incomprehensible", "tristich", "visceral",
"hemimelus", "patroller", "stentorophonic", "pinulus",
"kerykeion", "brutism", "monstership", "merciful",
"overinstruct", "defensibly", "bettermost", "splenauxe",
"Mormyrus", "unreprimanded", "taver", "ell",
"proacquittal", "infestation", "overwoven", "Lincolnlike",
"chacona", "Tamil", "classificational", "lebensraum",
"reeveland", "intuition", "Whilkut", "focaloid",
"Eleusinian", "micromembrane", "byroad", "nonrepetition",
"bacterioblast", "brag", "ribaldrous", "phytoma",
"counteralliance", "pelvimetry", "pelf", "relaster",
"thermoresistant", "aneurism", "molossic", "euphonym",
"upswell", "ladhood", "phallaceous", "inertly",
"gunshop", "stereotypography", "laryngic", "refasten",
"twinling", "oflete", "hepatorrhaphy", "electrotechnics",
"cockal", "guitarist", "topsail", "Cimmerianism",
"larklike", "Llandovery", "pyrocatechol", "immatchable",
"chooser", "metrocratic", "craglike", "quadrennial",
"nonpoisonous", "undercolored", "knob", "ultratense",
"balladmonger", "slait", "sialadenitis", "bucketer",
"magnificently", "unstipulated", "unscourged", "unsupercilious",
"packsack", "pansophism", "soorkee", "percent",
"subirrigate", "champer", "metapolitics", "spherulitic",
"involatile", "metaphonical", "stachyuraceous", "speckedness",
"bespin", "proboscidiform", "gul", "squit",
"yeelaman", "peristeropode", "opacousness", "shibuichi",
"retinize", "yote", "misexposition", "devilwise",
"pumpkinification", "vinny", "bonze", "glossing",
"decardinalize", "transcortical", "serphoid", "deepmost",
"guanajuatite", "wemless", "arval", "lammy",
"Effie", "Saponaria", "tetrahedral", "prolificy",
"excerpt", "dunkadoo", "Spencerism", "insatiately",
"Gilaki", "oratorship", "arduousness", "unbashfulness",
"Pithecolobium", "unisexuality", "veterinarian", "detractive",
"liquidity", "acidophile", "proauction", "sural",
"totaquina", "Vichyite", "uninhabitedness", "allegedly",
"Gothish", "manny", "Inger", "flutist",
"ticktick", "Ludgatian", "homotransplant", "orthopedical",
"diminutively", "monogoneutic", "Kenipsim", "sarcologist",
"drome", "stronghearted", "Fameuse", "Swaziland",
"alen", "chilblain", "beatable", "agglomeratic",
"constitutor", "tendomucoid", "porencephalous", "arteriasis",
"boser", "tantivy", "rede", "lineamental",
"uncontradictableness", "homeotypical", "masa", "folious",
"dosseret", "neurodegenerative", "subtransverse", "Chiasmodontidae",
"palaeotheriodont", "unstressedly", "chalcites", "piquantness",
"lampyrine", "Aplacentalia", "projecting", "elastivity",
"isopelletierin", "bladderwort", "strander", "almud",
"iniquitously", "theologal", "bugre", "chargeably",
"imperceptivity", "meriquinoidal", "mesophyte", "divinator",
"perfunctory", "counterappellant", "synovial", "charioteer",
"crystallographical", "comprovincial", "infrastapedial", "pleasurehood",
"inventurous", "ultrasystematic", "subangulated", "supraoesophageal",
"Vaishnavism", "transude", "chrysochrous", "ungrave",
"reconciliable", "uninterpleaded", "erlking", "wherefrom",
"aprosopia", "antiadiaphorist", "metoxazine", "incalculable",
"umbellic", "predebit", "foursquare", "unimmortal",
"nonmanufacture", "slangy", "predisputant", "familist",
"preaffiliate", "friarhood", "corelysis", "zoonitic",
"halloo", "paunchy", "neuromimesis", "aconitine",
"hackneyed", "unfeeble", "cubby", "autoschediastical",
"naprapath", "lyrebird", "inexistency", "leucophoenicite",
"ferrogoslarite", "reperuse", "uncombable", "tambo",
"propodiale", "diplomatize", "Russifier", "clanned",
"corona", "michigan", "nonutilitarian", "transcorporeal",
"bought", "Cercosporella", "stapedius", "glandularly",
"pictorially", "weism", "disilane", "rainproof",
"Caphtor", "scrubbed", "oinomancy", "pseudoxanthine",
"nonlustrous", "redesertion", "Oryzorictinae", "gala",
"Mycogone", "reappreciate", "cyanoguanidine", "seeingness",
"breadwinner", "noreast", "furacious", "epauliere",
"omniscribent", "Passiflorales", "uninductive", "inductivity",
"Orbitolina", "Semecarpus", "migrainoid", "steprelationship",
"phlogisticate", "mesymnion", "sloped", "edificator",
"beneficent", "culm", "paleornithology", "unurban",
"throbless", "amplexifoliate", "sesquiquintile", "sapience",
"astucious", "dithery", "boor", "ambitus",
"scotching", "uloid", "uncompromisingness", "hoove",
"waird", "marshiness", "Jerusalem", "mericarp",
"unevoked", "benzoperoxide", "outguess", "pyxie",
"hymnic", "euphemize", "mendacity", "erythremia",
"rosaniline", "unchatteled", "lienteria", "Bushongo",
"dialoguer", "unrepealably", "rivethead", "antideflation",
"vinegarish", "manganosiderite", "doubtingness", "ovopyriform",
"Cephalodiscus", "Muscicapa", "Animalivora", "angina",
"planispheric", "ipomoein", "cuproiodargyrite", "sandbox",
"scrat", "Munnopsidae", "shola", "pentafid",
"overstudiousness", "times", "nonprofession", "appetible",
"valvulotomy", "goladar", "uniarticular", "oxyterpene",
"unlapsing", "omega", "trophonema", "seminonflammable",
"circumzenithal", "starer", "depthwise", "liberatress",
"unleavened", "unrevolting", "groundneedle", "topline",
"wandoo", "umangite", "ordinant", "unachievable",
"oversand", "snare", "avengeful", "unexplicit",
"mustafina", "sonable", "rehabilitative", "eulogization",
"papery", "technopsychology", "impressor", "cresylite",
"entame", "transudatory", "scotale", "pachydermatoid",
"imaginary", "yeat", "slipped", "stewardship",
"adatom", "cockstone", "skyshine", "heavenful",
"comparability", "exprobratory", "dermorhynchous", "parquet",
"cretaceous", "vesperal", "raphis", "undangered",
"Glecoma", "engrain", "counteractively", "Zuludom",
"orchiocatabasis", "Auriculariales", "warriorwise", "extraorganismal",
"overbuilt", "alveolite", "tetchy", "terrificness",
"widdle", "unpremonished", "rebilling", "sequestrum",
"equiconvex", "heliocentricism", "catabaptist", "okonite",
"propheticism", "helminthagogic", "calycular", "giantly",
"wingable", "golem", "unprovided", "commandingness",
"greave", "haply", "doina", "depressingly",
"subdentate", "impairment", "decidable", "neurotrophic",
"unpredict", "bicorporeal", "pendulant", "flatman",
"intrabred", "toplike", "Prosobranchiata", "farrantly",
"toxoplasmosis", "gorilloid", "dipsomaniacal", "aquiline",
"atlantite", "ascitic", "perculsive", "prospectiveness",
"saponaceous", "centrifugalization", "dinical", "infravaginal",
"beadroll", "affaite", "Helvidian", "tickleproof",
"abstractionism", "enhedge", "outwealth", "overcontribute",
"coldfinch", "gymnastic", "Pincian", "Munychian",
"codisjunct", "quad", "coracomandibular", "phoenicochroite",
"amender", "selectivity", "putative", "semantician",
"lophotrichic", "Spatangoidea", "saccharogenic", "inferent",
"Triconodonta", "arrendation", "sheepskin", "taurocolla",
"bunghole", "Machiavel", "triakistetrahedral", "dehairer",
"prezygapophysial", "cylindric", "pneumonalgia", "sleigher",
"emir", "Socraticism", "licitness", "massedly",
"instructiveness", "sturdied", "redecrease", "starosta",
"evictor", "orgiastic", "squdge", "meloplasty",
"Tsonecan", "repealableness", "swoony", "myesthesia",
"molecule", "autobiographist", "reciprocation", "refective",
"unobservantness", "tricae", "ungouged", "floatability",
"Mesua", "fetlocked", "chordacentrum", "sedentariness",
"various", "laubanite", "nectopod", "zenick",
"sequentially", "analgic", "biodynamics", "posttraumatic",
"nummi", "pyroacetic", "bot", "redescend",
"dispermy", "undiffusive", "circular", "trillion",
"Uraniidae", "ploration", "discipular", "potentness",
"sud", "Hu", "Eryon", "plugger",
"subdrainage", "jharal", "abscission", "supermarket",
"countergabion", "glacierist", "lithotresis", "minniebush",
"zanyism", "eucalypteol", "sterilely", "unrealize",
"unpatched", "hypochondriacism", "critically", "cheesecutter",
};
}
| 26,738 | 43.051071 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestNoJobSetupCleanup.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.File;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.mapred.HadoopTestCase;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
import org.junit.Ignore;
@Ignore
public class TestNoJobSetupCleanup extends HadoopTestCase {
private static String TEST_ROOT_DIR =
new File(System.getProperty("test.build.data","/tmp"))
.toURI().toString().replace(' ', '+');
private final Path inDir = new Path(TEST_ROOT_DIR, "./wc/input");
private final Path outDir = new Path(TEST_ROOT_DIR, "./wc/output");
public TestNoJobSetupCleanup() throws IOException {
super(HadoopTestCase.CLUSTER_MR , HadoopTestCase.LOCAL_FS, 2, 2);
}
private Job submitAndValidateJob(Configuration conf, int numMaps, int numReds)
throws IOException, InterruptedException, ClassNotFoundException {
Job job = MapReduceTestUtil.createJob(conf, inDir, outDir,
numMaps, numReds);
job.setJobSetupCleanupNeeded(false);
job.setOutputFormatClass(MyOutputFormat.class);
job.waitForCompletion(true);
assertTrue(job.isSuccessful());
assertTrue(job.getTaskReports(TaskType.JOB_SETUP).length == 0);
assertTrue(job.getTaskReports(TaskType.JOB_CLEANUP).length == 0);
assertTrue(job.getTaskReports(TaskType.MAP).length == numMaps);
assertTrue(job.getTaskReports(TaskType.REDUCE).length == numReds);
FileSystem fs = FileSystem.get(conf);
assertTrue("Job output directory doesn't exit!", fs.exists(outDir));
// job commit done only in cleanup
// therefore output should still be in temp location
String tempWorkingPathStr = outDir + Path.SEPARATOR + "_temporary"
+ Path.SEPARATOR + "0";
Path tempWorkingPath = new Path(tempWorkingPathStr);
FileStatus[] list = fs.listStatus(tempWorkingPath, new OutputFilter());
int numPartFiles = numReds == 0 ? numMaps : numReds;
assertTrue("Number of part-files is " + list.length + " and not "
+ numPartFiles, list.length == numPartFiles);
return job;
}
public void testNoJobSetupCleanup() throws Exception {
try {
Configuration conf = createJobConf();
// run a job without job-setup and cleanup
submitAndValidateJob(conf, 1, 1);
// run a map only job.
submitAndValidateJob(conf, 1, 0);
// run empty job without job setup and cleanup
submitAndValidateJob(conf, 0, 0);
// run empty job without job setup and cleanup, with non-zero reduces
submitAndValidateJob(conf, 0, 1);
} finally {
tearDown();
}
}
public static class MyOutputFormat extends TextOutputFormat {
public void checkOutputSpecs(JobContext job)
throws FileAlreadyExistsException, IOException{
super.checkOutputSpecs(job);
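      // Job-setup tasks are disabled in these tests, so call the committer's
      // setupJob() here to create the _temporary working directory that
      // submitAndValidateJob() later inspects.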
// creating dummy TaskAttemptID
TaskAttemptID tid = new TaskAttemptID("jt", 1, TaskType.JOB_SETUP, 0, 0);
getOutputCommitter(new TaskAttemptContextImpl(job.getConfiguration(), tid)).
setupJob(job);
}
}
private static class OutputFilter implements PathFilter {
public boolean accept(Path path) {
return !(path.getName().startsWith("_"));
}
}
}
| 4,306 | 38.513761 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestYarnClientProtocolProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.io.IOException;
import java.nio.ByteBuffer;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.LocalJobRunner;
import org.apache.hadoop.mapred.ResourceMgrDelegate;
import org.apache.hadoop.mapred.YARNRunner;
import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse;
import org.apache.hadoop.yarn.client.api.impl.YarnClientImpl;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.junit.Test;
public class TestYarnClientProtocolProvider extends TestCase {
private static final RecordFactory recordFactory = RecordFactoryProvider.
getRecordFactory(null);
@Test
public void testClusterWithYarnClientProtocolProvider() throws Exception {
Configuration conf = new Configuration(false);
Cluster cluster = null;
try {
cluster = new Cluster(conf);
} catch (Exception e) {
throw new Exception(
"Failed to initialize a local runner w/o a cluster framework key", e);
}
try {
assertTrue("client is not a LocalJobRunner",
cluster.getClient() instanceof LocalJobRunner);
} finally {
if (cluster != null) {
cluster.close();
}
}
try {
conf = new Configuration();
conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
cluster = new Cluster(conf);
ClientProtocol client = cluster.getClient();
assertTrue("client is a YARNRunner", client instanceof YARNRunner);
} catch (IOException e) {
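      // An IOException from creating the YARN-backed client is tolerated here;
      // the assertion above only applies when creation succeeds.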
} finally {
if (cluster != null) {
cluster.close();
}
}
}
@Test
public void testClusterGetDelegationToken() throws Exception {
Configuration conf = new Configuration(false);
Cluster cluster = null;
try {
conf = new Configuration();
conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
cluster = new Cluster(conf);
YARNRunner yrunner = (YARNRunner) cluster.getClient();
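      // Wire a mocked ApplicationClientProtocol into the YarnClient so that
      // cluster.getDelegationToken() below returns the canned token without
      // contacting a real ResourceManager.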
GetDelegationTokenResponse getDTResponse =
recordFactory.newRecordInstance(GetDelegationTokenResponse.class);
org.apache.hadoop.yarn.api.records.Token rmDTToken = recordFactory.newRecordInstance(
org.apache.hadoop.yarn.api.records.Token.class);
rmDTToken.setIdentifier(ByteBuffer.wrap(new byte[2]));
rmDTToken.setKind("Testclusterkind");
rmDTToken.setPassword(ByteBuffer.wrap("testcluster".getBytes()));
rmDTToken.setService("0.0.0.0:8032");
getDTResponse.setRMDelegationToken(rmDTToken);
final ApplicationClientProtocol cRMProtocol = mock(ApplicationClientProtocol.class);
when(cRMProtocol.getDelegationToken(any(
GetDelegationTokenRequest.class))).thenReturn(getDTResponse);
ResourceMgrDelegate rmgrDelegate = new ResourceMgrDelegate(
new YarnConfiguration(conf)) {
@Override
protected void serviceStart() throws Exception {
assertTrue(this.client instanceof YarnClientImpl);
((YarnClientImpl) this.client).setRMClient(cRMProtocol);
}
};
yrunner.setResourceMgrDelegate(rmgrDelegate);
Token t = cluster.getDelegationToken(new Text(" "));
assertTrue("Token kind is instead " + t.getKind().toString(),
"Testclusterkind".equals(t.getKind().toString()));
} finally {
if (cluster != null) {
cluster.close();
}
}
}
}
| 4,791 | 35.580153 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Map;
import java.util.TreeMap;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRClientCluster;
import org.apache.hadoop.mapred.MiniMRClientClusterFactory;
import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.MiniYARNCluster;
import org.mortbay.util.ajax.JSON;
/**
* This class drives the creation of a mini-cluster on the local machine. By
* default, a MiniDFSCluster and MiniMRCluster are spawned on the first
* available ports that are found.
*
* A series of command line flags controls the startup cluster options.
*
* This class can dump a Hadoop configuration and some basic metadata (in JSON)
* into a text file.
*
 * To shut down the cluster, kill the process.
*/
public class MiniHadoopClusterManager {
private static final Log LOG = LogFactory
.getLog(MiniHadoopClusterManager.class);
private MiniMRClientCluster mr;
private MiniDFSCluster dfs;
private String writeDetails;
private int numNodeManagers;
private int numDataNodes;
private int nnPort;
private int rmPort;
private int jhsPort;
private StartupOption dfsOpts;
private boolean noDFS;
private boolean noMR;
private String fs;
private String writeConfig;
private JobConf conf;
/**
* Creates configuration options object.
*/
@SuppressWarnings("static-access")
private Options makeOptions() {
Options options = new Options();
options
.addOption("nodfs", false, "Don't start a mini DFS cluster")
.addOption("nomr", false, "Don't start a mini MR cluster")
.addOption("nodemanagers", true,
"How many nodemanagers to start (default 1)")
.addOption("datanodes", true, "How many datanodes to start (default 1)")
.addOption("format", false, "Format the DFS (default false)")
.addOption("nnport", true, "NameNode port (default 0--we choose)")
.addOption(
"namenode",
true,
"URL of the namenode (default "
+ "is either the DFS cluster or a temporary dir)")
.addOption("rmport", true,
"ResourceManager port (default 0--we choose)")
.addOption("jhsport", true,
"JobHistoryServer port (default 0--we choose)")
.addOption(
OptionBuilder.hasArgs().withArgName("property=value")
.withDescription("Options to pass into configuration object")
.create("D"))
.addOption(
OptionBuilder.hasArg().withArgName("path").withDescription(
"Save configuration to this XML file.").create("writeConfig"))
.addOption(
OptionBuilder.hasArg().withArgName("path").withDescription(
"Write basic information to this JSON file.").create(
"writeDetails"))
.addOption(
OptionBuilder.withDescription("Prints option help.").create("help"));
return options;
}
/**
* Main entry-point.
*
* @throws URISyntaxException
*/
public void run(String[] args) throws IOException, URISyntaxException {
if (!parseArguments(args)) {
return;
}
start();
sleepForever();
}
private void sleepForever() {
while (true) {
try {
Thread.sleep(1000 * 60);
      } catch (InterruptedException ignored) {
// nothing
}
}
}
/**
* Starts DFS and MR clusters, as specified in member-variable options. Also
* writes out configuration and details, if requested.
*
* @throws IOException
* @throws FileNotFoundException
* @throws URISyntaxException
*/
public void start() throws IOException, FileNotFoundException,
URISyntaxException {
if (!noDFS) {
dfs = new MiniDFSCluster.Builder(conf).nameNodePort(nnPort)
.numDataNodes(numDataNodes).startupOption(dfsOpts).build();
LOG.info("Started MiniDFSCluster -- namenode on port "
+ dfs.getNameNodePort());
}
if (!noMR) {
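      // Choose the default FileSystem for the MR cluster: the mini DFS if one
      // was started, the -namenode URI if given, otherwise a temporary local
      // directory.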
if (fs == null && dfs != null) {
fs = dfs.getFileSystem().getUri().toString();
} else if (fs == null) {
fs = "file:///tmp/minimr-" + System.nanoTime();
}
FileSystem.setDefaultUri(conf, new URI(fs));
// Instruct the minicluster to use fixed ports, so user will know which
// ports to use when communicating with the cluster.
conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, true);
conf.setBoolean(JHAdminConfig.MR_HISTORY_MINICLUSTER_FIXED_PORTS, true);
conf.set(YarnConfiguration.RM_ADDRESS, MiniYARNCluster.getHostname()
+ ":" + this.rmPort);
conf.set(JHAdminConfig.MR_HISTORY_ADDRESS, MiniYARNCluster.getHostname()
+ ":" + this.jhsPort);
mr = MiniMRClientClusterFactory.create(this.getClass(), numNodeManagers,
conf);
LOG.info("Started MiniMRCluster");
}
if (writeConfig != null) {
FileOutputStream fos = new FileOutputStream(new File(writeConfig));
conf.writeXml(fos);
fos.close();
}
if (writeDetails != null) {
Map<String, Object> map = new TreeMap<String, Object>();
if (dfs != null) {
map.put("namenode_port", dfs.getNameNodePort());
}
if (mr != null) {
map.put("resourcemanager_port", mr.getConfig().get(
YarnConfiguration.RM_ADDRESS).split(":")[1]);
}
FileWriter fw = new FileWriter(new File(writeDetails));
fw.write(new JSON().toJSON(map));
fw.close();
}
}
/**
* Shuts down in-process clusters.
*
* @throws IOException
*/
public void stop() throws IOException {
if (mr != null) {
mr.stop();
}
if (dfs != null) {
dfs.shutdown();
}
}
/**
* Parses arguments and fills out the member variables.
*
* @param args
* Command-line arguments.
* @return true on successful parse; false to indicate that the program should
* exit.
*/
private boolean parseArguments(String[] args) {
Options options = makeOptions();
CommandLine cli;
try {
CommandLineParser parser = new GnuParser();
cli = parser.parse(options, args);
} catch (ParseException e) {
LOG.warn("options parsing failed: " + e.getMessage());
new HelpFormatter().printHelp("...", options);
return false;
}
if (cli.hasOption("help")) {
new HelpFormatter().printHelp("...", options);
return false;
}
if (cli.getArgs().length > 0) {
for (String arg : cli.getArgs()) {
System.err.println("Unrecognized option: " + arg);
new HelpFormatter().printHelp("...", options);
return false;
}
}
// MR
noMR = cli.hasOption("nomr");
numNodeManagers = intArgument(cli, "nodemanagers", 1);
rmPort = intArgument(cli, "rmport", 0);
jhsPort = intArgument(cli, "jhsport", 0);
fs = cli.getOptionValue("namenode");
// HDFS
noDFS = cli.hasOption("nodfs");
numDataNodes = intArgument(cli, "datanodes", 1);
nnPort = intArgument(cli, "nnport", 0);
dfsOpts = cli.hasOption("format") ? StartupOption.FORMAT
: StartupOption.REGULAR;
// Runner
writeDetails = cli.getOptionValue("writeDetails");
writeConfig = cli.getOptionValue("writeConfig");
// General
conf = new JobConf();
updateConfiguration(conf, cli.getOptionValues("D"));
return true;
}
/**
* Updates configuration based on what's given on the command line.
*
* @param conf
* The configuration object
* @param keyvalues
* An array of interleaved key value pairs.
*/
private void updateConfiguration(JobConf conf, String[] keyvalues) {
int num_confs_updated = 0;
if (keyvalues != null) {
for (String prop : keyvalues) {
String[] keyval = prop.split("=", 2);
if (keyval.length == 2) {
conf.set(keyval[0], keyval[1]);
num_confs_updated++;
} else {
LOG.warn("Ignoring -D option " + prop);
}
}
}
LOG.info("Updated " + num_confs_updated
+ " configuration settings from command line.");
}
/**
* Extracts an integer argument with specified default value.
*/
private int intArgument(CommandLine cli, String argName, int default_) {
String o = cli.getOptionValue(argName);
if (o == null) {
return default_;
} else {
return Integer.parseInt(o);
}
}
/**
* Starts a MiniHadoopCluster.
*
* @throws URISyntaxException
*/
public static void main(String[] args) throws IOException, URISyntaxException {
new MiniHadoopClusterManager().run(args);
}
}
| 10,323 | 31.567823 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestClientProtocolProviderImpls.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.LocalJobRunner;
import org.apache.hadoop.mapred.YARNRunner;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
import org.junit.Test;
public class TestClientProtocolProviderImpls {
@Test
public void testClusterWithLocalClientProvider() throws Exception {
Configuration conf = new Configuration();
conf.set(MRConfig.FRAMEWORK_NAME, "local");
Cluster cluster = new Cluster(conf);
assertTrue(cluster.getClient() instanceof LocalJobRunner);
cluster.close();
}
@Test
public void testClusterWithJTClientProvider() throws Exception {
Configuration conf = new Configuration();
try {
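      // No provider for the MR1 "classic" framework is expected on this
      // classpath, so Cluster construction should fail.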
conf.set(MRConfig.FRAMEWORK_NAME, "classic");
conf.set(JTConfig.JT_IPC_ADDRESS, "local");
new Cluster(conf);
fail("Cluster with classic Framework name should not use "
+ "local JT address");
} catch (IOException e) {
assertTrue(e.getMessage().contains(
"Cannot initialize Cluster. Please check"));
}
}
@Test
public void testClusterWithYarnClientProvider() throws Exception {
Configuration conf = new Configuration();
conf.set(MRConfig.FRAMEWORK_NAME, "yarn");
Cluster cluster = new Cluster(conf);
assertTrue(cluster.getClient() instanceof YARNRunner);
cluster.close();
}
@Test
public void testClusterException() {
Configuration conf = new Configuration();
try {
conf.set(MRConfig.FRAMEWORK_NAME, "incorrect");
new Cluster(conf);
fail("Cluster should not be initialized with incorrect framework name");
} catch (IOException e) {
assertTrue(e.getMessage().contains(
"Cannot initialize Cluster. Please check"));
}
}
}
| 2,720 | 33.0125 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MapReduceTestUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.BufferedReader;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.text.NumberFormat;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.TaskLog;
import org.apache.hadoop.mapred.Utils;
import org.apache.hadoop.mapred.TaskLog.LogName;
import org.apache.hadoop.mapred.TaskLog.Reader;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
import org.apache.hadoop.util.ReflectionUtils;
/**
* Utility methods used in various Job Control unit tests.
*/
public class MapReduceTestUtil {
public static final Log LOG =
LogFactory.getLog(MapReduceTestUtil.class.getName());
static private Random rand = new Random();
private static NumberFormat idFormat = NumberFormat.getInstance();
static {
idFormat.setMinimumIntegerDigits(4);
idFormat.setGroupingUsed(false);
}
/**
* Cleans the data from the passed Path in the passed FileSystem.
*
* @param fs FileSystem to delete data from.
* @param dirPath Path to be deleted.
* @throws IOException If an error occurs cleaning the data.
*/
public static void cleanData(FileSystem fs, Path dirPath)
throws IOException {
fs.delete(dirPath, true);
}
/**
* Generates a string of random digits.
*
* @return A random string.
*/
public static String generateRandomWord() {
return idFormat.format(rand.nextLong());
}
/**
* Generates a line of random text.
*
* @return A line of random text.
*/
public static String generateRandomLine() {
long r = rand.nextLong() % 7;
long n = r + 20;
StringBuffer sb = new StringBuffer();
for (int i = 0; i < n; i++) {
sb.append(generateRandomWord()).append(" ");
}
sb.append("\n");
return sb.toString();
}
/**
* Generates random data consisting of 10000 lines.
*
* @param fs FileSystem to create data in.
* @param dirPath Path to create the data in.
* @throws IOException If an error occurs creating the data.
*/
public static void generateData(FileSystem fs, Path dirPath)
throws IOException {
FSDataOutputStream out = fs.create(new Path(dirPath, "data.txt"));
for (int i = 0; i < 10000; i++) {
String line = generateRandomLine();
out.write(line.getBytes("UTF-8"));
}
out.close();
}
/**
* Creates a simple copy job.
*
* @param conf Configuration object
* @param outdir Output directory.
* @param indirs Comma separated input directories.
* @return Job initialized for a data copy job.
* @throws Exception If an error occurs creating job configuration.
*/
public static Job createCopyJob(Configuration conf, Path outdir,
Path... indirs) throws Exception {
conf.setInt(MRJobConfig.NUM_MAPS, 3);
Job theJob = Job.getInstance(conf);
theJob.setJobName("DataMoveJob");
FileInputFormat.setInputPaths(theJob, indirs);
theJob.setMapperClass(DataCopyMapper.class);
FileOutputFormat.setOutputPath(theJob, outdir);
theJob.setOutputKeyClass(Text.class);
theJob.setOutputValueClass(Text.class);
theJob.setReducerClass(DataCopyReducer.class);
theJob.setNumReduceTasks(1);
return theJob;
}
/**
* Creates a simple fail job.
*
* @param conf Configuration object
* @param outdir Output directory.
* @param indirs Comma separated input directories.
* @return Job initialized for a simple fail job.
* @throws Exception If an error occurs creating job configuration.
*/
public static Job createFailJob(Configuration conf, Path outdir,
Path... indirs) throws Exception {
FileSystem fs = outdir.getFileSystem(conf);
if (fs.exists(outdir)) {
fs.delete(outdir, true);
}
conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, 2);
Job theJob = Job.getInstance(conf);
theJob.setJobName("Fail-Job");
FileInputFormat.setInputPaths(theJob, indirs);
theJob.setMapperClass(FailMapper.class);
theJob.setReducerClass(Reducer.class);
theJob.setNumReduceTasks(0);
FileOutputFormat.setOutputPath(theJob, outdir);
theJob.setOutputKeyClass(Text.class);
theJob.setOutputValueClass(Text.class);
return theJob;
}
/**
   * Creates a simple kill job.
*
* @param conf Configuration object
* @param outdir Output directory.
* @param indirs Comma separated input directories.
* @return Job initialized for a simple kill job.
* @throws Exception If an error occurs creating job configuration.
*/
public static Job createKillJob(Configuration conf, Path outdir,
Path... indirs) throws Exception {
Job theJob = Job.getInstance(conf);
theJob.setJobName("Kill-Job");
FileInputFormat.setInputPaths(theJob, indirs);
theJob.setMapperClass(KillMapper.class);
theJob.setReducerClass(Reducer.class);
theJob.setNumReduceTasks(0);
FileOutputFormat.setOutputPath(theJob, outdir);
theJob.setOutputKeyClass(Text.class);
theJob.setOutputValueClass(Text.class);
return theJob;
}
/**
* Simple Mapper and Reducer implementation which copies data it reads in.
*/
public static class DataCopyMapper extends
Mapper<LongWritable, Text, Text, Text> {
public void map(LongWritable key, Text value, Context context)
throws IOException, InterruptedException {
context.write(new Text(key.toString()), value);
}
}
public static class DataCopyReducer extends Reducer<Text, Text, Text, Text> {
public void reduce(Text key, Iterator<Text> values, Context context)
throws IOException, InterruptedException {
Text dumbKey = new Text("");
while (values.hasNext()) {
Text data = values.next();
context.write(dumbKey, data);
}
}
}
// Mapper that fails
public static class FailMapper extends
Mapper<WritableComparable<?>, Writable, WritableComparable<?>, Writable> {
public void map(WritableComparable<?> key, Writable value, Context context)
throws IOException {
throw new RuntimeException("failing map");
}
}
// Mapper that sleeps for a long time.
// Used for running a job that will be killed
public static class KillMapper extends
Mapper<WritableComparable<?>, Writable, WritableComparable<?>, Writable> {
public void map(WritableComparable<?> key, Writable value, Context context)
throws IOException {
try {
Thread.sleep(1000000);
} catch (InterruptedException e) {
// Do nothing
}
}
}
public static class IncomparableKey implements WritableComparable<Object> {
public void write(DataOutput out) { }
public void readFields(DataInput in) { }
public int compareTo(Object o) {
throw new RuntimeException("Should never see this.");
}
}
public static class FakeSplit extends InputSplit implements Writable {
public void write(DataOutput out) throws IOException { }
public void readFields(DataInput in) throws IOException { }
public long getLength() { return 0L; }
public String[] getLocations() { return new String[0]; }
}
public static class Fake_IF<K,V>
extends InputFormat<K, V>
implements Configurable {
public Fake_IF() { }
public List<InputSplit> getSplits(JobContext context) {
List<InputSplit> ret = new ArrayList<InputSplit>();
ret.add(new FakeSplit());
return ret;
}
public static void setKeyClass(Configuration conf, Class<?> k) {
conf.setClass("test.fakeif.keyclass", k, WritableComparable.class);
}
public static void setValClass(Configuration job, Class<?> v) {
job.setClass("test.fakeif.valclass", v, Writable.class);
}
protected Class<? extends K> keyclass;
protected Class<? extends V> valclass;
Configuration conf = null;
@SuppressWarnings("unchecked")
public void setConf(Configuration conf) {
this.conf = conf;
keyclass = (Class<? extends K>) conf.getClass("test.fakeif.keyclass",
NullWritable.class, WritableComparable.class);
valclass = (Class<? extends V>) conf.getClass("test.fakeif.valclass",
NullWritable.class, WritableComparable.class);
}
public Configuration getConf() {
return conf;
}
public RecordReader<K,V> createRecordReader(
InputSplit ignored, TaskAttemptContext context) {
return new RecordReader<K,V>() {
public boolean nextKeyValue() throws IOException { return false; }
public void initialize(InputSplit split, TaskAttemptContext context)
throws IOException, InterruptedException {}
public K getCurrentKey() {
return null;
}
public V getCurrentValue() {
return null;
}
public void close() throws IOException { }
public float getProgress() throws IOException { return 0.0f; }
};
}
}
public static class Fake_RR<K, V> extends RecordReader<K,V> {
private Class<? extends K> keyclass;
private Class<? extends V> valclass;
public boolean nextKeyValue() throws IOException { return false; }
@SuppressWarnings("unchecked")
public void initialize(InputSplit split, TaskAttemptContext context)
throws IOException, InterruptedException {
Configuration conf = context.getConfiguration();
keyclass = (Class<? extends K>) conf.getClass("test.fakeif.keyclass",
NullWritable.class, WritableComparable.class);
valclass = (Class<? extends V>) conf.getClass("test.fakeif.valclass",
NullWritable.class, WritableComparable.class);
}
public K getCurrentKey() {
return ReflectionUtils.newInstance(keyclass, null);
}
public V getCurrentValue() {
return ReflectionUtils.newInstance(valclass, null);
}
public void close() throws IOException { }
public float getProgress() throws IOException { return 0.0f; }
}
public static Job createJob(Configuration conf, Path inDir, Path outDir,
int numInputFiles, int numReds) throws IOException {
String input = "The quick brown fox\n" + "has many silly\n"
+ "red fox sox\n";
return createJob(conf, inDir, outDir, numInputFiles, numReds, input);
}
public static Job createJob(Configuration conf, Path inDir, Path outDir,
int numInputFiles, int numReds, String input) throws IOException {
Job job = Job.getInstance(conf);
FileSystem fs = FileSystem.get(conf);
if (fs.exists(outDir)) {
fs.delete(outDir, true);
}
if (fs.exists(inDir)) {
fs.delete(inDir, true);
}
fs.mkdirs(inDir);
for (int i = 0; i < numInputFiles; ++i) {
DataOutputStream file = fs.create(new Path(inDir, "part-" + i));
file.writeBytes(input);
file.close();
}
FileInputFormat.setInputPaths(job, inDir);
FileOutputFormat.setOutputPath(job, outDir);
job.setNumReduceTasks(numReds);
return job;
}
public static TaskAttemptContext createDummyMapTaskAttemptContext(
Configuration conf) {
TaskAttemptID tid = new TaskAttemptID("jt", 1, TaskType.MAP, 0, 0);
conf.set(MRJobConfig.TASK_ATTEMPT_ID, tid.toString());
return new TaskAttemptContextImpl(conf, tid);
}
public static StatusReporter createDummyReporter() {
return new StatusReporter() {
public void setStatus(String s) {
}
public void progress() {
}
@Override
public float getProgress() {
return 0;
}
public Counter getCounter(Enum<?> name) {
return new Counters().findCounter(name);
}
public Counter getCounter(String group, String name) {
return new Counters().findCounter(group, name);
}
};
}
// Return output of MR job by reading from the given output directory
public static String readOutput(Path outDir, Configuration conf)
throws IOException {
FileSystem fs = outDir.getFileSystem(conf);
StringBuffer result = new StringBuffer();
Path[] fileList = FileUtil.stat2Paths(fs.listStatus(outDir,
new Utils.OutputFileUtils.OutputFilesFilter()));
for (Path outputFile : fileList) {
LOG.info("Path" + ": "+ outputFile);
BufferedReader file =
new BufferedReader(new InputStreamReader(fs.open(outputFile)));
String line = file.readLine();
while (line != null) {
result.append(line);
result.append("\n");
line = file.readLine();
}
file.close();
}
return result.toString();
}
/**
* Reads tasklog and returns it as string after trimming it.
*
* @param filter
* Task log filter; can be STDOUT, STDERR, SYSLOG, DEBUGOUT, PROFILE
* @param taskId
* The task id for which the log has to collected
* @param isCleanup
* whether the task is a cleanup attempt or not.
* @return task log as string
* @throws IOException
*/
public static String readTaskLog(TaskLog.LogName filter,
org.apache.hadoop.mapred.TaskAttemptID taskId, boolean isCleanup)
throws IOException {
// string buffer to store task log
StringBuffer result = new StringBuffer();
int res;
// reads the whole tasklog into inputstream
InputStream taskLogReader = new TaskLog.Reader(taskId, filter, 0, -1,
isCleanup);
// construct string log from inputstream.
byte[] b = new byte[65536];
while (true) {
res = taskLogReader.read(b);
if (res > 0) {
        result.append(new String(b, 0, res));
} else {
break;
}
}
taskLogReader.close();
// trim the string and return it
String str = result.toString();
str = str.trim();
return str;
}
}
| 15,448 | 32.010684 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestLargeSort.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.MiniMRClientCluster;
import org.apache.hadoop.mapred.MiniMRClientClusterFactory;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import java.io.IOException;
import static org.junit.Assert.assertEquals;
public class TestLargeSort {
MiniMRClientCluster cluster;
@Before
public void setup() throws IOException {
Configuration conf = new YarnConfiguration();
cluster = MiniMRClientClusterFactory.create(this.getClass(), 2, conf);
cluster.start();
}
@After
public void cleanup() throws IOException {
if (cluster != null) {
cluster.stop();
cluster = null;
}
}
@Test
public void testLargeSort() throws Exception {
String[] args = new String[0];
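    // Exercise small, medium, and larger-than-1GB sort buffers.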
int[] ioSortMbs = {128, 256, 1536};
for (int ioSortMb : ioSortMbs) {
Configuration conf = new Configuration(cluster.getConfig());
conf.setInt(MRJobConfig.MAP_MEMORY_MB, 2048);
conf.setInt(MRJobConfig.IO_SORT_MB, ioSortMb);
conf.setInt(LargeSorter.NUM_MAP_TASKS, 1);
conf.setInt(LargeSorter.MBS_PER_MAP, ioSortMb);
assertEquals("Large sort failed for " + ioSortMb, 0,
ToolRunner.run(conf, new LargeSorter(), args));
}
}
}
| 2,222 | 32.179104 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestNewCombinerGrouping.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import org.junit.Assert;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.junit.Test;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.HashSet;
import java.util.Set;
import java.util.UUID;
public class TestNewCombinerGrouping {
private static String TEST_ROOT_DIR =
new File("build", UUID.randomUUID().toString()).getAbsolutePath();
public static class Map extends
Mapper<LongWritable, Text, Text, LongWritable> {
@Override
protected void map(LongWritable key, Text value,
Context context)
throws IOException, InterruptedException {
String v = value.toString();
String k = v.substring(0, v.indexOf(","));
v = v.substring(v.indexOf(",") + 1);
context.write(new Text(k), new LongWritable(Long.parseLong(v)));
}
}
public static class Reduce extends
Reducer<Text, LongWritable, Text, LongWritable> {
@Override
protected void reduce(Text key, Iterable<LongWritable> values,
Context context)
throws IOException, InterruptedException {
LongWritable maxValue = null;
for (LongWritable value : values) {
if (maxValue == null) {
maxValue = value;
} else if (value.compareTo(maxValue) > 0) {
maxValue = value;
}
}
context.write(key, maxValue);
}
}
public static class Combiner extends Reduce {
}
public static class GroupComparator implements RawComparator<Text> {
@Override
public int compare(byte[] bytes, int i, int i2, byte[] bytes2, int i3,
int i4) {
byte[] b1 = new byte[i2];
System.arraycopy(bytes, i, b1, 0, i2);
byte[] b2 = new byte[i4];
System.arraycopy(bytes2, i3, b2, 0, i4);
return compare(new Text(new String(b1)), new Text(new String(b2)));
}
@Override
public int compare(Text o1, Text o2) {
String s1 = o1.toString();
String s2 = o2.toString();
s1 = s1.substring(0, s1.indexOf("|"));
s2 = s2.substring(0, s2.indexOf("|"));
return s1.compareTo(s2);
}
}
@Test
public void testCombiner() throws Exception {
if (!new File(TEST_ROOT_DIR).mkdirs()) {
throw new RuntimeException("Could not create test dir: " + TEST_ROOT_DIR);
}
File in = new File(TEST_ROOT_DIR, "input");
if (!in.mkdirs()) {
throw new RuntimeException("Could not create test dir: " + in);
}
File out = new File(TEST_ROOT_DIR, "output");
PrintWriter pw = new PrintWriter(new FileWriter(new File(in, "data.txt")));
pw.println("A|a,1");
pw.println("A|b,2");
pw.println("B|a,3");
pw.println("B|b,4");
pw.println("B|c,5");
pw.close();
JobConf conf = new JobConf();
conf.set("mapreduce.framework.name", "local");
Job job = new Job(conf);
TextInputFormat.setInputPaths(job, new Path(in.getPath()));
TextOutputFormat.setOutputPath(job, new Path(out.getPath()));
job.setMapperClass(Map.class);
job.setReducerClass(Reduce.class);
job.setInputFormatClass(TextInputFormat.class);
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(LongWritable.class);
job.setOutputFormatClass(TextOutputFormat.class);
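    // Group keys by the prefix before '|' in both the reduce and combine
    // phases, so records such as A|a and A|b fall into the same group.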
job.setGroupingComparatorClass(GroupComparator.class);
job.setCombinerKeyGroupingComparatorClass(GroupComparator.class);
job.setCombinerClass(Combiner.class);
job.getConfiguration().setInt("min.num.spills.for.combine", 0);
job.submit();
job.waitForCompletion(false);
if (job.isSuccessful()) {
Counters counters = job.getCounters();
long combinerInputRecords = counters.findCounter(
"org.apache.hadoop.mapreduce.TaskCounter",
"COMBINE_INPUT_RECORDS").getValue();
long combinerOutputRecords = counters.findCounter(
"org.apache.hadoop.mapreduce.TaskCounter",
"COMBINE_OUTPUT_RECORDS").getValue();
Assert.assertTrue(combinerInputRecords > 0);
Assert.assertTrue(combinerInputRecords > combinerOutputRecords);
BufferedReader br = new BufferedReader(new FileReader(
new File(out, "part-r-00000")));
Set<String> output = new HashSet<String>();
String line = br.readLine();
Assert.assertNotNull(line);
output.add(line.substring(0, 1) + line.substring(4, 5));
line = br.readLine();
Assert.assertNotNull(line);
output.add(line.substring(0, 1) + line.substring(4, 5));
line = br.readLine();
Assert.assertNull(line);
br.close();
Set<String> expected = new HashSet<String>();
expected.add("A2");
expected.add("B5");
Assert.assertEquals(expected, output);
} else {
Assert.fail("Job failed");
}
}
}
| 5,935 | 32.162011 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMapReduceLazyOutput.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.util.Arrays;
import java.util.List;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.mapred.Utils;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.LazyOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
/**
* A JUnit test to test the Map-Reduce framework's feature to create part
* files only if there is an explicit output.collect. This helps in preventing
 * 0-byte files.
*/
public class TestMapReduceLazyOutput extends TestCase {
private static final int NUM_HADOOP_SLAVES = 3;
private static final int NUM_MAPS_PER_NODE = 2;
private static final Path INPUT = new Path("/testlazy/input");
private static final List<String> input =
Arrays.asList("All","Roads","Lead","To","Hadoop");
public static class TestMapper
extends Mapper<LongWritable, Text, LongWritable, Text>{
public void map(LongWritable key, Text value, Context context
) throws IOException, InterruptedException {
String id = context.getTaskAttemptID().toString();
// Mapper 0 does not output anything
if (!id.endsWith("0_0")) {
context.write(key, value);
}
}
}
public static class TestReducer
extends Reducer<LongWritable,Text,LongWritable,Text> {
public void reduce(LongWritable key, Iterable<Text> values,
Context context) throws IOException, InterruptedException {
String id = context.getTaskAttemptID().toString();
// Reducer 0 does not output anything
if (!id.endsWith("0_0")) {
for (Text val: values) {
context.write(key, val);
}
}
}
}
private static void runTestLazyOutput(Configuration conf, Path output,
int numReducers, boolean createLazily)
throws Exception {
Job job = Job.getInstance(conf, "Test-Lazy-Output");
FileInputFormat.setInputPaths(job, INPUT);
FileOutputFormat.setOutputPath(job, output);
job.setJarByClass(TestMapReduceLazyOutput.class);
job.setInputFormatClass(TextInputFormat.class);
job.setOutputKeyClass(LongWritable.class);
job.setOutputValueClass(Text.class);
job.setNumReduceTasks(numReducers);
job.setMapperClass(TestMapper.class);
job.setReducerClass(TestReducer.class);
if (createLazily) {
LazyOutputFormat.setOutputFormatClass(job, TextOutputFormat.class);
} else {
job.setOutputFormatClass(TextOutputFormat.class);
}
assertTrue(job.waitForCompletion(true));
}
public void createInput(FileSystem fs, int numMappers) throws Exception {
for (int i =0; i < numMappers; i++) {
OutputStream os = fs.create(new Path(INPUT,
"text" + i + ".txt"));
Writer wr = new OutputStreamWriter(os);
for(String inp : input) {
wr.write(inp+"\n");
}
wr.close();
}
}
public void testLazyOutput() throws Exception {
MiniDFSCluster dfs = null;
MiniMRCluster mr = null;
FileSystem fileSys = null;
try {
Configuration conf = new Configuration();
// Start the mini-MR and mini-DFS clusters
dfs = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_HADOOP_SLAVES)
.build();
fileSys = dfs.getFileSystem();
mr = new MiniMRCluster(NUM_HADOOP_SLAVES, fileSys.getUri().toString(), 1);
int numReducers = 2;
int numMappers = NUM_HADOOP_SLAVES * NUM_MAPS_PER_NODE;
createInput(fileSys, numMappers);
Path output1 = new Path("/testlazy/output1");
// Test 1.
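      // With lazy output enabled, reducer 0 (which emits nothing) should not
      // create a part file, so only numReducers - 1 outputs are expected.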
runTestLazyOutput(mr.createJobConf(), output1,
numReducers, true);
Path[] fileList =
FileUtil.stat2Paths(fileSys.listStatus(output1,
new Utils.OutputFileUtils.OutputFilesFilter()));
for(int i=0; i < fileList.length; ++i) {
System.out.println("Test1 File list[" + i + "]" + ": "+ fileList[i]);
}
assertTrue(fileList.length == (numReducers - 1));
// Test 2. 0 Reducers, maps directly write to the output files
Path output2 = new Path("/testlazy/output2");
runTestLazyOutput(mr.createJobConf(), output2, 0, true);
fileList =
FileUtil.stat2Paths(fileSys.listStatus(output2,
new Utils.OutputFileUtils.OutputFilesFilter()));
for(int i=0; i < fileList.length; ++i) {
System.out.println("Test2 File list[" + i + "]" + ": "+ fileList[i]);
}
assertTrue(fileList.length == numMappers - 1);
// Test 3. 0 Reducers, but flag is turned off
Path output3 = new Path("/testlazy/output3");
runTestLazyOutput(mr.createJobConf(), output3, 0, false);
fileList =
FileUtil.stat2Paths(fileSys.listStatus(output3,
new Utils.OutputFileUtils.OutputFilesFilter()));
for(int i=0; i < fileList.length; ++i) {
System.out.println("Test3 File list[" + i + "]" + ": "+ fileList[i]);
}
assertTrue(fileList.length == numMappers);
} finally {
if (dfs != null) { dfs.shutdown(); }
if (mr != null) { mr.shutdown();
}
}
}
}
| 6,506 | 33.068063 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/RandomWriter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.ClusterStatus;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
* This program uses map/reduce to just run a distributed job where there is
 * no interaction between the tasks and each task writes a large unsorted
* random binary sequence file of BytesWritable.
* In order for this program to generate data for terasort with 10-byte keys
* and 90-byte values, have the following config:
* <pre>{@code
* <?xml version="1.0"?>
* <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
* <configuration>
* <property>
* <name>mapreduce.randomwriter.minkey</name>
* <value>10</value>
* </property>
* <property>
* <name>mapreduce.randomwriter.maxkey</name>
* <value>10</value>
* </property>
* <property>
* <name>mapreduce.randomwriter.minvalue</name>
* <value>90</value>
* </property>
* <property>
* <name>mapreduce.randomwriter.maxvalue</name>
* <value>90</value>
* </property>
* <property>
* <name>mapreduce.randomwriter.totalbytes</name>
* <value>1099511627776</value>
* </property>
* </configuration>}</pre>
* Equivalently, {@link RandomWriter} also supports all the above options
* and ones supported by {@link GenericOptionsParser} via the command-line.
*/
public class RandomWriter extends Configured implements Tool {
public static final String TOTAL_BYTES = "mapreduce.randomwriter.totalbytes";
public static final String BYTES_PER_MAP =
"mapreduce.randomwriter.bytespermap";
public static final String MAPS_PER_HOST =
"mapreduce.randomwriter.mapsperhost";
public static final String MAX_VALUE = "mapreduce.randomwriter.maxvalue";
public static final String MIN_VALUE = "mapreduce.randomwriter.minvalue";
public static final String MIN_KEY = "mapreduce.randomwriter.minkey";
public static final String MAX_KEY = "mapreduce.randomwriter.maxkey";
/**
* User counters
*/
static enum Counters { RECORDS_WRITTEN, BYTES_WRITTEN }
/**
* A custom input format that creates virtual inputs of a single string
* for each map.
*/
static class RandomInputFormat extends InputFormat<Text, Text> {
/**
* Generate the requested number of file splits, with the filename
* set to the filename of the output file.
*/
public List<InputSplit> getSplits(JobContext job) throws IOException {
List<InputSplit> result = new ArrayList<InputSplit>();
Path outDir = FileOutputFormat.getOutputPath(job);
int numSplits =
job.getConfiguration().getInt(MRJobConfig.NUM_MAPS, 1);
for(int i=0; i < numSplits; ++i) {
result.add(new FileSplit(new Path(outDir, "dummy-split-" + i), 0, 1,
(String[])null));
}
return result;
}
/**
* Return a single record (filename, "") where the filename is taken from
* the file split.
*/
static class RandomRecordReader extends RecordReader<Text, Text> {
Path name;
Text key = null;
Text value = new Text();
public RandomRecordReader(Path p) {
name = p;
}
public void initialize(InputSplit split,
TaskAttemptContext context)
throws IOException, InterruptedException {
}
public boolean nextKeyValue() {
if (name != null) {
key = new Text();
key.set(name.getName());
name = null;
return true;
}
return false;
}
public Text getCurrentKey() {
return key;
}
public Text getCurrentValue() {
return value;
}
public void close() {}
public float getProgress() {
return 0.0f;
}
}
public RecordReader<Text, Text> createRecordReader(InputSplit split,
TaskAttemptContext context) throws IOException, InterruptedException {
return new RandomRecordReader(((FileSplit) split).getPath());
}
}
static class RandomMapper extends Mapper<WritableComparable, Writable,
BytesWritable, BytesWritable> {
private long numBytesToWrite;
private int minKeySize;
private int keySizeRange;
private int minValueSize;
private int valueSizeRange;
private Random random = new Random();
private BytesWritable randomKey = new BytesWritable();
private BytesWritable randomValue = new BytesWritable();
private void randomizeBytes(byte[] data, int offset, int length) {
for(int i=offset + length - 1; i >= offset; --i) {
data[i] = (byte) random.nextInt(256);
}
}
/**
* Given an output filename, write a bunch of random records to it.
*/
public void map(WritableComparable key,
Writable value,
Context context) throws IOException,InterruptedException {
int itemCount = 0;
while (numBytesToWrite > 0) {
int keyLength = minKeySize +
(keySizeRange != 0 ? random.nextInt(keySizeRange) : 0);
randomKey.setSize(keyLength);
randomizeBytes(randomKey.getBytes(), 0, randomKey.getLength());
int valueLength = minValueSize +
(valueSizeRange != 0 ? random.nextInt(valueSizeRange) : 0);
randomValue.setSize(valueLength);
randomizeBytes(randomValue.getBytes(), 0, randomValue.getLength());
context.write(randomKey, randomValue);
numBytesToWrite -= keyLength + valueLength;
context.getCounter(Counters.BYTES_WRITTEN).increment(keyLength + valueLength);
context.getCounter(Counters.RECORDS_WRITTEN).increment(1);
if (++itemCount % 200 == 0) {
context.setStatus("wrote record " + itemCount + ". " +
numBytesToWrite + " bytes left.");
}
}
context.setStatus("done with " + itemCount + " records.");
}
/**
     * Save the values out of the configuration that we need to write
* the data.
*/
@Override
public void setup(Context context) {
Configuration conf = context.getConfiguration();
numBytesToWrite = conf.getLong(BYTES_PER_MAP,
1*1024*1024*1024);
minKeySize = conf.getInt(MIN_KEY, 10);
keySizeRange =
conf.getInt(MAX_KEY, 1000) - minKeySize;
minValueSize = conf.getInt(MIN_VALUE, 0);
valueSizeRange =
conf.getInt(MAX_VALUE, 20000) - minValueSize;
}
}
/**
* This is the main routine for launching a distributed random write job.
* It runs 10 maps/node and each node writes 1 gig of data to a DFS file.
* The reduce doesn't do anything.
*
* @throws IOException
*/
public int run(String[] args) throws Exception {
if (args.length == 0) {
System.out.println("Usage: writer <out-dir>");
ToolRunner.printGenericCommandUsage(System.out);
return 2;
}
Path outDir = new Path(args[0]);
Configuration conf = getConf();
JobClient client = new JobClient(conf);
ClusterStatus cluster = client.getClusterStatus();
int numMapsPerHost = conf.getInt(MAPS_PER_HOST, 10);
long numBytesToWritePerMap = conf.getLong(BYTES_PER_MAP,
1*1024*1024*1024);
if (numBytesToWritePerMap == 0) {
System.err.println("Cannot have" + BYTES_PER_MAP + " set to 0");
return -2;
}
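    // Derive the number of maps from the requested total output size; by
    // default each tracker node runs 10 maps writing 1 GB each.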
long totalBytesToWrite = conf.getLong(TOTAL_BYTES,
numMapsPerHost*numBytesToWritePerMap*cluster.getTaskTrackers());
int numMaps = (int) (totalBytesToWrite / numBytesToWritePerMap);
if (numMaps == 0 && totalBytesToWrite > 0) {
numMaps = 1;
conf.setLong(BYTES_PER_MAP, totalBytesToWrite);
}
conf.setInt(MRJobConfig.NUM_MAPS, numMaps);
Job job = Job.getInstance(conf);
job.setJarByClass(RandomWriter.class);
job.setJobName("random-writer");
FileOutputFormat.setOutputPath(job, outDir);
job.setOutputKeyClass(BytesWritable.class);
job.setOutputValueClass(BytesWritable.class);
job.setInputFormatClass(RandomInputFormat.class);
job.setMapperClass(RandomMapper.class);
job.setReducerClass(Reducer.class);
job.setOutputFormatClass(SequenceFileOutputFormat.class);
System.out.println("Running " + numMaps + " maps.");
// reducer NONE
job.setNumReduceTasks(0);
Date startTime = new Date();
System.out.println("Job started: " + startTime);
int ret = job.waitForCompletion(true) ? 0 : 1;
Date endTime = new Date();
System.out.println("Job ended: " + endTime);
System.out.println("The job took " +
(endTime.getTime() - startTime.getTime()) /1000 +
" seconds.");
return ret;
}
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new Configuration(), new RandomWriter(), args);
System.exit(res);
}
}
| 10,585 | 34.52349 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/LargeSorter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
* A sample MR job that helps with testing large sorts in the MapReduce
* framework. Mapper generates the specified number of bytes and pipes them
* to the reducers.
*
* <code>mapreduce.large-sorter.mbs-per-map</code> specifies the amount
* of data (in MBs) to generate per map. By default, this is twice the value
* of <code>mapreduce.task.io.sort.mb</code> or 1 GB if that is not specified
* either.
* <code>mapreduce.large-sorter.map-tasks</code> specifies the number of map
* tasks to run.
* <code>mapreduce.large-sorter.reduce-tasks</code> specifies the number of
* reduce tasks to run.
*/
public class LargeSorter extends Configured implements Tool {
private static final String LS_PREFIX = "mapreduce.large-sorter.";
public static final String MBS_PER_MAP = LS_PREFIX + "mbs-per-map";
public static final String NUM_MAP_TASKS = LS_PREFIX + "map-tasks";
public static final String NUM_REDUCE_TASKS = LS_PREFIX + "reduce-tasks";
private static final String MAX_VALUE = LS_PREFIX + "max-value";
private static final String MIN_VALUE = LS_PREFIX + "min-value";
private static final String MIN_KEY = LS_PREFIX + "min-key";
private static final String MAX_KEY = LS_PREFIX + "max-key";
/**
* User counters
*/
static enum Counters { RECORDS_WRITTEN, BYTES_WRITTEN }
/**
* A custom input format that creates virtual inputs of a single string
* for each map.
*/
static class RandomInputFormat extends InputFormat<Text, Text> {
/**
* Generate the requested number of file splits, with the filename
* set to the filename of the output file.
*/
public List<InputSplit> getSplits(JobContext job) throws IOException {
List<InputSplit> result = new ArrayList<InputSplit>();
Path outDir = FileOutputFormat.getOutputPath(job);
int numSplits =
job.getConfiguration().getInt(MRJobConfig.NUM_MAPS, 1);
for(int i=0; i < numSplits; ++i) {
result.add(new FileSplit(
new Path(outDir, "dummy-split-" + i), 0, 1, null));
}
return result;
}
/**
* Return a single record (filename, "") where the filename is taken from
* the file split.
*/
static class RandomRecordReader extends RecordReader<Text, Text> {
Path name;
Text key = null;
Text value = new Text();
public RandomRecordReader(Path p) {
name = p;
}
public void initialize(InputSplit split,
TaskAttemptContext context)
throws IOException, InterruptedException {
}
public boolean nextKeyValue() {
if (name != null) {
key = new Text();
key.set(name.getName());
name = null;
return true;
}
return false;
}
public Text getCurrentKey() {
return key;
}
public Text getCurrentValue() {
return value;
}
public void close() {}
public float getProgress() {
return 0.0f;
}
}
public RecordReader<Text, Text> createRecordReader(InputSplit split,
TaskAttemptContext context) throws IOException, InterruptedException {
return new RandomRecordReader(((FileSplit) split).getPath());
}
}
static class RandomMapper extends Mapper<WritableComparable, Writable,
BytesWritable, BytesWritable> {
private long numBytesToWrite;
private int minKeySize;
private int keySizeRange;
private int minValueSize;
private int valueSizeRange;
private Random random = new Random();
private BytesWritable randomKey = new BytesWritable();
private BytesWritable randomValue = new BytesWritable();
private void randomizeBytes(byte[] data, int offset, int length) {
for(int i=offset + length - 1; i >= offset; --i) {
data[i] = (byte) random.nextInt(256);
}
}
@Override
public void setup(Context context) {
Configuration conf = context.getConfiguration();
numBytesToWrite = 1024 * 1024 * conf.getLong(MBS_PER_MAP,
2 * conf.getInt(MRJobConfig.IO_SORT_MB, 512));
minKeySize = conf.getInt(MIN_KEY, 10);
keySizeRange =
conf.getInt(MAX_KEY, 1000) - minKeySize;
minValueSize = conf.getInt(MIN_VALUE, 0);
valueSizeRange =
conf.getInt(MAX_VALUE, 20000) - minValueSize;
}
/**
* Given an output filename, write a bunch of random records to it.
*/
public void map(WritableComparable key,
Writable value,
Context context) throws IOException,InterruptedException {
int itemCount = 0;
while (numBytesToWrite > 0) {
int keyLength = minKeySize +
(keySizeRange != 0 ? random.nextInt(keySizeRange) : 0);
randomKey.setSize(keyLength);
randomizeBytes(randomKey.getBytes(), 0, randomKey.getLength());
int valueLength = minValueSize +
(valueSizeRange != 0 ? random.nextInt(valueSizeRange) : 0);
randomValue.setSize(valueLength);
randomizeBytes(randomValue.getBytes(), 0, randomValue.getLength());
context.write(randomKey, randomValue);
numBytesToWrite -= keyLength + valueLength;
context.getCounter(Counters.BYTES_WRITTEN).increment(keyLength + valueLength);
context.getCounter(Counters.RECORDS_WRITTEN).increment(1);
if (++itemCount % 200 == 0) {
context.setStatus("wrote record " + itemCount + ". " +
numBytesToWrite + " bytes left.");
}
}
context.setStatus("done with " + itemCount + " records.");
}
}
static class Discarder extends Reducer<BytesWritable, BytesWritable,
WritableComparable, Writable> {
@Override
public void reduce(BytesWritable key, Iterable<BytesWritable> values,
Context context) throws IOException, InterruptedException {
// Do nothing
}
}
private void verifyNotZero(Configuration conf, String config) {
if (conf.getInt(config, 1) <= 0) {
throw new IllegalArgumentException(config + "should be > 0");
}
}
public int run(String[] args) throws Exception {
Path outDir = new Path(
LargeSorter.class.getName() + System.currentTimeMillis());
Configuration conf = getConf();
verifyNotZero(conf, MBS_PER_MAP);
verifyNotZero(conf, NUM_MAP_TASKS);
conf.setInt(MRJobConfig.NUM_MAPS, conf.getInt(NUM_MAP_TASKS, 2));
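    // Size each map container to hold the sort buffer: at least twice
    // io.sort.mb, leaving roughly 200 MB outside the heap for JVM overhead.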
int ioSortMb = conf.getInt(MRJobConfig.IO_SORT_MB, 512);
int mapMb = Math.max(2 * ioSortMb, conf.getInt(MRJobConfig.MAP_MEMORY_MB,
MRJobConfig.DEFAULT_MAP_MEMORY_MB));
conf.setInt(MRJobConfig.MAP_MEMORY_MB, mapMb);
conf.set(MRJobConfig.MAP_JAVA_OPTS, "-Xmx" + (mapMb - 200) + "m");
Job job = Job.getInstance(conf);
job.setJarByClass(LargeSorter.class);
job.setJobName("large-sorter");
FileOutputFormat.setOutputPath(job, outDir);
job.setOutputKeyClass(BytesWritable.class);
job.setOutputValueClass(BytesWritable.class);
job.setInputFormatClass(RandomInputFormat.class);
job.setMapperClass(RandomMapper.class);
job.setReducerClass(Discarder.class);
job.setOutputFormatClass(SequenceFileOutputFormat.class);
job.setNumReduceTasks(conf.getInt(NUM_REDUCE_TASKS, 1));
Date startTime = new Date();
System.out.println("Job started: " + startTime);
int ret = 1;
try {
ret = job.waitForCompletion(true) ? 0 : 1;
} finally {
FileSystem.get(conf).delete(outDir, true);
}
Date endTime = new Date();
System.out.println("Job ended: " + endTime);
System.out.println("The job took " +
(endTime.getTime() - startTime.getTime()) /1000 +
" seconds.");
return ret;
}
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new Configuration(), new LargeSorter(), args);
System.exit(res);
}
}
| 9,565 | 34.561338 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMapCollection.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import junit.framework.TestCase;
import java.io.IOException;
import java.io.DataInput;
import java.io.DataOutput;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.*;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.util.ReflectionUtils;
public class TestMapCollection {
private static final Log LOG = LogFactory.getLog(
TestMapCollection.class.getName());
public static abstract class FillWritable implements Writable, Configurable {
private int len;
protected boolean disableRead;
private byte[] b;
private final Random r;
protected final byte fillChar;
public FillWritable(byte fillChar) {
this.fillChar = fillChar;
r = new Random();
final long seed = r.nextLong();
LOG.info("seed: " + seed);
r.setSeed(seed);
}
@Override
public Configuration getConf() {
return null;
}
public void setLength(int len) {
this.len = len;
}
public int compareTo(FillWritable o) {
if (o == this) return 0;
return len - o.len;
}
@Override
public int hashCode() {
return 37 * len;
}
@Override
public boolean equals(Object o) {
if (!(o instanceof FillWritable)) return false;
return 0 == compareTo((FillWritable)o);
}
@Override
public void readFields(DataInput in) throws IOException {
if (disableRead) {
return;
}
len = WritableUtils.readVInt(in);
for (int i = 0; i < len; ++i) {
assertEquals("Invalid byte at " + i, fillChar, in.readByte());
}
}
@Override
public void write(DataOutput out) throws IOException {
if (0 == len) {
return;
}
int written = 0;
if (!disableRead) {
WritableUtils.writeVInt(out, len);
written -= WritableUtils.getVIntSize(len);
}
if (len > 1024) {
if (null == b || b.length < len) {
b = new byte[2 * len];
}
Arrays.fill(b, fillChar);
do {
final int write = Math.min(len - written, r.nextInt(len));
out.write(b, 0, write);
written += write;
} while (written < len);
assertEquals(len, written);
} else {
for (int i = written; i < len; ++i) {
out.write(fillChar);
}
}
}
}
public static class KeyWritable
extends FillWritable implements WritableComparable<FillWritable> {
static final byte keyFill = (byte)('K' & 0xFF);
public KeyWritable() {
super(keyFill);
}
@Override
public void setConf(Configuration conf) {
disableRead = conf.getBoolean("test.disable.key.read", false);
}
}
public static class ValWritable extends FillWritable {
public ValWritable() {
super((byte)('V' & 0xFF));
}
@Override
public void setConf(Configuration conf) {
disableRead = conf.getBoolean("test.disable.val.read", false);
}
}
public static class VariableComparator
implements RawComparator<KeyWritable>, Configurable {
private boolean readLen;
public VariableComparator() { }
@Override
public void setConf(Configuration conf) {
readLen = !conf.getBoolean("test.disable.key.read", false);
}
@Override
public Configuration getConf() { return null; }
public int compare(KeyWritable k1, KeyWritable k2) {
return k1.compareTo(k2);
}
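    // The raw form below spot-checks that the serialized key bytes hold only
    // the key fill character and then orders keys purely by their serialized
    // length; the vint length prefix is only present when key reads are not
    // disabled via "test.disable.key.read".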
@Override
public int compare(byte[] b1, int s1, int l1,
byte[] b2, int s2, int l2) {
final int n1;
final int n2;
if (readLen) {
n1 = WritableUtils.decodeVIntSize(b1[s1]);
n2 = WritableUtils.decodeVIntSize(b2[s2]);
} else {
n1 = 0;
n2 = 0;
}
for (int i = s1 + n1; i < l1 - n1; ++i) {
assertEquals("Invalid key at " + s1, (int)KeyWritable.keyFill, b1[i]);
}
for (int i = s2 + n2; i < l2 - n2; ++i) {
assertEquals("Invalid key at " + s2, (int)KeyWritable.keyFill, b2[i]);
}
return l1 - l2;
}
}
public static class SpillReducer
extends Reducer<KeyWritable,ValWritable,NullWritable,NullWritable> {
private int numrecs;
private int expected;
@Override
protected void setup(Context job) {
numrecs = 0;
expected = job.getConfiguration().getInt("test.spillmap.records", 100);
}
@Override
protected void reduce(KeyWritable k, Iterable<ValWritable> values,
Context context) throws IOException, InterruptedException {
for (ValWritable val : values) {
++numrecs;
}
}
@Override
protected void cleanup(Context context)
throws IOException, InterruptedException {
assertEquals("Unexpected record count", expected, numrecs);
}
}
public static class FakeSplit extends InputSplit implements Writable {
@Override
public void write(DataOutput out) throws IOException { }
@Override
public void readFields(DataInput in) throws IOException { }
@Override
public long getLength() { return 0L; }
@Override
public String[] getLocations() { return new String[0]; }
}
public abstract static class RecordFactory implements Configurable {
public Configuration getConf() { return null; }
public abstract int keyLen(int i);
public abstract int valLen(int i);
}
public static class FixedRecordFactory extends RecordFactory {
private int keylen;
private int vallen;
public FixedRecordFactory() { }
public void setConf(Configuration conf) {
keylen = conf.getInt("test.fixedrecord.keylen", 0);
vallen = conf.getInt("test.fixedrecord.vallen", 0);
}
public int keyLen(int i) { return keylen; }
public int valLen(int i) { return vallen; }
public static void setLengths(Configuration conf, int keylen, int vallen) {
conf.setInt("test.fixedrecord.keylen", keylen);
conf.setInt("test.fixedrecord.vallen", vallen);
conf.setBoolean("test.disable.key.read", 0 == keylen);
conf.setBoolean("test.disable.val.read", 0 == vallen);
}
}
public static class FakeIF extends InputFormat<KeyWritable,ValWritable> {
public FakeIF() { }
@Override
public List<InputSplit> getSplits(JobContext ctxt) throws IOException {
final int numSplits = ctxt.getConfiguration().getInt(
"test.mapcollection.num.maps", -1);
List<InputSplit> splits = new ArrayList<InputSplit>(numSplits);
for (int i = 0; i < numSplits; ++i) {
splits.add(i, new FakeSplit());
}
return splits;
}
public RecordReader<KeyWritable,ValWritable> createRecordReader(
InputSplit ignored, final TaskAttemptContext taskContext) {
return new RecordReader<KeyWritable,ValWritable>() {
private RecordFactory factory;
private final KeyWritable key = new KeyWritable();
private final ValWritable val = new ValWritable();
private int current;
private int records;
@Override
public void initialize(InputSplit split, TaskAttemptContext context) {
final Configuration conf = context.getConfiguration();
key.setConf(conf);
val.setConf(conf);
factory = ReflectionUtils.newInstance(
conf.getClass("test.mapcollection.class",
FixedRecordFactory.class, RecordFactory.class), conf);
assertNotNull(factory);
current = 0;
records = conf.getInt("test.spillmap.records", 100);
}
@Override
public boolean nextKeyValue() {
key.setLength(factory.keyLen(current));
val.setLength(factory.valLen(current));
return current++ < records;
}
@Override
public KeyWritable getCurrentKey() { return key; }
@Override
public ValWritable getCurrentValue() { return val; }
@Override
public float getProgress() { return (float) current / records; }
@Override
public void close() {
assertEquals("Unexpected count", records, current - 1);
}
};
}
}
private static void runTest(String name, int keylen, int vallen,
int records, int ioSortMB, float spillPer)
throws Exception {
Configuration conf = new Configuration();
conf.setInt(Job.COMPLETION_POLL_INTERVAL_KEY, 100);
Job job = Job.getInstance(conf);
conf = job.getConfiguration();
conf.setInt(MRJobConfig.IO_SORT_MB, ioSortMB);
conf.set(MRJobConfig.MAP_SORT_SPILL_PERCENT, Float.toString(spillPer));
conf.setClass("test.mapcollection.class", FixedRecordFactory.class,
RecordFactory.class);
FixedRecordFactory.setLengths(conf, keylen, vallen);
conf.setInt("test.spillmap.records", records);
runTest(name, job);
}
private static void runTest(String name, Job job) throws Exception {
job.setNumReduceTasks(1);
job.getConfiguration().set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
job.getConfiguration().setInt(MRJobConfig.IO_SORT_FACTOR, 1000);
job.getConfiguration().set("fs.defaultFS", "file:///");
job.getConfiguration().setInt("test.mapcollection.num.maps", 1);
job.setInputFormatClass(FakeIF.class);
job.setOutputFormatClass(NullOutputFormat.class);
job.setMapperClass(Mapper.class);
job.setReducerClass(SpillReducer.class);
job.setMapOutputKeyClass(KeyWritable.class);
job.setMapOutputValueClass(ValWritable.class);
job.setSortComparatorClass(VariableComparator.class);
LOG.info("Running " + name);
assertTrue("Job failed!", job.waitForCompletion(false));
}
@Test
public void testValLastByte() throws Exception {
// last byte of record/key is the last/first byte in the spill buffer
runTest("vallastbyte", 128, 896, 1344, 1, 0.5f);
runTest("keylastbyte", 512, 1024, 896, 1, 0.5f);
}
@Test
public void testLargeRecords() throws Exception {
// maps emitting records larger than mapreduce.task.io.sort.mb
runTest("largerec", 100, 1024*1024, 5, 1, .8f);
runTest("largekeyzeroval", 1024*1024, 0, 5, 1, .8f);
}
@Test
public void testSpillPer2B() throws Exception {
// set non-default, 100% speculative spill boundary
runTest("fullspill2B", 1, 1, 10000, 1, 1.0f);
runTest("fullspill200B", 100, 100, 10000, 1, 1.0f);
runTest("fullspillbuf", 10 * 1024, 20 * 1024, 256, 1, 1.0f);
runTest("lt50perspill", 100, 100, 10000, 1, 0.3f);
}
@Test
public void testZeroVal() throws Exception {
// test key/value at zero-length
runTest("zeroval", 1, 0, 10000, 1, .8f);
runTest("zerokey", 0, 1, 10000, 1, .8f);
runTest("zerokeyval", 0, 0, 10000, 1, .8f);
runTest("zerokeyvalfull", 0, 0, 10000, 1, 1.0f);
}
@Test
public void testSingleRecord() throws Exception {
runTest("singlerecord", 100, 100, 1, 1, 1.0f);
runTest("zerokeyvalsingle", 0, 0, 1, 1, 1.0f);
}
@Test
public void testLowSpill() throws Exception {
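    // 1 MB buffer * 0.00390625 (= 1/256) gives a ~4096 byte soft limit, and
    // each record carries roughly 4000 + 96 = 4096 bytes of key/value data,
    // so nearly every record should force a spill on its own.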
runTest("lowspill", 4000, 96, 20, 1, 0.00390625f);
}
@Test
public void testSplitMetaSpill() throws Exception {
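    // 131072 tiny records produce roughly 2 MB of per-record accounting
    // metadata (at 16 bytes per record), far more than the 1 MB sort buffer,
    // so metadata rather than record bytes dominates the spills here.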
runTest("splitmetaspill", 7, 1, 131072, 1, 0.8f);
}
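  // StepFactory emits (prekey, preval)-sized records until the record index
  // passes steprec, then switches to (postkey, postval); the tests below use
  // it to change the record shape mid-stream.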
public static class StepFactory extends RecordFactory {
public int prekey;
public int postkey;
public int preval;
public int postval;
public int steprec;
public void setConf(Configuration conf) {
prekey = conf.getInt("test.stepfactory.prekey", 0);
postkey = conf.getInt("test.stepfactory.postkey", 0);
preval = conf.getInt("test.stepfactory.preval", 0);
postval = conf.getInt("test.stepfactory.postval", 0);
steprec = conf.getInt("test.stepfactory.steprec", 0);
}
public static void setLengths(Configuration conf, int prekey, int postkey,
int preval, int postval, int steprec) {
conf.setInt("test.stepfactory.prekey", prekey);
conf.setInt("test.stepfactory.postkey", postkey);
conf.setInt("test.stepfactory.preval", preval);
conf.setInt("test.stepfactory.postval", postval);
conf.setInt("test.stepfactory.steprec", steprec);
}
public int keyLen(int i) {
return i > steprec ? postkey : prekey;
}
public int valLen(int i) {
return i > steprec ? postval : preval;
}
}
@Test
public void testPostSpillMeta() throws Exception {
// write larger records until spill, then write records that generate
// no writes into the serialization buffer
Configuration conf = new Configuration();
conf.setInt(Job.COMPLETION_POLL_INTERVAL_KEY, 100);
Job job = Job.getInstance(conf);
conf = job.getConfiguration();
conf.setInt(MRJobConfig.IO_SORT_MB, 1);
// 2^20 * spill = 14336 bytes available post-spill, at most 896 meta
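    // i.e. 1048576 * (1 - 0.986328125) = 14336 bytes, which at 16 bytes of
    // accounting metadata per record leaves room for 14336 / 16 = 896 entries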
conf.set(MRJobConfig.MAP_SORT_SPILL_PERCENT, Float.toString(.986328125f));
conf.setClass("test.mapcollection.class", StepFactory.class,
RecordFactory.class);
StepFactory.setLengths(conf, 4000, 0, 96, 0, 252);
conf.setInt("test.spillmap.records", 1000);
conf.setBoolean("test.disable.key.read", true);
conf.setBoolean("test.disable.val.read", true);
runTest("postspillmeta", job);
}
@Test
public void testLargeRecConcurrent() throws Exception {
Configuration conf = new Configuration();
conf.setInt(Job.COMPLETION_POLL_INTERVAL_KEY, 100);
Job job = Job.getInstance(conf);
conf = job.getConfiguration();
conf.setInt(MRJobConfig.IO_SORT_MB, 1);
conf.set(MRJobConfig.MAP_SORT_SPILL_PERCENT, Float.toString(.986328125f));
conf.setClass("test.mapcollection.class", StepFactory.class,
RecordFactory.class);
StepFactory.setLengths(conf, 4000, 261120, 96, 1024, 251);
conf.setInt("test.spillmap.records", 255);
conf.setBoolean("test.disable.key.read", false);
conf.setBoolean("test.disable.val.read", false);
runTest("largeconcurrent", job);
}
public static class RandomFactory extends RecordFactory {
public int minkey;
public int maxkey;
public int minval;
public int maxval;
private final Random r = new Random();
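    // exp(U * ln(max)) with U uniform in [0, 1) yields lengths distributed
    // log-uniformly in [1, max), biasing the factory toward short records.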
private static int nextRand(Random r, int max) {
return (int)Math.exp(r.nextDouble() * Math.log(max));
}
public void setConf(Configuration conf) {
r.setSeed(conf.getLong("test.randomfactory.seed", 0L));
minkey = conf.getInt("test.randomfactory.minkey", 0);
maxkey = conf.getInt("test.randomfactory.maxkey", 0) - minkey;
minval = conf.getInt("test.randomfactory.minval", 0);
maxval = conf.getInt("test.randomfactory.maxval", 0) - minval;
}
public static void setLengths(Configuration conf, Random r, int max) {
int k1 = nextRand(r, max);
int k2 = nextRand(r, max);
      if (k1 > k2) {
        final int tmp = k1;
        k1 = k2;
        k2 = tmp;
      }
int v1 = nextRand(r, max);
int v2 = nextRand(r, max);
      if (v1 > v2) {
        final int tmp = v1;
        v1 = v2;
        v2 = tmp;
      }
setLengths(conf, k1, ++k2, v1, ++v2);
}
public static void setLengths(Configuration conf, int minkey, int maxkey,
int minval, int maxval) {
assert minkey < maxkey;
assert minval < maxval;
conf.setInt("test.randomfactory.minkey", minkey);
conf.setInt("test.randomfactory.maxkey", maxkey);
conf.setInt("test.randomfactory.minval", minval);
conf.setInt("test.randomfactory.maxval", maxval);
conf.setBoolean("test.disable.key.read", minkey == 0);
conf.setBoolean("test.disable.val.read", minval == 0);
}
public int keyLen(int i) {
return minkey + nextRand(r, maxkey - minkey);
}
public int valLen(int i) {
return minval + nextRand(r, maxval - minval);
}
}
@Test
public void testRandom() throws Exception {
Configuration conf = new Configuration();
conf.setInt(Job.COMPLETION_POLL_INTERVAL_KEY, 100);
Job job = Job.getInstance(conf);
conf = job.getConfiguration();
conf.setInt(MRJobConfig.IO_SORT_MB, 1);
conf.setClass("test.mapcollection.class", RandomFactory.class,
RecordFactory.class);
final Random r = new Random();
final long seed = r.nextLong();
LOG.info("SEED: " + seed);
r.setSeed(seed);
conf.set(MRJobConfig.MAP_SORT_SPILL_PERCENT,
Float.toString(Math.max(0.1f, r.nextFloat())));
RandomFactory.setLengths(conf, r, 1 << 14);
conf.setInt("test.spillmap.records", r.nextInt(500));
conf.setLong("test.randomfactory.seed", r.nextLong());
runTest("random", job);
}
@Test
public void testRandomCompress() throws Exception {
Configuration conf = new Configuration();
conf.setInt(Job.COMPLETION_POLL_INTERVAL_KEY, 100);
Job job = Job.getInstance(conf);
conf = job.getConfiguration();
conf.setInt(MRJobConfig.IO_SORT_MB, 1);
conf.setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS, true);
conf.setClass("test.mapcollection.class", RandomFactory.class,
RecordFactory.class);
final Random r = new Random();
final long seed = r.nextLong();
LOG.info("SEED: " + seed);
r.setSeed(seed);
conf.set(MRJobConfig.MAP_SORT_SPILL_PERCENT,
Float.toString(Math.max(0.1f, r.nextFloat())));
RandomFactory.setLengths(conf, r, 1 << 14);
conf.setInt("test.spillmap.records", r.nextInt(500));
conf.setLong("test.randomfactory.seed", r.nextLong());
runTest("randomCompress", job);
}
}
| 18,610 | 33.401109 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestCounters.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.IOException;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.junit.Test;
import static org.junit.Assert.*;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapreduce.counters.LimitExceededException;
import org.apache.hadoop.mapreduce.counters.Limits;
/**
* TestCounters checks the sanity and recoverability of {@code Counters}
*/
public class TestCounters {
static final Log LOG = LogFactory.getLog(TestCounters.class);
/**
* Verify counter value works
*/
@Test
public void testCounterValue() {
final int NUMBER_TESTS = 100;
final int NUMBER_INC = 10;
final Random rand = new Random();
for (int i = 0; i < NUMBER_TESTS; i++) {
long initValue = rand.nextInt();
long expectedValue = initValue;
Counter counter = new Counters().findCounter("test", "foo");
counter.setValue(initValue);
assertEquals("Counter value is not initialized correctly",
expectedValue, counter.getValue());
for (int j = 0; j < NUMBER_INC; j++) {
int incValue = rand.nextInt();
counter.increment(incValue);
expectedValue += incValue;
assertEquals("Counter value is not incremented correctly",
expectedValue, counter.getValue());
}
expectedValue = rand.nextInt();
counter.setValue(expectedValue);
assertEquals("Counter value is not set correctly",
expectedValue, counter.getValue());
}
}
@Test public void testLimits() {
for (int i = 0; i < 3; ++i) {
// make sure limits apply to separate containers
testMaxCounters(new Counters());
testMaxGroups(new Counters());
}
}
@Test public void testResetOnDeserialize() throws IOException {
// Allow only one counterGroup
Configuration conf = new Configuration();
conf.setInt(MRJobConfig.COUNTER_GROUPS_MAX_KEY, 1);
Limits.init(conf);
Counters countersWithOneGroup = new Counters();
countersWithOneGroup.findCounter("firstOf1Allowed", "First group");
boolean caughtExpectedException = false;
try {
countersWithOneGroup.findCounter("secondIsTooMany", "Second group");
}
    catch (LimitExceededException expected) {
caughtExpectedException = true;
}
assertTrue("Did not throw expected exception",
caughtExpectedException);
Counters countersWithZeroGroups = new Counters();
DataOutputBuffer out = new DataOutputBuffer();
countersWithZeroGroups.write(out);
DataInputBuffer in = new DataInputBuffer();
in.reset(out.getData(), out.getLength());
countersWithOneGroup.readFields(in);
// After reset one should be able to add a group
countersWithOneGroup.findCounter("firstGroupAfterReset", "After reset " +
"limit should be set back to zero");
}
@Test
public void testCountersIncrement() {
Counters fCounters = new Counters();
Counter fCounter = fCounters.findCounter(FRAMEWORK_COUNTER);
fCounter.setValue(100);
Counter gCounter = fCounters.findCounter("test", "foo");
gCounter.setValue(200);
Counters counters = new Counters();
counters.incrAllCounters(fCounters);
Counter counter;
for (CounterGroup cg : fCounters) {
CounterGroup group = counters.getGroup(cg.getName());
if (group.getName().equals("test")) {
counter = counters.findCounter("test", "foo");
assertEquals(200, counter.getValue());
} else {
counter = counters.findCounter(FRAMEWORK_COUNTER);
assertEquals(100, counter.getValue());
}
}
}
static final Enum<?> FRAMEWORK_COUNTER = TaskCounter.CPU_MILLISECONDS;
static final long FRAMEWORK_COUNTER_VALUE = 8;
static final String FS_SCHEME = "HDFS";
static final FileSystemCounter FS_COUNTER = FileSystemCounter.BYTES_READ;
static final long FS_COUNTER_VALUE = 10;
private void testMaxCounters(final Counters counters) {
LOG.info("counters max="+ Limits.getCountersMax());
for (int i = 0; i < Limits.getCountersMax(); ++i) {
counters.findCounter("test", "test"+ i);
}
setExpected(counters);
shouldThrow(LimitExceededException.class, new Runnable() {
public void run() {
counters.findCounter("test", "bad");
}
});
checkExpected(counters);
}
private void testMaxGroups(final Counters counters) {
LOG.info("counter groups max="+ Limits.getGroupsMax());
for (int i = 0; i < Limits.getGroupsMax(); ++i) {
// assuming COUNTERS_MAX > GROUPS_MAX
counters.findCounter("test"+ i, "test");
}
setExpected(counters);
shouldThrow(LimitExceededException.class, new Runnable() {
public void run() {
counters.findCounter("bad", "test");
}
});
checkExpected(counters);
}
private void setExpected(Counters counters) {
counters.findCounter(FRAMEWORK_COUNTER).setValue(FRAMEWORK_COUNTER_VALUE);
counters.findCounter(FS_SCHEME, FS_COUNTER).setValue(FS_COUNTER_VALUE);
}
private void checkExpected(Counters counters) {
assertEquals(FRAMEWORK_COUNTER_VALUE,
counters.findCounter(FRAMEWORK_COUNTER).getValue());
assertEquals(FS_COUNTER_VALUE,
counters.findCounter(FS_SCHEME, FS_COUNTER).getValue());
}
private void shouldThrow(Class<? extends Exception> ecls, Runnable runnable) {
try {
runnable.run();
} catch (Exception e) {
assertSame(ecls, e.getClass());
LOG.info("got expected: "+ e);
return;
}
assertTrue("Should've thrown "+ ecls.getSimpleName(), false);
}
}
| 6,548 | 33.109375 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestValueIterReset.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.util.ArrayList;
import java.util.StringTokenizer;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.Utils;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
/**
* A JUnit test to test the Map-Reduce framework's support for the
* "mark-reset" functionality in Reduce Values Iterator
*/
public class TestValueIterReset extends TestCase {
private static final int NUM_MAPS = 1;
private static final int NUM_TESTS = 4;
private static final int NUM_VALUES = 40;
private static Path TEST_ROOT_DIR =
new Path(System.getProperty("test.build.data","/tmp"));
private static Configuration conf = new Configuration();
private static FileSystem localFs;
static {
try {
localFs = FileSystem.getLocal(conf);
} catch (IOException io) {
throw new RuntimeException("problem getting local fs", io);
}
}
private static final Log LOG =
LogFactory.getLog(TestValueIterReset.class);
public static class TestMapper
extends Mapper<LongWritable, Text, IntWritable, IntWritable> {
public void map(LongWritable key, Text value, Context context)
throws IOException, InterruptedException {
IntWritable outKey = new IntWritable();
IntWritable outValue = new IntWritable();
for (int j = 0; j < NUM_TESTS; j++) {
for (int i = 0; i < NUM_VALUES; i++) {
outKey.set(j);
outValue.set(i);
context.write(outKey, outValue);
}
}
}
}
public static class TestReducer
extends Reducer< IntWritable,IntWritable,IntWritable,IntWritable> {
public void reduce(IntWritable key, Iterable<IntWritable> values,
Context context) throws IOException, InterruptedException {
int errors = 0;
MarkableIterator<IntWritable> mitr =
new MarkableIterator<IntWritable>(values.iterator());
switch (key.get()) {
case 0:
errors += test0(key, mitr);
break;
case 1:
errors += test1(key, mitr);
break;
case 2:
errors += test2(key, mitr);
break;
case 3:
errors += test3(key, mitr);
break;
default:
break;
}
context.write(key, new IntWritable(errors));
}
}
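  /*
   * A minimal usage sketch (not part of the test) of the mark/reset calls
   * exercised below, assuming a reducer that needs two passes over its
   * values; process() and processAgain() are placeholder names:
   *
   *   MarkableIterator<IntWritable> mitr =
   *       new MarkableIterator<IntWritable>(values.iterator());
   *   mitr.mark();                  // remember the current position
   *   while (mitr.hasNext()) {      // first pass over the values
   *     process(mitr.next());
   *   }
   *   mitr.reset();                 // rewind to the mark
   *   while (mitr.hasNext()) {      // second pass over the same values
   *     processAgain(mitr.next());
   *   }
   *
   * The tests below additionally re-mark mid-iteration and call clearMark(),
   * and testValueIterReset() shrinks REDUCE_MARKRESET_BUFFER_SIZE so that
   * both the in-memory and the on-disk backup paths get exercised.
   */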
/**
* Test the most common use case. Mark before start of the iteration and
* reset at the end to go over the entire list
* @param key
* @param values
* @return
* @throws IOException
*/
private static int test0(IntWritable key,
MarkableIterator<IntWritable> values)
throws IOException {
int errors = 0;
IntWritable i;
ArrayList<IntWritable> expectedValues = new ArrayList<IntWritable>();
LOG.info("Executing TEST:0 for Key:"+ key.toString());
values.mark();
LOG.info("TEST:0. Marking");
while (values.hasNext()) {
i = values.next();
expectedValues.add(i);
LOG.info(key + ":" + i);
}
values.reset();
LOG.info("TEST:0. Reset");
int count = 0;
while (values.hasNext()) {
i = values.next();
LOG.info(key + ":" + i);
if (i != expectedValues.get(count)) {
LOG.info("TEST:0. Check:1 Expected: " + expectedValues.get(count) +
", Got: " + i);
errors ++;
return errors;
}
count ++;
}
LOG.info("TEST:0 Done");
return errors;
}
/**
* Test the case where we do a mark outside of a reset. Test for both file
* and memory caches
* @param key
* @param values
* @return
* @throws IOException
*/
private static int test1(IntWritable key,
MarkableIterator<IntWritable> values)
throws IOException {
IntWritable i;
int errors = 0;
int count = 0;
ArrayList<IntWritable> expectedValues = new ArrayList<IntWritable>();
ArrayList<IntWritable> expectedValues1 = new ArrayList<IntWritable>();
LOG.info("Executing TEST:1 for Key:" + key);
values.mark();
LOG.info("TEST:1. Marking");
while (values.hasNext()) {
i = values.next();
LOG.info(key + ":" + i);
expectedValues.add(i);
if (count == 2) {
break;
}
count ++;
}
values.reset();
LOG.info("TEST:1. Reset");
count = 0;
while (values.hasNext()) {
i = values.next();
LOG.info(key + ":" + i);
if (count < expectedValues.size()) {
if (i != expectedValues.get(count)) {
errors ++;
LOG.info("TEST:1. Check:1 Expected: " + expectedValues.get(count) +
", Got: " + i);
return errors;
}
}
      // We have moved past the first mark, but still in the memory cache
if (count == 3) {
values.mark();
LOG.info("TEST:1. Marking -- " + key + ": " + i);
}
if (count >= 3) {
expectedValues1.add(i);
}
if (count == 5) {
break;
}
count ++;
}
if (count < expectedValues.size()) {
LOG.info(("TEST:1 Check:2. Iterator returned lesser values"));
errors ++;
return errors;
}
values.reset();
count = 0;
LOG.info("TEST:1. Reset");
expectedValues.clear();
while (values.hasNext()) {
i = values.next();
LOG.info(key + ":" + i);
if (count < expectedValues1.size()) {
if (i != expectedValues1.get(count)) {
errors ++;
LOG.info("TEST:1. Check:3 Expected: " + expectedValues1.get(count)
+ ", Got: " + i);
return errors;
}
}
      // We have moved past the previous mark, but now we are in the file
// cache
if (count == 25) {
values.mark();
LOG.info("TEST:1. Marking -- " + key + ":" + i);
}
if (count >= 25) {
expectedValues.add(i);
}
count ++;
}
if (count < expectedValues1.size()) {
LOG.info(("TEST:1 Check:4. Iterator returned fewer values"));
errors ++;
return errors;
}
values.reset();
LOG.info("TEST:1. Reset");
count = 0;
while (values.hasNext()) {
i = values.next();
LOG.info(key + ":" + i);
if (i != expectedValues.get(count)) {
errors ++;
LOG.info("TEST:1. Check:5 Expected: " + expectedValues.get(count)
+ ", Got: " + i);
return errors;
}
}
LOG.info("TEST:1 Done");
return errors;
}
/**
* Test the case where we do a mark inside a reset. Test for both file
* and memory
* @param key
* @param values
* @return
* @throws IOException
*/
private static int test2(IntWritable key,
MarkableIterator<IntWritable> values)
throws IOException {
IntWritable i;
int errors = 0;
int count = 0;
ArrayList<IntWritable> expectedValues = new ArrayList<IntWritable>();
ArrayList<IntWritable> expectedValues1 = new ArrayList<IntWritable>();
LOG.info("Executing TEST:2 for Key:" + key);
values.mark();
LOG.info("TEST:2 Marking");
while (values.hasNext()) {
i = values.next();
LOG.info(key + ":" + i);
expectedValues.add(i);
if (count == 8) {
break;
}
count ++;
}
values.reset();
count = 0;
LOG.info("TEST:2 reset");
while (values.hasNext()) {
i = values.next();
LOG.info(key + ":" + i);
if (count < expectedValues.size()) {
if (i != expectedValues.get(count)) {
errors ++;
LOG.info("TEST:2. Check:1 Expected: " + expectedValues.get(count)
+ ", Got: " + i);
return errors;
}
}
      // We have moved past the first mark, but still reading from the
// memory cache
if (count == 3) {
values.mark();
LOG.info("TEST:2. Marking -- " + key + ":" + i);
}
if (count >= 3) {
expectedValues1.add(i);
}
count ++;
}
values.reset();
LOG.info("TEST:2. Reset");
expectedValues.clear();
count = 0;
while (values.hasNext()) {
i = values.next();
LOG.info(key + ":" + i);
if (count < expectedValues1.size()) {
if (i != expectedValues1.get(count)) {
errors ++;
LOG.info("TEST:2. Check:2 Expected: " + expectedValues1.get(count)
+ ", Got: " + i);
return errors;
}
}
      // We have moved past the previous mark, but now we are in the file
// cache
if (count == 20) {
values.mark();
LOG.info("TEST:2. Marking -- " + key + ":" + i);
}
if (count >= 20) {
expectedValues.add(i);
}
count ++;
}
values.reset();
count = 0;
LOG.info("TEST:2. Reset");
while (values.hasNext()) {
i = values.next();
LOG.info(key + ":" + i);
if (i != expectedValues.get(count)) {
errors ++;
LOG.info("TEST:2. Check:1 Expected: " + expectedValues.get(count)
+ ", Got: " + i);
return errors;
}
}
LOG.info("TEST:2 Done");
return errors;
}
/**
* Test "clearMark"
* @param key
* @param values
* @return
* @throws IOException
*/
private static int test3(IntWritable key,
MarkableIterator<IntWritable> values)
throws IOException {
int errors = 0;
IntWritable i;
ArrayList<IntWritable> expectedValues = new ArrayList<IntWritable>();
LOG.info("Executing TEST:3 for Key:" + key);
values.mark();
LOG.info("TEST:3. Marking");
int count = 0;
while (values.hasNext()) {
      i = values.next();
LOG.info(key + ":" + i);
if (count == 5) {
LOG.info("TEST:3. Clearing Mark");
values.clearMark();
}
if (count == 8) {
LOG.info("TEST:3. Marking -- " + key + ":" + i);
values.mark();
}
if (count >= 8) {
expectedValues.add(i);
}
count ++;
}
values.reset();
LOG.info("TEST:3. After reset");
if (!values.hasNext()) {
errors ++;
LOG.info("TEST:3, Check:1. HasNext returned false");
return errors;
}
count = 0;
while (values.hasNext()) {
i = values.next();
LOG.info(key + ":" + i);
if (count < expectedValues.size()) {
if (i != expectedValues.get(count)) {
errors ++;
LOG.info("TEST:2. Check:1 Expected: " + expectedValues.get(count)
+ ", Got: " + i);
return errors;
}
}
if (count == 10) {
values.clearMark();
LOG.info("TEST:3. After clear mark");
}
count ++;
}
boolean successfulClearMark = false;
try {
LOG.info("TEST:3. Before Reset");
values.reset();
} catch (IOException e) {
successfulClearMark = true;
}
if (!successfulClearMark) {
LOG.info("TEST:3 Check:4 reset was successfule even after clearMark");
errors ++;
return errors;
}
LOG.info("TEST:3 Done.");
return errors;
}
public void createInput() throws Exception {
    // Just create one-line files. We use them only to
    // control the number of map tasks.
for (int i = 0; i < NUM_MAPS; i++) {
Path file = new Path(TEST_ROOT_DIR+"/in", "test" + i + ".txt");
localFs.delete(file, false);
OutputStream os = localFs.create(file);
Writer wr = new OutputStreamWriter(os);
wr.write("dummy");
wr.close();
}
}
public void testValueIterReset() {
try {
Configuration conf = new Configuration();
Job job = Job.getInstance(conf, "TestValueIterReset") ;
job.setJarByClass(TestValueIterReset.class);
job.setMapperClass(TestMapper.class);
job.setReducerClass(TestReducer.class);
job.setNumReduceTasks(NUM_TESTS);
job.setMapOutputKeyClass(IntWritable.class);
job.setMapOutputValueClass(IntWritable.class);
job.setOutputKeyClass(IntWritable.class);
job.setOutputValueClass(IntWritable.class);
job.getConfiguration().
setInt(MRJobConfig.REDUCE_MARKRESET_BUFFER_SIZE,128);
job.setInputFormatClass(TextInputFormat.class);
job.setOutputFormatClass(TextOutputFormat.class);
FileInputFormat.addInputPath(job,
new Path(TEST_ROOT_DIR + "/in"));
Path output = new Path(TEST_ROOT_DIR + "/out");
localFs.delete(output, true);
FileOutputFormat.setOutputPath(job, output);
createInput();
assertTrue(job.waitForCompletion(true));
validateOutput();
} catch (Exception e) {
e.printStackTrace();
assertTrue(false);
}
}
private void validateOutput() throws IOException {
Path[] outputFiles = FileUtil.stat2Paths(
localFs.listStatus(new Path(TEST_ROOT_DIR + "/out"),
new Utils.OutputFileUtils.OutputFilesFilter()));
if (outputFiles.length > 0) {
InputStream is = localFs.open(outputFiles[0]);
BufferedReader reader = new BufferedReader(new InputStreamReader(is));
String line = reader.readLine();
while (line != null) {
StringTokenizer tokeniz = new StringTokenizer(line, "\t");
String key = tokeniz.nextToken();
String value = tokeniz.nextToken();
LOG.info("Output: key: "+ key + " value: "+ value);
int errors = Integer.parseInt(value);
assertTrue(errors == 0);
line = reader.readLine();
}
reader.close();
}
}
}
| 15,120 | 25.435315 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestChild.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.File;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.HadoopTestCase;
import org.apache.hadoop.mapred.JobConf;
import org.apache.log4j.Level;
public class TestChild extends HadoopTestCase {
private static String TEST_ROOT_DIR =
new File(System.getProperty("test.build.data","/tmp"))
.toURI().toString().replace(' ', '+');
private final Path inDir = new Path(TEST_ROOT_DIR, "./wc/input");
private final Path outDir = new Path(TEST_ROOT_DIR, "./wc/output");
private final static String OLD_CONFIGS = "test.old.configs";
private final static String TASK_OPTS_VAL = "-Xmx200m";
private final static String MAP_OPTS_VAL = "-Xmx200m";
private final static String REDUCE_OPTS_VAL = "-Xmx300m";
public TestChild() throws IOException {
super(HadoopTestCase.CLUSTER_MR , HadoopTestCase.LOCAL_FS, 2, 2);
}
static class MyMapper extends Mapper<LongWritable, Text, LongWritable, Text> {
@Override
protected void setup(Context context) throws IOException,
InterruptedException {
Configuration conf = context.getConfiguration();
boolean oldConfigs = conf.getBoolean(OLD_CONFIGS, false);
if (oldConfigs) {
String javaOpts = conf.get(JobConf.MAPRED_TASK_JAVA_OPTS);
assertNotNull(JobConf.MAPRED_TASK_JAVA_OPTS + " is null!",
javaOpts);
assertEquals(JobConf.MAPRED_TASK_JAVA_OPTS + " has value of: " +
javaOpts,
javaOpts, TASK_OPTS_VAL);
} else {
String mapJavaOpts = conf.get(JobConf.MAPRED_MAP_TASK_JAVA_OPTS);
assertNotNull(JobConf.MAPRED_MAP_TASK_JAVA_OPTS + " is null!",
mapJavaOpts);
assertEquals(JobConf.MAPRED_MAP_TASK_JAVA_OPTS + " has value of: " +
mapJavaOpts,
mapJavaOpts, MAP_OPTS_VAL);
}
Level logLevel =
Level.toLevel(conf.get(JobConf.MAPRED_MAP_TASK_LOG_LEVEL,
Level.INFO.toString()));
      assertEquals(JobConf.MAPRED_MAP_TASK_LOG_LEVEL + " has value of " +
logLevel, logLevel, Level.OFF);
}
}
static class MyReducer
extends Reducer<LongWritable, Text, LongWritable, Text> {
@Override
protected void setup(Context context)
throws IOException, InterruptedException {
Configuration conf = context.getConfiguration();
boolean oldConfigs = conf.getBoolean(OLD_CONFIGS, false);
if (oldConfigs) {
String javaOpts = conf.get(JobConf.MAPRED_TASK_JAVA_OPTS);
assertNotNull(JobConf.MAPRED_TASK_JAVA_OPTS + " is null!",
javaOpts);
assertEquals(JobConf.MAPRED_TASK_JAVA_OPTS + " has value of: " +
javaOpts,
javaOpts, TASK_OPTS_VAL);
} else {
String reduceJavaOpts = conf.get(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS);
assertNotNull(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS + " is null!",
reduceJavaOpts);
assertEquals(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS + " has value of: " +
reduceJavaOpts,
reduceJavaOpts, REDUCE_OPTS_VAL);
}
Level logLevel =
Level.toLevel(conf.get(JobConf.MAPRED_REDUCE_TASK_LOG_LEVEL,
Level.INFO.toString()));
      assertEquals(JobConf.MAPRED_REDUCE_TASK_LOG_LEVEL + " has value of " +
logLevel, logLevel, Level.OFF);
}
}
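  // Submits a small job built by MapReduceTestUtil and verifies that the
  // configured child JVM options and log level were visible to the tasks
  // (via the mapper/reducer setup() asserts above), then checks that the
  // expected number of part files was produced.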
private Job submitAndValidateJob(JobConf conf, int numMaps, int numReds,
boolean oldConfigs)
throws IOException, InterruptedException, ClassNotFoundException {
conf.setBoolean(OLD_CONFIGS, oldConfigs);
if (oldConfigs) {
conf.set(JobConf.MAPRED_TASK_JAVA_OPTS, TASK_OPTS_VAL);
} else {
conf.set(JobConf.MAPRED_MAP_TASK_JAVA_OPTS, MAP_OPTS_VAL);
conf.set(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS, REDUCE_OPTS_VAL);
}
conf.set(JobConf.MAPRED_MAP_TASK_LOG_LEVEL, Level.OFF.toString());
conf.set(JobConf.MAPRED_REDUCE_TASK_LOG_LEVEL, Level.OFF.toString());
Job job = MapReduceTestUtil.createJob(conf, inDir, outDir,
numMaps, numReds);
job.setMapperClass(MyMapper.class);
job.setReducerClass(MyReducer.class);
assertFalse("Job already has a job tracker connection, before it's submitted",
job.isConnected());
job.submit();
assertTrue("Job doesn't have a job tracker connection, even though it's been submitted",
job.isConnected());
job.waitForCompletion(true);
assertTrue(job.isSuccessful());
// Check output directory
FileSystem fs = FileSystem.get(conf);
assertTrue("Job output directory doesn't exit!", fs.exists(outDir));
FileStatus[] list = fs.listStatus(outDir, new OutputFilter());
int numPartFiles = numReds == 0 ? numMaps : numReds;
assertTrue("Number of part-files is " + list.length + " and not "
+ numPartFiles, list.length == numPartFiles);
return job;
}
public void testChild() throws Exception {
try {
submitAndValidateJob(createJobConf(), 1, 1, true);
submitAndValidateJob(createJobConf(), 1, 1, false);
} finally {
tearDown();
}
}
private static class OutputFilter implements PathFilter {
public boolean accept(Path path) {
return !(path.getName().startsWith("_"));
}
}
}
| 6,597 | 39.231707 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/FailJob.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.BufferedWriter;
import java.io.IOException;
import java.io.OutputStreamWriter;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
* Dummy class for testing failed mappers and/or reducers.
*
* Mappers emit a token amount of data.
*/
public class FailJob extends Configured implements Tool {
public static String FAIL_MAP = "mapreduce.failjob.map.fail";
public static String FAIL_REDUCE = "mapreduce.failjob.reduce.fail";
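  // A hedged usage sketch (the input path and job handling here are
  // illustrative only, not part of this class):
  //
  //   FailJob failJob = new FailJob();
  //   failJob.setConf(new Configuration());
  //   Job job = failJob.createJob(true, false, new Path("failjob_in"));
  //   boolean succeeded = job.waitForCompletion(true); // expect map failures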
public static class FailMapper
extends Mapper<LongWritable, Text, LongWritable, NullWritable> {
public void map(LongWritable key, Text value, Context context
) throws IOException, InterruptedException {
if (context.getConfiguration().getBoolean(FAIL_MAP, true)) {
throw new RuntimeException("Intentional map failure");
}
context.write(key, NullWritable.get());
}
}
public static class FailReducer
extends Reducer<LongWritable, NullWritable, NullWritable, NullWritable> {
public void reduce(LongWritable key, Iterable<NullWritable> values,
Context context) throws IOException {
if (context.getConfiguration().getBoolean(FAIL_REDUCE, false)) {
throw new RuntimeException("Intentional reduce failure");
}
context.setStatus("No worries");
}
}
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new Configuration(), new FailJob(), args);
System.exit(res);
}
public Job createJob(boolean failMappers, boolean failReducers, Path inputFile)
throws IOException {
Configuration conf = getConf();
conf.setBoolean(FAIL_MAP, failMappers);
conf.setBoolean(FAIL_REDUCE, failReducers);
Job job = Job.getInstance(conf, "fail");
job.setJarByClass(FailJob.class);
job.setMapperClass(FailMapper.class);
job.setMapOutputKeyClass(LongWritable.class);
job.setMapOutputValueClass(NullWritable.class);
job.setReducerClass(FailReducer.class);
job.setOutputFormatClass(NullOutputFormat.class);
job.setInputFormatClass(TextInputFormat.class);
job.setSpeculativeExecution(false);
job.setJobName("Fail job");
FileInputFormat.addInputPath(job, inputFile);
return job;
}
public int run(String[] args) throws Exception {
if(args.length < 1) {
System.err.println("FailJob " +
" (-failMappers|-failReducers)");
ToolRunner.printGenericCommandUsage(System.err);
return 2;
}
boolean failMappers = false, failReducers = false;
for (int i = 0; i < args.length; i++ ) {
if (args[i].equals("-failMappers")) {
failMappers = true;
}
else if(args[i].equals("-failReducers")) {
failReducers = true;
}
}
if (!(failMappers ^ failReducers)) {
System.err.println("Exactly one of -failMappers or -failReducers must be specified.");
return 3;
}
// Write a file with one line per mapper.
final FileSystem fs = FileSystem.get(getConf());
Path inputDir = new Path(FailJob.class.getSimpleName() + "_in");
fs.mkdirs(inputDir);
for (int i = 0; i < getConf().getInt("mapred.map.tasks", 1); ++i) {
BufferedWriter w = new BufferedWriter(new OutputStreamWriter(
fs.create(new Path(inputDir, Integer.toString(i)))));
w.write(Integer.toString(i) + "\n");
w.close();
}
Job job = createJob(failMappers, failReducers, inputDir);
return job.waitForCompletion(true) ? 0 : 1;
}
}
| 5,038 | 36.604478 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/RandomTextWriter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.ClusterStatus;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
* This program uses map/reduce to just run a distributed job where there is
* no interaction between the tasks and each task writes a large unsorted
* random sequence of words.
 * In order for this program to generate data for terasort with 5-10 words
* per key and 20-100 words per value, have the following config:
* <pre>{@code
* <?xml version="1.0"?>
* <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
* <configuration>
* <property>
* <name>mapreduce.randomtextwriter.minwordskey</name>
* <value>5</value>
* </property>
* <property>
* <name>mapreduce.randomtextwriter.maxwordskey</name>
* <value>10</value>
* </property>
* <property>
* <name>mapreduce.randomtextwriter.minwordsvalue</name>
* <value>20</value>
* </property>
* <property>
* <name>mapreduce.randomtextwriter.maxwordsvalue</name>
* <value>100</value>
* </property>
* <property>
* <name>mapreduce.randomtextwriter.totalbytes</name>
* <value>1099511627776</value>
* </property>
* </configuration>}</pre>
*
* Equivalently, {@link RandomTextWriter} also supports all the above options
* and ones supported by {@link Tool} via the command-line.
*
* To run: bin/hadoop jar hadoop-${version}-examples.jar randomtextwriter
* [-outFormat <i>output format class</i>] <i>output</i>
*/
public class RandomTextWriter extends Configured implements Tool {
public static final String TOTAL_BYTES =
"mapreduce.randomtextwriter.totalbytes";
public static final String BYTES_PER_MAP =
"mapreduce.randomtextwriter.bytespermap";
public static final String MAPS_PER_HOST =
"mapreduce.randomtextwriter.mapsperhost";
public static final String MAX_VALUE = "mapreduce.randomtextwriter.maxwordsvalue";
public static final String MIN_VALUE = "mapreduce.randomtextwriter.minwordsvalue";
public static final String MIN_KEY = "mapreduce.randomtextwriter.minwordskey";
public static final String MAX_KEY = "mapreduce.randomtextwriter.maxwordskey";
static int printUsage() {
System.out.println("randomtextwriter " +
"[-outFormat <output format class>] " +
"<output>");
ToolRunner.printGenericCommandUsage(System.out);
return 2;
}
/**
* User counters
*/
static enum Counters { RECORDS_WRITTEN, BYTES_WRITTEN }
static class RandomTextMapper extends Mapper<Text, Text, Text, Text> {
private long numBytesToWrite;
private int minWordsInKey;
private int wordsInKeyRange;
private int minWordsInValue;
private int wordsInValueRange;
private Random random = new Random();
/**
* Save the configuration value that we need to write the data.
*/
public void setup(Context context) {
Configuration conf = context.getConfiguration();
numBytesToWrite = conf.getLong(BYTES_PER_MAP,
1*1024*1024*1024);
minWordsInKey = conf.getInt(MIN_KEY, 5);
wordsInKeyRange = (conf.getInt(MAX_KEY, 10) - minWordsInKey);
minWordsInValue = conf.getInt(MIN_VALUE, 10);
wordsInValueRange = (conf.getInt(MAX_VALUE, 100) - minWordsInValue);
}
/**
     * Write random sentences as key/value records until the configured
     * number of bytes for this map has been produced.
*/
public void map(Text key, Text value,
Context context) throws IOException,InterruptedException {
int itemCount = 0;
while (numBytesToWrite > 0) {
// Generate the key/value
int noWordsKey = minWordsInKey +
(wordsInKeyRange != 0 ? random.nextInt(wordsInKeyRange) : 0);
int noWordsValue = minWordsInValue +
(wordsInValueRange != 0 ? random.nextInt(wordsInValueRange) : 0);
Text keyWords = generateSentence(noWordsKey);
Text valueWords = generateSentence(noWordsValue);
// Write the sentence
context.write(keyWords, valueWords);
numBytesToWrite -= (keyWords.getLength() + valueWords.getLength());
// Update counters, progress etc.
context.getCounter(Counters.BYTES_WRITTEN).increment(
keyWords.getLength() + valueWords.getLength());
context.getCounter(Counters.RECORDS_WRITTEN).increment(1);
if (++itemCount % 200 == 0) {
context.setStatus("wrote record " + itemCount + ". " +
numBytesToWrite + " bytes left.");
}
}
context.setStatus("done with " + itemCount + " records.");
}
private Text generateSentence(int noWords) {
StringBuffer sentence = new StringBuffer();
String space = " ";
for (int i=0; i < noWords; ++i) {
sentence.append(words[random.nextInt(words.length)]);
sentence.append(space);
}
return new Text(sentence.toString());
}
}
/**
* This is the main routine for launching a distributed random write job.
   * By default it runs 10 maps per host and each map writes 1 GB of data to
   * a DFS file. The job runs with no reduce tasks.
*
* @throws IOException
*/
public int run(String[] args) throws Exception {
if (args.length == 0) {
return printUsage();
}
Configuration conf = getConf();
JobClient client = new JobClient(conf);
ClusterStatus cluster = client.getClusterStatus();
int numMapsPerHost = conf.getInt(MAPS_PER_HOST, 10);
long numBytesToWritePerMap = conf.getLong(BYTES_PER_MAP,
1*1024*1024*1024);
if (numBytesToWritePerMap == 0) {
System.err.println("Cannot have " + BYTES_PER_MAP +" set to 0");
return -2;
}
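    // Default total: MAPS_PER_HOST (10) maps per tasktracker, each writing
    // BYTES_PER_MAP (1 GB), unless TOTAL_BYTES overrides it below.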
long totalBytesToWrite = conf.getLong(TOTAL_BYTES,
numMapsPerHost*numBytesToWritePerMap*cluster.getTaskTrackers());
int numMaps = (int) (totalBytesToWrite / numBytesToWritePerMap);
if (numMaps == 0 && totalBytesToWrite > 0) {
numMaps = 1;
conf.setLong(BYTES_PER_MAP, totalBytesToWrite);
}
conf.setInt(MRJobConfig.NUM_MAPS, numMaps);
Job job = Job.getInstance(conf);
job.setJarByClass(RandomTextWriter.class);
job.setJobName("random-text-writer");
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Text.class);
job.setInputFormatClass(RandomWriter.RandomInputFormat.class);
job.setMapperClass(RandomTextMapper.class);
Class<? extends OutputFormat> outputFormatClass =
SequenceFileOutputFormat.class;
List<String> otherArgs = new ArrayList<String>();
for(int i=0; i < args.length; ++i) {
try {
if ("-outFormat".equals(args[i])) {
outputFormatClass =
Class.forName(args[++i]).asSubclass(OutputFormat.class);
} else {
otherArgs.add(args[i]);
}
} catch (ArrayIndexOutOfBoundsException except) {
System.out.println("ERROR: Required parameter missing from " +
args[i-1]);
return printUsage(); // exits
}
}
job.setOutputFormatClass(outputFormatClass);
FileOutputFormat.setOutputPath(job, new Path(otherArgs.get(0)));
System.out.println("Running " + numMaps + " maps.");
// reducer NONE
job.setNumReduceTasks(0);
Date startTime = new Date();
System.out.println("Job started: " + startTime);
int ret = job.waitForCompletion(true) ? 0 : 1;
Date endTime = new Date();
System.out.println("Job ended: " + endTime);
System.out.println("The job took " +
(endTime.getTime() - startTime.getTime()) /1000 +
" seconds.");
return ret;
}
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new Configuration(), new RandomTextWriter(), args);
System.exit(res);
}
/**
* A random list of 1000 words from /usr/share/dict/words
*/
private static String[] words = {
"diurnalness", "Homoiousian",
"spiranthic", "tetragynian",
"silverhead", "ungreat",
"lithograph", "exploiter",
"physiologian", "by",
"hellbender", "Filipendula",
"undeterring", "antiscolic",
"pentagamist", "hypoid",
"cacuminal", "sertularian",
"schoolmasterism", "nonuple",
"gallybeggar", "phytonic",
"swearingly", "nebular",
"Confervales", "thermochemically",
"characinoid", "cocksuredom",
"fallacious", "feasibleness",
"debromination", "playfellowship",
"tramplike", "testa",
"participatingly", "unaccessible",
"bromate", "experientialist",
"roughcast", "docimastical",
"choralcelo", "blightbird",
"peptonate", "sombreroed",
"unschematized", "antiabolitionist",
"besagne", "mastication",
"bromic", "sviatonosite",
"cattimandoo", "metaphrastical",
"endotheliomyoma", "hysterolysis",
"unfulminated", "Hester",
"oblongly", "blurredness",
"authorling", "chasmy",
"Scorpaenidae", "toxihaemia",
"Dictograph", "Quakerishly",
"deaf", "timbermonger",
"strammel", "Thraupidae",
"seditious", "plerome",
"Arneb", "eristically",
"serpentinic", "glaumrie",
"socioromantic", "apocalypst",
"tartrous", "Bassaris",
"angiolymphoma", "horsefly",
"kenno", "astronomize",
"euphemious", "arsenide",
"untongued", "parabolicness",
"uvanite", "helpless",
"gemmeous", "stormy",
"templar", "erythrodextrin",
"comism", "interfraternal",
"preparative", "parastas",
"frontoorbital", "Ophiosaurus",
"diopside", "serosanguineous",
"ununiformly", "karyological",
"collegian", "allotropic",
"depravity", "amylogenesis",
"reformatory", "epidymides",
"pleurotropous", "trillium",
"dastardliness", "coadvice",
"embryotic", "benthonic",
"pomiferous", "figureheadship",
"Megaluridae", "Harpa",
"frenal", "commotion",
"abthainry", "cobeliever",
"manilla", "spiciferous",
"nativeness", "obispo",
"monilioid", "biopsic",
"valvula", "enterostomy",
"planosubulate", "pterostigma",
"lifter", "triradiated",
"venialness", "tum",
"archistome", "tautness",
"unswanlike", "antivenin",
"Lentibulariaceae", "Triphora",
"angiopathy", "anta",
"Dawsonia", "becomma",
"Yannigan", "winterproof",
"antalgol", "harr",
"underogating", "ineunt",
"cornberry", "flippantness",
"scyphostoma", "approbation",
"Ghent", "Macraucheniidae",
"scabbiness", "unanatomized",
"photoelasticity", "eurythermal",
"enation", "prepavement",
"flushgate", "subsequentially",
"Edo", "antihero",
"Isokontae", "unforkedness",
"porriginous", "daytime",
"nonexecutive", "trisilicic",
"morphiomania", "paranephros",
"botchedly", "impugnation",
"Dodecatheon", "obolus",
"unburnt", "provedore",
"Aktistetae", "superindifference",
"Alethea", "Joachimite",
"cyanophilous", "chorograph",
"brooky", "figured",
"periclitation", "quintette",
"hondo", "ornithodelphous",
"unefficient", "pondside",
"bogydom", "laurinoxylon",
"Shiah", "unharmed",
"cartful", "noncrystallized",
"abusiveness", "cromlech",
"japanned", "rizzomed",
"underskin", "adscendent",
"allectory", "gelatinousness",
"volcano", "uncompromisingly",
"cubit", "idiotize",
"unfurbelowed", "undinted",
"magnetooptics", "Savitar",
"diwata", "ramosopalmate",
"Pishquow", "tomorn",
"apopenptic", "Haversian",
"Hysterocarpus", "ten",
"outhue", "Bertat",
"mechanist", "asparaginic",
"velaric", "tonsure",
"bubble", "Pyrales",
"regardful", "glyphography",
"calabazilla", "shellworker",
"stradametrical", "havoc",
"theologicopolitical", "sawdust",
"diatomaceous", "jajman",
"temporomastoid", "Serrifera",
"Ochnaceae", "aspersor",
"trailmaking", "Bishareen",
"digitule", "octogynous",
"epididymitis", "smokefarthings",
"bacillite", "overcrown",
"mangonism", "sirrah",
"undecorated", "psychofugal",
"bismuthiferous", "rechar",
"Lemuridae", "frameable",
"thiodiazole", "Scanic",
"sportswomanship", "interruptedness",
"admissory", "osteopaedion",
"tingly", "tomorrowness",
"ethnocracy", "trabecular",
"vitally", "fossilism",
"adz", "metopon",
"prefatorial", "expiscate",
"diathermacy", "chronist",
"nigh", "generalizable",
"hysterogen", "aurothiosulphuric",
"whitlowwort", "downthrust",
"Protestantize", "monander",
"Itea", "chronographic",
"silicize", "Dunlop",
"eer", "componental",
"spot", "pamphlet",
"antineuritic", "paradisean",
"interruptor", "debellator",
"overcultured", "Florissant",
"hyocholic", "pneumatotherapy",
"tailoress", "rave",
"unpeople", "Sebastian",
"thermanesthesia", "Coniferae",
"swacking", "posterishness",
"ethmopalatal", "whittle",
"analgize", "scabbardless",
"naught", "symbiogenetically",
"trip", "parodist",
"columniform", "trunnel",
"yawler", "goodwill",
"pseudohalogen", "swangy",
"cervisial", "mediateness",
"genii", "imprescribable",
"pony", "consumptional",
"carposporangial", "poleax",
"bestill", "subfebrile",
"sapphiric", "arrowworm",
"qualminess", "ultraobscure",
"thorite", "Fouquieria",
"Bermudian", "prescriber",
"elemicin", "warlike",
"semiangle", "rotular",
"misthread", "returnability",
"seraphism", "precostal",
"quarried", "Babylonism",
"sangaree", "seelful",
"placatory", "pachydermous",
"bozal", "galbulus",
"spermaphyte", "cumbrousness",
"pope", "signifier",
"Endomycetaceae", "shallowish",
"sequacity", "periarthritis",
"bathysphere", "pentosuria",
"Dadaism", "spookdom",
"Consolamentum", "afterpressure",
"mutter", "louse",
"ovoviviparous", "corbel",
"metastoma", "biventer",
"Hydrangea", "hogmace",
"seizing", "nonsuppressed",
"oratorize", "uncarefully",
"benzothiofuran", "penult",
"balanocele", "macropterous",
"dishpan", "marten",
"absvolt", "jirble",
"parmelioid", "airfreighter",
"acocotl", "archesporial",
"hypoplastral", "preoral",
"quailberry", "cinque",
"terrestrially", "stroking",
"limpet", "moodishness",
"canicule", "archididascalian",
"pompiloid", "overstaid",
"introducer", "Italical",
"Christianopaganism", "prescriptible",
"subofficer", "danseuse",
"cloy", "saguran",
"frictionlessly", "deindividualization",
"Bulanda", "ventricous",
"subfoliar", "basto",
"scapuloradial", "suspend",
"stiffish", "Sphenodontidae",
"eternal", "verbid",
"mammonish", "upcushion",
"barkometer", "concretion",
"preagitate", "incomprehensible",
"tristich", "visceral",
"hemimelus", "patroller",
"stentorophonic", "pinulus",
"kerykeion", "brutism",
"monstership", "merciful",
"overinstruct", "defensibly",
"bettermost", "splenauxe",
"Mormyrus", "unreprimanded",
"taver", "ell",
"proacquittal", "infestation",
"overwoven", "Lincolnlike",
"chacona", "Tamil",
"classificational", "lebensraum",
"reeveland", "intuition",
"Whilkut", "focaloid",
"Eleusinian", "micromembrane",
"byroad", "nonrepetition",
"bacterioblast", "brag",
"ribaldrous", "phytoma",
"counteralliance", "pelvimetry",
"pelf", "relaster",
"thermoresistant", "aneurism",
"molossic", "euphonym",
"upswell", "ladhood",
"phallaceous", "inertly",
"gunshop", "stereotypography",
"laryngic", "refasten",
"twinling", "oflete",
"hepatorrhaphy", "electrotechnics",
"cockal", "guitarist",
"topsail", "Cimmerianism",
"larklike", "Llandovery",
"pyrocatechol", "immatchable",
"chooser", "metrocratic",
"craglike", "quadrennial",
"nonpoisonous", "undercolored",
"knob", "ultratense",
"balladmonger", "slait",
"sialadenitis", "bucketer",
"magnificently", "unstipulated",
"unscourged", "unsupercilious",
"packsack", "pansophism",
"soorkee", "percent",
"subirrigate", "champer",
"metapolitics", "spherulitic",
"involatile", "metaphonical",
"stachyuraceous", "speckedness",
"bespin", "proboscidiform",
"gul", "squit",
"yeelaman", "peristeropode",
"opacousness", "shibuichi",
"retinize", "yote",
"misexposition", "devilwise",
"pumpkinification", "vinny",
"bonze", "glossing",
"decardinalize", "transcortical",
"serphoid", "deepmost",
"guanajuatite", "wemless",
"arval", "lammy",
"Effie", "Saponaria",
"tetrahedral", "prolificy",
"excerpt", "dunkadoo",
"Spencerism", "insatiately",
"Gilaki", "oratorship",
"arduousness", "unbashfulness",
"Pithecolobium", "unisexuality",
"veterinarian", "detractive",
"liquidity", "acidophile",
"proauction", "sural",
"totaquina", "Vichyite",
"uninhabitedness", "allegedly",
"Gothish", "manny",
"Inger", "flutist",
"ticktick", "Ludgatian",
"homotransplant", "orthopedical",
"diminutively", "monogoneutic",
"Kenipsim", "sarcologist",
"drome", "stronghearted",
"Fameuse", "Swaziland",
"alen", "chilblain",
"beatable", "agglomeratic",
"constitutor", "tendomucoid",
"porencephalous", "arteriasis",
"boser", "tantivy",
"rede", "lineamental",
"uncontradictableness", "homeotypical",
"masa", "folious",
"dosseret", "neurodegenerative",
"subtransverse", "Chiasmodontidae",
"palaeotheriodont", "unstressedly",
"chalcites", "piquantness",
"lampyrine", "Aplacentalia",
"projecting", "elastivity",
"isopelletierin", "bladderwort",
"strander", "almud",
"iniquitously", "theologal",
"bugre", "chargeably",
"imperceptivity", "meriquinoidal",
"mesophyte", "divinator",
"perfunctory", "counterappellant",
"synovial", "charioteer",
"crystallographical", "comprovincial",
"infrastapedial", "pleasurehood",
"inventurous", "ultrasystematic",
"subangulated", "supraoesophageal",
"Vaishnavism", "transude",
"chrysochrous", "ungrave",
"reconciliable", "uninterpleaded",
"erlking", "wherefrom",
"aprosopia", "antiadiaphorist",
"metoxazine", "incalculable",
"umbellic", "predebit",
"foursquare", "unimmortal",
"nonmanufacture", "slangy",
"predisputant", "familist",
"preaffiliate", "friarhood",
"corelysis", "zoonitic",
"halloo", "paunchy",
"neuromimesis", "aconitine",
"hackneyed", "unfeeble",
"cubby", "autoschediastical",
"naprapath", "lyrebird",
"inexistency", "leucophoenicite",
"ferrogoslarite", "reperuse",
"uncombable", "tambo",
"propodiale", "diplomatize",
"Russifier", "clanned",
"corona", "michigan",
"nonutilitarian", "transcorporeal",
"bought", "Cercosporella",
"stapedius", "glandularly",
"pictorially", "weism",
"disilane", "rainproof",
"Caphtor", "scrubbed",
"oinomancy", "pseudoxanthine",
"nonlustrous", "redesertion",
"Oryzorictinae", "gala",
"Mycogone", "reappreciate",
"cyanoguanidine", "seeingness",
"breadwinner", "noreast",
"furacious", "epauliere",
"omniscribent", "Passiflorales",
"uninductive", "inductivity",
"Orbitolina", "Semecarpus",
"migrainoid", "steprelationship",
"phlogisticate", "mesymnion",
"sloped", "edificator",
"beneficent", "culm",
"paleornithology", "unurban",
"throbless", "amplexifoliate",
"sesquiquintile", "sapience",
"astucious", "dithery",
"boor", "ambitus",
"scotching", "uloid",
"uncompromisingness", "hoove",
"waird", "marshiness",
"Jerusalem", "mericarp",
"unevoked", "benzoperoxide",
"outguess", "pyxie",
"hymnic", "euphemize",
"mendacity", "erythremia",
"rosaniline", "unchatteled",
"lienteria", "Bushongo",
"dialoguer", "unrepealably",
"rivethead", "antideflation",
"vinegarish", "manganosiderite",
"doubtingness", "ovopyriform",
"Cephalodiscus", "Muscicapa",
"Animalivora", "angina",
"planispheric", "ipomoein",
"cuproiodargyrite", "sandbox",
"scrat", "Munnopsidae",
"shola", "pentafid",
"overstudiousness", "times",
"nonprofession", "appetible",
"valvulotomy", "goladar",
"uniarticular", "oxyterpene",
"unlapsing", "omega",
"trophonema", "seminonflammable",
"circumzenithal", "starer",
"depthwise", "liberatress",
"unleavened", "unrevolting",
"groundneedle", "topline",
"wandoo", "umangite",
"ordinant", "unachievable",
"oversand", "snare",
"avengeful", "unexplicit",
"mustafina", "sonable",
"rehabilitative", "eulogization",
"papery", "technopsychology",
"impressor", "cresylite",
"entame", "transudatory",
"scotale", "pachydermatoid",
"imaginary", "yeat",
"slipped", "stewardship",
"adatom", "cockstone",
"skyshine", "heavenful",
"comparability", "exprobratory",
"dermorhynchous", "parquet",
"cretaceous", "vesperal",
"raphis", "undangered",
"Glecoma", "engrain",
"counteractively", "Zuludom",
"orchiocatabasis", "Auriculariales",
"warriorwise", "extraorganismal",
"overbuilt", "alveolite",
"tetchy", "terrificness",
"widdle", "unpremonished",
"rebilling", "sequestrum",
"equiconvex", "heliocentricism",
"catabaptist", "okonite",
"propheticism", "helminthagogic",
"calycular", "giantly",
"wingable", "golem",
"unprovided", "commandingness",
"greave", "haply",
"doina", "depressingly",
"subdentate", "impairment",
"decidable", "neurotrophic",
"unpredict", "bicorporeal",
"pendulant", "flatman",
"intrabred", "toplike",
"Prosobranchiata", "farrantly",
"toxoplasmosis", "gorilloid",
"dipsomaniacal", "aquiline",
"atlantite", "ascitic",
"perculsive", "prospectiveness",
"saponaceous", "centrifugalization",
"dinical", "infravaginal",
"beadroll", "affaite",
"Helvidian", "tickleproof",
"abstractionism", "enhedge",
"outwealth", "overcontribute",
"coldfinch", "gymnastic",
"Pincian", "Munychian",
"codisjunct", "quad",
"coracomandibular", "phoenicochroite",
"amender", "selectivity",
"putative", "semantician",
"lophotrichic", "Spatangoidea",
"saccharogenic", "inferent",
"Triconodonta", "arrendation",
"sheepskin", "taurocolla",
"bunghole", "Machiavel",
"triakistetrahedral", "dehairer",
"prezygapophysial", "cylindric",
"pneumonalgia", "sleigher",
"emir", "Socraticism",
"licitness", "massedly",
"instructiveness", "sturdied",
"redecrease", "starosta",
"evictor", "orgiastic",
"squdge", "meloplasty",
"Tsonecan", "repealableness",
"swoony", "myesthesia",
"molecule", "autobiographist",
"reciprocation", "refective",
"unobservantness", "tricae",
"ungouged", "floatability",
"Mesua", "fetlocked",
"chordacentrum", "sedentariness",
"various", "laubanite",
"nectopod", "zenick",
"sequentially", "analgic",
"biodynamics", "posttraumatic",
"nummi", "pyroacetic",
"bot", "redescend",
"dispermy", "undiffusive",
"circular", "trillion",
"Uraniidae", "ploration",
"discipular", "potentness",
"sud", "Hu",
"Eryon", "plugger",
"subdrainage", "jharal",
"abscission", "supermarket",
"countergabion", "glacierist",
"lithotresis", "minniebush",
"zanyism", "eucalypteol",
"sterilely", "unrealize",
"unpatched", "hypochondriacism",
"critically", "cheesecutter",
};
}
| 40,591 | 52.551451 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMapReduce.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.io.File;
import java.util.Iterator;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.MapFileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
import org.junit.After;
import org.junit.Test;
import static org.junit.Assert.assertTrue;
/**********************************************************
* MapredLoadTest generates a bunch of work that exercises
* a Hadoop Map-Reduce system (and DFS, too). It goes through
* the following steps:
*
* 1) Take inputs 'range' and 'counts'.
* 2) Generate 'counts' random integers between 0 and range-1.
* 3) Create a file that lists each integer between 0 and range-1,
* and lists the number of times that integer was generated.
* 4) Emit a (very large) file that contains all the integers
* in the order generated.
* 5) After the file has been generated, read it back and count
* how many times each int was generated.
* 6) Compare this big count-map against the original one. If
* they match, then SUCCESS! Otherwise, FAILURE!
*
* OK, that's how we can think about it. What are the map-reduce
* steps that get the job done?
*
* 1) In a non-mapred thread, take the inputs 'range' and 'counts'.
 * 2) In a non-mapred thread, generate the answer-key and write to disk.
* 3) In a mapred job, divide the answer key into K jobs.
* 4) A mapred 'generator' task consists of K map jobs. Each reads
* an individual "sub-key", and generates integers according to
 *    it (though with a random ordering).
* 5) The generator's reduce task agglomerates all of those files
* into a single one.
* 6) A mapred 'reader' task consists of M map jobs. The output
* file is cut into M pieces. Each of the M jobs counts the
* individual ints in its chunk and creates a map of all seen ints.
* 7) A mapred job integrates all the count files into a single one.
*
**********************************************************/
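// Illustrative standalone run (see main() below): "TestMapReduce <range> <counts>",
// e.g. range=10 and counts=1000 generates 1000 random ints in [0, 10) and verifies
// the reconstructed histogram; the JUnit path uses the defaults range=10, counts=100.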
public class TestMapReduce {
private static final File TEST_DIR = new File(
System.getProperty("test.build.data",
System.getProperty("java.io.tmpdir")), "TestMapReduce-mapreduce");
private static FileSystem fs;
static {
try {
fs = FileSystem.getLocal(new Configuration());
} catch (IOException ioe) {
fs = null;
}
}
/**
* Modified to make it a junit test.
* The RandomGen Job does the actual work of creating
* a huge file of assorted numbers. It receives instructions
* as to how many times each number should be counted. Then
* it emits those numbers in a crazy order.
*
* The map() function takes a key/val pair that describes
* a value-to-be-emitted (the key) and how many times it
* should be emitted (the value), aka "numtimes". map() then
* emits a series of intermediate key/val pairs. It emits
* 'numtimes' of these. The key is a random number and the
* value is the 'value-to-be-emitted'.
*
* The system collates and merges these pairs according to
* the random number. reduce() function takes in a key/value
* pair that consists of a crazy random number and a series
* of values that should be emitted. The random number key
* is now dropped, and reduce() emits a pair for every intermediate value.
* The emitted key is an intermediate value. The emitted value
* is just a blank string. Thus, we've created a huge file
* of numbers in random order, but where each number appears
* as many times as we were instructed.
*/
static class RandomGenMapper
extends Mapper<IntWritable, IntWritable, IntWritable, IntWritable> {
public void map(IntWritable key, IntWritable val,
Context context) throws IOException, InterruptedException {
int randomVal = key.get();
int randomCount = val.get();
for (int i = 0; i < randomCount; i++) {
context.write(new IntWritable(Math.abs(r.nextInt())),
new IntWritable(randomVal));
}
}
}
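  // Worked example (illustrative): an input pair (key=5, val=3) makes RandomGenMapper
  // emit three pairs (r1, 5), (r2, 5), (r3, 5), where r1..r3 are random non-negative ints.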
  /**
   * Emits each intermediate value once as the output key; the random shuffle key is dropped.
   */
static class RandomGenReducer
extends Reducer<IntWritable, IntWritable, IntWritable, IntWritable> {
public void reduce(IntWritable key, Iterable<IntWritable> it,
Context context) throws IOException, InterruptedException {
for (IntWritable iw : it) {
context.write(iw, null);
}
}
}
/**
* The RandomCheck Job does a lot of our work. It takes
* in a num/string keyspace, and transforms it into a
* key/count(int) keyspace.
*
* The map() function just emits a num/1 pair for every
* num/string input pair.
*
* The reduce() function sums up all the 1s that were
* emitted for a single key. It then emits the key/total
* pair.
*
* This is used to regenerate the random number "answer key".
* Each key here is a random number, and the count is the
* number of times the number was emitted.
*/
static class RandomCheckMapper
extends Mapper<WritableComparable<?>, Text, IntWritable, IntWritable> {
public void map(WritableComparable<?> key, Text val,
Context context) throws IOException, InterruptedException {
context.write(new IntWritable(
Integer.parseInt(val.toString().trim())), new IntWritable(1));
}
}
  /**
   * Counts the 1s emitted for each key and emits the resulting (key, count) pair.
   */
static class RandomCheckReducer
extends Reducer<IntWritable, IntWritable, IntWritable, IntWritable> {
public void reduce(IntWritable key, Iterable<IntWritable> it,
Context context) throws IOException, InterruptedException {
int keyint = key.get();
int count = 0;
for (IntWritable iw : it) {
count++;
}
context.write(new IntWritable(keyint), new IntWritable(count));
}
}
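  // Worked example (illustrative): for generated text lines "7", "7", "3" the mapper
  // emits (7,1), (7,1), (3,1), and RandomCheckReducer then produces (7,2) and (3,1).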
/**
* The Merge Job is a really simple one. It takes in
* an int/int key-value set, and emits the same set.
* But it merges identical keys by adding their values.
*
* Thus, the map() function is just the identity function
* and reduce() just sums. Nothing to see here!
*/
static class MergeMapper
extends Mapper<IntWritable, IntWritable, IntWritable, IntWritable> {
public void map(IntWritable key, IntWritable val,
Context context) throws IOException, InterruptedException {
int keyint = key.get();
int valint = val.get();
context.write(new IntWritable(keyint), new IntWritable(valint));
}
}
static class MergeReducer
extends Reducer<IntWritable, IntWritable, IntWritable, IntWritable> {
    // Note: takes Iterable (not Iterator) so that it actually overrides
    // Reducer#reduce and is invoked by the framework.
    public void reduce(IntWritable key, Iterable<IntWritable> it,
        Context context) throws IOException, InterruptedException {
      int keyint = key.get();
      int total = 0;
      for (IntWritable iw : it) {
        total += iw.get();
      }
context.write(new IntWritable(keyint), new IntWritable(total));
}
}
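  // Worked example (illustrative): if the same key appeared in two intermediate files,
  // say as (7,2) and (7,1), the single merge reduce would emit (7,3).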
private static int range = 10;
private static int counts = 100;
private static Random r = new Random();
@After
public void cleanup() {
FileUtil.fullyDelete(TEST_DIR);
}
@Test
public void testMapred() throws Exception {
launch();
}
private static void launch() throws Exception {
//
// Generate distribution of ints. This is the answer key.
//
Configuration conf = new Configuration();
int countsToGo = counts;
int dist[] = new int[range];
for (int i = 0; i < range; i++) {
double avgInts = (1.0 * countsToGo) / (range - i);
dist[i] = (int) Math.max(0, Math.round(avgInts +
(Math.sqrt(avgInts) * r.nextGaussian())));
countsToGo -= dist[i];
}
if (countsToGo > 0) {
dist[dist.length-1] += countsToGo;
}
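    // Worked example (illustrative): with the defaults range=10 and counts=100, each
    // dist[i] is drawn around the remaining average (~10) with Gaussian jitter, and
    // any positive shortfall left over is folded into the last bucket.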
//
// Write the answer key to a file.
//
Path testdir = new Path(TEST_DIR.getAbsolutePath());
if (!fs.mkdirs(testdir)) {
throw new IOException("Mkdirs failed to create " + testdir.toString());
}
Path randomIns = new Path(testdir, "genins");
if (!fs.mkdirs(randomIns)) {
throw new IOException("Mkdirs failed to create " + randomIns.toString());
}
Path answerkey = new Path(randomIns, "answer.key");
SequenceFile.Writer out =
SequenceFile.createWriter(fs, conf, answerkey, IntWritable.class,
IntWritable.class,
SequenceFile.CompressionType.NONE);
try {
for (int i = 0; i < range; i++) {
out.append(new IntWritable(i), new IntWritable(dist[i]));
}
} finally {
out.close();
}
printFiles(randomIns, conf);
//
// Now we need to generate the random numbers according to
// the above distribution.
//
// We create a lot of map tasks, each of which takes at least
// one "line" of the distribution. (That is, a certain number
// X is to be generated Y number of times.)
//
// A map task emits Y key/val pairs. The val is X. The key
// is a randomly-generated number.
//
// The reduce task gets its input sorted by key. That is, sorted
    // in random order. It then emits a single line of text for
    // each of the given values. It does not emit the key.
//
// Because there's just one reduce task, we emit a single big
// file of random numbers.
//
Path randomOuts = new Path(testdir, "genouts");
fs.delete(randomOuts, true);
Job genJob = Job.getInstance(conf);
FileInputFormat.setInputPaths(genJob, randomIns);
genJob.setInputFormatClass(SequenceFileInputFormat.class);
genJob.setMapperClass(RandomGenMapper.class);
FileOutputFormat.setOutputPath(genJob, randomOuts);
genJob.setOutputKeyClass(IntWritable.class);
genJob.setOutputValueClass(IntWritable.class);
genJob.setReducerClass(RandomGenReducer.class);
genJob.setNumReduceTasks(1);
genJob.waitForCompletion(true);
printFiles(randomOuts, conf);
//
// Next, we read the big file in and regenerate the
// original map. It's split into a number of parts.
// (That number is 'intermediateReduces'.)
//
// We have many map tasks, each of which read at least one
// of the output numbers. For each number read in, the
// map task emits a key/value pair where the key is the
// number and the value is "1".
//
// We have a single reduce task, which receives its input
// sorted by the key emitted above. For each key, there will
// be a certain number of "1" values. The reduce task sums
// these values to compute how many times the given key was
// emitted.
//
// The reduce task then emits a key/val pair where the key
// is the number in question, and the value is the number of
// times the key was emitted. This is the same format as the
// original answer key (except that numbers emitted zero times
// will not appear in the regenerated key.) The answer set
// is split into a number of pieces. A final MapReduce job
// will merge them.
//
// There's not really a need to go to 10 reduces here
// instead of 1. But we want to test what happens when
// you have multiple reduces at once.
//
int intermediateReduces = 10;
Path intermediateOuts = new Path(testdir, "intermediateouts");
fs.delete(intermediateOuts, true);
Job checkJob = Job.getInstance(conf);
FileInputFormat.setInputPaths(checkJob, randomOuts);
checkJob.setMapperClass(RandomCheckMapper.class);
FileOutputFormat.setOutputPath(checkJob, intermediateOuts);
checkJob.setOutputKeyClass(IntWritable.class);
checkJob.setOutputValueClass(IntWritable.class);
checkJob.setOutputFormatClass(MapFileOutputFormat.class);
checkJob.setReducerClass(RandomCheckReducer.class);
checkJob.setNumReduceTasks(intermediateReduces);
checkJob.waitForCompletion(true);
printFiles(intermediateOuts, conf);
//
// OK, now we take the output from the last job and
// merge it down to a single file. The map() and reduce()
// functions don't really do anything except reemit tuples.
// But by having a single reduce task here, we end up merging
// all the files.
//
Path finalOuts = new Path(testdir, "finalouts");
fs.delete(finalOuts, true);
Job mergeJob = Job.getInstance(conf);
FileInputFormat.setInputPaths(mergeJob, intermediateOuts);
mergeJob.setInputFormatClass(SequenceFileInputFormat.class);
mergeJob.setMapperClass(MergeMapper.class);
FileOutputFormat.setOutputPath(mergeJob, finalOuts);
mergeJob.setOutputKeyClass(IntWritable.class);
mergeJob.setOutputValueClass(IntWritable.class);
mergeJob.setOutputFormatClass(SequenceFileOutputFormat.class);
mergeJob.setReducerClass(MergeReducer.class);
mergeJob.setNumReduceTasks(1);
mergeJob.waitForCompletion(true);
printFiles(finalOuts, conf);
//
// Finally, we compare the reconstructed answer key with the
// original one. Remember, we need to ignore zero-count items
// in the original key.
//
boolean success = true;
Path recomputedkey = new Path(finalOuts, "part-r-00000");
SequenceFile.Reader in = new SequenceFile.Reader(fs, recomputedkey, conf);
int totalseen = 0;
try {
IntWritable key = new IntWritable();
IntWritable val = new IntWritable();
for (int i = 0; i < range; i++) {
if (dist[i] == 0) {
continue;
}
if (!in.next(key, val)) {
System.err.println("Cannot read entry " + i);
success = false;
break;
} else {
if (!((key.get() == i) && (val.get() == dist[i]))) {
System.err.println("Mismatch! Pos=" + key.get() + ", i=" + i +
", val=" + val.get() + ", dist[i]=" + dist[i]);
success = false;
}
totalseen += val.get();
}
}
if (success) {
if (in.next(key, val)) {
System.err.println("Unnecessary lines in recomputed key!");
success = false;
}
}
} finally {
in.close();
}
int originalTotal = 0;
for (int i = 0; i < dist.length; i++) {
originalTotal += dist[i];
}
System.out.println("Original sum: " + originalTotal);
System.out.println("Recomputed sum: " + totalseen);
//
// Write to "results" whether the test succeeded or not.
//
Path resultFile = new Path(testdir, "results");
BufferedWriter bw = new BufferedWriter(
new OutputStreamWriter(fs.create(resultFile)));
try {
bw.write("Success=" + success + "\n");
System.out.println("Success=" + success);
} finally {
bw.close();
}
assertTrue("testMapRed failed", success);
fs.delete(testdir, true);
}
private static void printTextFile(FileSystem fs, Path p) throws IOException {
BufferedReader in = new BufferedReader(new InputStreamReader(fs.open(p)));
String line;
while ((line = in.readLine()) != null) {
System.out.println(" Row: " + line);
}
in.close();
}
private static void printSequenceFile(FileSystem fs, Path p,
Configuration conf) throws IOException {
SequenceFile.Reader r = new SequenceFile.Reader(fs, p, conf);
Object key = null;
Object value = null;
while ((key = r.next(key)) != null) {
value = r.getCurrentValue(value);
System.out.println(" Row: " + key + ", " + value);
}
r.close();
}
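  /**
   * Heuristic check: genuine SequenceFiles begin with the three magic bytes 'S', 'E', 'Q'.
   */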
private static boolean isSequenceFile(FileSystem fs,
Path f) throws IOException {
DataInputStream in = fs.open(f);
try {
byte[] seq = "SEQ".getBytes();
for (int i = 0; i < seq.length; ++i) {
if (seq[i] != in.read()) {
return false;
}
}
} finally {
in.close();
}
return true;
}
private static void printFiles(Path dir,
Configuration conf) throws IOException {
FileSystem fs = dir.getFileSystem(conf);
for(FileStatus f: fs.listStatus(dir)) {
System.out.println("Reading " + f.getPath() + ": ");
if (f.isDirectory()) {
System.out.println(" it is a map file.");
printSequenceFile(fs, new Path(f.getPath(), "data"), conf);
} else if (isSequenceFile(fs, f.getPath())) {
System.out.println(" it is a sequence file.");
printSequenceFile(fs, f.getPath(), conf);
} else {
System.out.println(" it is a text file.");
printTextFile(fs, f.getPath());
}
}
}
/**
* Launches all the tasks in order.
*/
public static void main(String[] argv) throws Exception {
if (argv.length < 2) {
System.err.println("Usage: TestMapReduce <range> <counts>");
System.err.println();
System.err.println("Note: a good test will have a <counts> value" +
" that is substantially larger than the <range>");
return;
}
int i = 0;
range = Integer.parseInt(argv[i++]);
counts = Integer.parseInt(argv[i++]);
try {
launch();
} finally {
FileUtil.fullyDelete(TEST_DIR);
}
}
}
| 18,672 | 34.840691 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestTaskContext.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.IOException;
import java.util.Iterator;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.HadoopTestCase;
import org.apache.hadoop.mapreduce.MapReduceTestUtil.DataCopyMapper;
import org.apache.hadoop.mapreduce.MapReduceTestUtil.DataCopyReducer;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Test;
/**
 * Tests the context API and {@link StatusReporter#getProgress()} via the
 * {@link TaskAttemptContext#getProgress()} API.
*/
@Ignore
public class TestTaskContext extends HadoopTestCase {
private static final Path rootTempDir =
new Path(System.getProperty("test.build.data", "/tmp"));
private static final Path testRootTempDir =
new Path(rootTempDir, "TestTaskContext");
private static FileSystem fs = null;
@BeforeClass
public static void setup() throws Exception {
fs = FileSystem.getLocal(new Configuration());
fs.delete(testRootTempDir, true);
fs.mkdirs(testRootTempDir);
}
@AfterClass
public static void cleanup() throws Exception {
fs.delete(testRootTempDir, true);
}
public TestTaskContext() throws IOException {
super(HadoopTestCase.CLUSTER_MR , HadoopTestCase.LOCAL_FS, 1, 1);
}
static String myStatus = "my status";
static class MyMapper extends Mapper<LongWritable, Text, LongWritable, Text> {
@Override
protected void setup(Context context) throws IOException {
context.setStatus(myStatus);
assertEquals(myStatus, context.getStatus());
}
}
/**
* Tests context.setStatus method.
* TODO fix testcase
* @throws IOException
* @throws InterruptedException
* @throws ClassNotFoundException
*/
@Test
@Ignore
public void testContextStatus()
throws IOException, InterruptedException, ClassNotFoundException {
Path test = new Path(testRootTempDir, "testContextStatus");
// test with 1 map and 0 reducers
// test with custom task status
int numMaps = 1;
Job job = MapReduceTestUtil.createJob(createJobConf(),
new Path(test, "in"), new Path(test, "out"), numMaps, 0);
job.setMapperClass(MyMapper.class);
job.waitForCompletion(true);
assertTrue("Job failed", job.isSuccessful());
TaskReport[] reports = job.getTaskReports(TaskType.MAP);
assertEquals(numMaps, reports.length);
assertEquals(myStatus, reports[0].getState());
// test with 1 map and 1 reducer
// test with default task status
int numReduces = 1;
job = MapReduceTestUtil.createJob(createJobConf(),
new Path(test, "in"), new Path(test, "out"), numMaps, numReduces);
job.setMapperClass(DataCopyMapper.class);
job.setReducerClass(DataCopyReducer.class);
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(Text.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Text.class);
// fail early
job.setMaxMapAttempts(1);
job.setMaxReduceAttempts(0);
// run the job and wait for completion
job.waitForCompletion(true);
assertTrue("Job failed", job.isSuccessful());
// check map task reports
// TODO fix testcase
// Disabling checks for now to get builds to run
/*
reports = job.getTaskReports(TaskType.MAP);
assertEquals(numMaps, reports.length);
assertEquals("map > sort", reports[0].getState());
// check reduce task reports
reports = job.getTaskReports(TaskType.REDUCE);
assertEquals(numReduces, reports.length);
assertEquals("reduce > reduce", reports[0].getState());
*/
}
// an input with 4 lines
private static final String INPUT = "Hi\nHi\nHi\nHi\n";
private static final int INPUT_LINES = INPUT.split("\n").length;
@SuppressWarnings("unchecked")
static class ProgressCheckerMapper
extends Mapper<LongWritable, Text, Text, Text> {
private int recordCount = 0;
private float progressRange = 0;
@Override
protected void setup(Context context) throws IOException {
// check if the map task attempt progress is 0
assertEquals("Invalid progress in map setup",
0.0f, context.getProgress(), 0f);
// define the progress boundaries
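      // (When the map feeds reducers, roughly the first two thirds of the task's
      // progress is the map phase and the final third the sort/spill phase, so map
      // records only drive progress up to ~0.667 here.)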
if (context.getNumReduceTasks() == 0) {
progressRange = 1f;
} else {
progressRange = 0.667f;
}
}
@Override
protected void map(LongWritable key, Text value,
org.apache.hadoop.mapreduce.Mapper.Context context)
        throws IOException, InterruptedException {
// get the map phase progress
float mapPhaseProgress = ((float)++recordCount)/INPUT_LINES;
// get the weighted map phase progress
float weightedMapProgress = progressRange * mapPhaseProgress;
// check the map progress
assertEquals("Invalid progress in map",
weightedMapProgress, context.getProgress(), 0f);
context.write(new Text(value.toString() + recordCount), value);
};
protected void cleanup(Mapper.Context context)
throws IOException, InterruptedException {
// check if the attempt progress is at the progress boundary
assertEquals("Invalid progress in map cleanup",
progressRange, context.getProgress(), 0f);
};
}
/**
* Tests new MapReduce map task's context.getProgress() method.
*
* @throws IOException
* @throws InterruptedException
* @throws ClassNotFoundException
*/
public void testMapContextProgress()
throws IOException, InterruptedException, ClassNotFoundException {
int numMaps = 1;
Path test = new Path(testRootTempDir, "testMapContextProgress");
Job job = MapReduceTestUtil.createJob(createJobConf(),
new Path(test, "in"), new Path(test, "out"), numMaps, 0, INPUT);
job.setMapperClass(ProgressCheckerMapper.class);
job.setMapOutputKeyClass(Text.class);
// fail early
job.setMaxMapAttempts(1);
job.waitForCompletion(true);
assertTrue("Job failed", job.isSuccessful());
}
@SuppressWarnings("unchecked")
static class ProgressCheckerReducer extends Reducer<Text, Text,
Text, Text> {
private int recordCount = 0;
private final float REDUCE_PROGRESS_RANGE = 1.0f/3;
private final float SHUFFLE_PROGRESS_RANGE = 1 - REDUCE_PROGRESS_RANGE;
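    // The reduce task's progress is split into thirds: copy (shuffle) plus merge/sort
    // account for the first two thirds (SHUFFLE_PROGRESS_RANGE), and the reduce phase
    // itself for the final third (REDUCE_PROGRESS_RANGE).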
protected void setup(final Reducer.Context context)
throws IOException, InterruptedException {
// Note that the reduce will read some segments before calling setup()
float reducePhaseProgress = ((float)++recordCount)/INPUT_LINES;
float weightedReducePhaseProgress =
REDUCE_PROGRESS_RANGE * reducePhaseProgress;
// check that the shuffle phase progress is accounted for
assertEquals("Invalid progress in reduce setup",
SHUFFLE_PROGRESS_RANGE + weightedReducePhaseProgress,
context.getProgress(), 0.01f);
};
public void reduce(Text key, Iterator<Text> values, Context context)
throws IOException, InterruptedException {
float reducePhaseProgress = ((float)++recordCount)/INPUT_LINES;
float weightedReducePhaseProgress =
REDUCE_PROGRESS_RANGE * reducePhaseProgress;
assertEquals("Invalid progress in reduce",
SHUFFLE_PROGRESS_RANGE + weightedReducePhaseProgress,
context.getProgress(), 0.01f);
}
protected void cleanup(Reducer.Context context)
throws IOException, InterruptedException {
// check if the reduce task has progress of 1 in the end
assertEquals("Invalid progress in reduce cleanup",
1.0f, context.getProgress(), 0f);
};
}
/**
* Tests new MapReduce reduce task's context.getProgress() method.
*
* @throws IOException
* @throws InterruptedException
* @throws ClassNotFoundException
*/
@Test
public void testReduceContextProgress()
throws IOException, InterruptedException, ClassNotFoundException {
int numTasks = 1;
Path test = new Path(testRootTempDir, "testReduceContextProgress");
Job job = MapReduceTestUtil.createJob(createJobConf(),
new Path(test, "in"), new Path(test, "out"), numTasks, numTasks,
INPUT);
job.setMapperClass(ProgressCheckerMapper.class);
job.setReducerClass(ProgressCheckerReducer.class);
job.setMapOutputKeyClass(Text.class);
// fail early
job.setMaxMapAttempts(1);
job.setMaxReduceAttempts(1);
job.waitForCompletion(true);
assertTrue("Job failed", job.isSuccessful());
}
}
| 9,672 | 34.5625 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMRJobClient.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.PipedInputStream;
import java.io.PipedOutputStream;
import java.io.PrintStream;
import org.junit.Assert;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.ClusterMapReduceTestCase;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.mapreduce.tools.CLI;
import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
 * Tests the CLI class, which implements the Tool interface.
 * Verifies that the CLI issues the correct commands for the given options and parameters.
*/
public class TestMRJobClient extends ClusterMapReduceTestCase {
private static final Log LOG = LogFactory.getLog(TestMRJobClient.class);
private Job runJob(Configuration conf) throws Exception {
String input = "hello1\nhello2\nhello3\n";
Job job = MapReduceTestUtil.createJob(conf, getInputDir(), getOutputDir(),
1, 1, input);
job.setJobName("mr");
job.setPriority(JobPriority.NORMAL);
job.waitForCompletion(true);
return job;
}
private Job runJobInBackGround(Configuration conf) throws Exception {
String input = "hello1\nhello2\nhello3\n";
Job job = MapReduceTestUtil.createJob(conf, getInputDir(), getOutputDir(),
1, 1, input);
job.setJobName("mr");
job.setPriority(JobPriority.NORMAL);
job.submit();
int i = 0;
while (i++ < 200 && job.getJobID() == null) {
LOG.info("waiting for jobId...");
Thread.sleep(100);
}
return job;
}
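  /**
   * Runs the given Tool with System.out temporarily redirected to {@code out},
   * so callers can capture and inspect the CLI's console output.
   */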
public static int runTool(Configuration conf, Tool tool, String[] args,
OutputStream out) throws Exception {
PrintStream oldOut = System.out;
PrintStream newOut = new PrintStream(out, true);
try {
System.setOut(newOut);
return ToolRunner.run(conf, tool, args);
} finally {
System.setOut(oldOut);
}
}
private static class BadOutputFormat extends TextOutputFormat<Object, Object> {
@Override
public void checkOutputSpecs(JobContext job) throws IOException {
throw new IOException();
}
}
public void testJobSubmissionSpecsAndFiles() throws Exception {
Configuration conf = createJobConf();
Job job = MapReduceTestUtil.createJob(conf, getInputDir(), getOutputDir(),
1, 1);
job.setOutputFormatClass(BadOutputFormat.class);
try {
job.submit();
fail("Should've thrown an exception while checking output specs.");
} catch (Exception e) {
assertTrue(e instanceof IOException);
}
Cluster cluster = new Cluster(conf);
Path jobStagingArea = JobSubmissionFiles.getStagingDir(cluster,
job.getConfiguration());
Path submitJobDir = new Path(jobStagingArea, "JobId");
Path submitJobFile = JobSubmissionFiles.getJobConfPath(submitJobDir);
assertFalse("Shouldn't have created a job file if job specs failed.",
FileSystem.get(conf).exists(submitJobFile));
}
/**
* main test method
*/
public void testJobClient() throws Exception {
Configuration conf = createJobConf();
Job job = runJob(conf);
String jobId = job.getJobID().toString();
// test all jobs list
testAllJobList(jobId, conf);
// test only submitted jobs list
testSubmittedJobList(conf);
// test job counter
testGetCounter(jobId, conf);
// status
testJobStatus(jobId, conf);
// test list of events
testJobEvents(jobId, conf);
// test job history
testJobHistory(conf);
// test tracker list
testListTrackers(conf);
// attempts list
testListAttemptIds(jobId, conf);
// black list
testListBlackList(conf);
// test method main and help screen
startStop();
    // test changing the job priority
testChangingJobPriority(jobId, conf);
// submit job from file
testSubmit(conf);
// kill a task
testKillTask(conf);
// fail a task
testfailTask(conf);
// kill job
testKillJob(conf);
}
/**
* test fail task
*/
private void testfailTask(Configuration conf) throws Exception {
Job job = runJobInBackGround(conf);
CLI jc = createJobClient();
TaskID tid = new TaskID(job.getJobID(), TaskType.MAP, 0);
TaskAttemptID taid = new TaskAttemptID(tid, 1);
ByteArrayOutputStream out = new ByteArrayOutputStream();
// TaskAttemptId is not set
int exitCode = runTool(conf, jc, new String[] { "-fail-task" }, out);
assertEquals("Exit code", -1, exitCode);
runTool(conf, jc, new String[] { "-fail-task", taid.toString() }, out);
String answer = new String(out.toByteArray(), "UTF-8");
Assert
.assertTrue(answer.contains("Killed task " + taid + " by failing it"));
}
/**
* test a kill task
*/
private void testKillTask(Configuration conf) throws Exception {
Job job = runJobInBackGround(conf);
CLI jc = createJobClient();
TaskID tid = new TaskID(job.getJobID(), TaskType.MAP, 0);
TaskAttemptID taid = new TaskAttemptID(tid, 1);
ByteArrayOutputStream out = new ByteArrayOutputStream();
// bad parameters
int exitCode = runTool(conf, jc, new String[] { "-kill-task" }, out);
assertEquals("Exit code", -1, exitCode);
runTool(conf, jc, new String[] { "-kill-task", taid.toString() }, out);
String answer = new String(out.toByteArray(), "UTF-8");
Assert.assertTrue(answer.contains("Killed task " + taid));
}
/**
* test a kill job
*/
private void testKillJob(Configuration conf) throws Exception {
Job job = runJobInBackGround(conf);
String jobId = job.getJobID().toString();
CLI jc = createJobClient();
ByteArrayOutputStream out = new ByteArrayOutputStream();
// without jobId
int exitCode = runTool(conf, jc, new String[] { "-kill" }, out);
assertEquals("Exit code", -1, exitCode);
// good parameters
exitCode = runTool(conf, jc, new String[] { "-kill", jobId }, out);
assertEquals("Exit code", 0, exitCode);
String answer = new String(out.toByteArray(), "UTF-8");
assertTrue(answer.contains("Killed job " + jobId));
}
/**
 * test submitting a job from a configuration file
*/
private void testSubmit(Configuration conf) throws Exception {
CLI jc = createJobClient();
Job job = MapReduceTestUtil.createJob(conf, getInputDir(), getOutputDir(),
1, 1, "ping");
job.setJobName("mr");
job.setPriority(JobPriority.NORMAL);
File fcon = File.createTempFile("config", ".xml");
FileSystem localFs = FileSystem.getLocal(conf);
String fconUri = new Path(fcon.getAbsolutePath())
.makeQualified(localFs.getUri(), localFs.getWorkingDirectory()).toUri()
.toString();
job.getConfiguration().writeXml(new FileOutputStream(fcon));
ByteArrayOutputStream out = new ByteArrayOutputStream();
// bad parameters
int exitCode = runTool(conf, jc, new String[] { "-submit" }, out);
assertEquals("Exit code", -1, exitCode);
exitCode = runTool(conf, jc,
new String[] { "-submit", fconUri }, out);
assertEquals("Exit code", 0, exitCode);
String answer = new String(out.toByteArray());
    // check what was written to the console
assertTrue(answer.contains("Created job "));
}
/**
 * test invoking the command from the console without options
*/
private void startStop() {
ByteArrayOutputStream data = new ByteArrayOutputStream();
PrintStream error = System.err;
System.setErr(new PrintStream(data));
ExitUtil.disableSystemExit();
try {
CLI.main(new String[0]);
fail(" CLI.main should call System.exit");
} catch (ExitUtil.ExitException e) {
ExitUtil.resetFirstExitException();
assertEquals(-1, e.status);
} catch (Exception e) {
} finally {
System.setErr(error);
}
    // the help text should have been written to the console
String s = new String(data.toByteArray());
assertTrue(s.contains("-submit"));
assertTrue(s.contains("-status"));
assertTrue(s.contains("-kill"));
assertTrue(s.contains("-set-priority"));
assertTrue(s.contains("-events"));
assertTrue(s.contains("-history"));
assertTrue(s.contains("-list"));
assertTrue(s.contains("-list-active-trackers"));
assertTrue(s.contains("-list-blacklisted-trackers"));
assertTrue(s.contains("-list-attempt-ids"));
assertTrue(s.contains("-kill-task"));
assertTrue(s.contains("-fail-task"));
assertTrue(s.contains("-logs"));
}
/**
 * test listing blacklisted trackers
*/
private void testListBlackList(Configuration conf) throws Exception {
CLI jc = createJobClient();
ByteArrayOutputStream out = new ByteArrayOutputStream();
int exitCode = runTool(conf, jc, new String[] {
"-list-blacklisted-trackers", "second in" }, out);
assertEquals("Exit code", -1, exitCode);
exitCode = runTool(conf, jc, new String[] { "-list-blacklisted-trackers" },
out);
assertEquals("Exit code", 0, exitCode);
String line;
BufferedReader br = new BufferedReader(new InputStreamReader(
new ByteArrayInputStream(out.toByteArray())));
int counter = 0;
while ((line = br.readLine()) != null) {
LOG.info("line = " + line);
counter++;
}
assertEquals(0, counter);
}
/**
* print AttemptIds list
*/
private void testListAttemptIds(String jobId, Configuration conf)
throws Exception {
CLI jc = createJobClient();
ByteArrayOutputStream out = new ByteArrayOutputStream();
int exitCode = runTool(conf, jc, new String[] { "-list-attempt-ids" }, out);
assertEquals("Exit code", -1, exitCode);
exitCode = runTool(conf, jc, new String[] { "-list-attempt-ids", jobId,
"MAP", "completed" }, out);
assertEquals("Exit code", 0, exitCode);
String line;
BufferedReader br = new BufferedReader(new InputStreamReader(
new ByteArrayInputStream(out.toByteArray())));
int counter = 0;
while ((line = br.readLine()) != null) {
LOG.info("line = " + line);
counter++;
}
assertEquals(1, counter);
}
/**
* print tracker list
*/
private void testListTrackers(Configuration conf) throws Exception {
CLI jc = createJobClient();
ByteArrayOutputStream out = new ByteArrayOutputStream();
int exitCode = runTool(conf, jc, new String[] { "-list-active-trackers",
"second parameter" }, out);
assertEquals("Exit code", -1, exitCode);
exitCode = runTool(conf, jc, new String[] { "-list-active-trackers" }, out);
assertEquals("Exit code", 0, exitCode);
String line;
BufferedReader br = new BufferedReader(new InputStreamReader(
new ByteArrayInputStream(out.toByteArray())));
int counter = 0;
while ((line = br.readLine()) != null) {
LOG.info("line = " + line);
counter++;
}
assertEquals(2, counter);
}
/**
* print job history from file
*/
private void testJobHistory(Configuration conf) throws Exception {
CLI jc = createJobClient();
ByteArrayOutputStream out = new ByteArrayOutputStream();
File f = new File("src/test/resources/job_1329348432655_0001-10.jhist");
FileSystem localFs = FileSystem.getLocal(conf);
String historyFileUri = new Path(f.getAbsolutePath())
.makeQualified(localFs.getUri(), localFs.getWorkingDirectory()).toUri()
.toString();
// bad command
int exitCode = runTool(conf, jc, new String[] { "-history", "pul",
historyFileUri }, out);
assertEquals("Exit code", -1, exitCode);
exitCode = runTool(conf, jc, new String[] { "-history", "all",
historyFileUri }, out);
assertEquals("Exit code", 0, exitCode);
String line;
BufferedReader br = new BufferedReader(new InputStreamReader(
new ByteArrayInputStream(out.toByteArray())));
int counter = 0;
while ((line = br.readLine()) != null) {
LOG.info("line = " + line);
if (line.startsWith("task_")) {
counter++;
}
}
assertEquals(23, counter);
}
/**
* print job events list
*/
private void testJobEvents(String jobId, Configuration conf) throws Exception {
CLI jc = createJobClient();
ByteArrayOutputStream out = new ByteArrayOutputStream();
int exitCode = runTool(conf, jc, new String[] { "-events" }, out);
assertEquals("Exit code", -1, exitCode);
exitCode = runTool(conf, jc, new String[] { "-events", jobId, "0", "100" },
out);
assertEquals("Exit code", 0, exitCode);
String line;
BufferedReader br = new BufferedReader(new InputStreamReader(
new ByteArrayInputStream(out.toByteArray())));
int counter = 0;
String attemptId = ("attempt" + jobId.substring(3));
while ((line = br.readLine()) != null) {
LOG.info("line = " + line);
if (line.contains(attemptId)) {
counter++;
}
}
assertEquals(2, counter);
}
/**
* print job status
*/
private void testJobStatus(String jobId, Configuration conf) throws Exception {
CLI jc = createJobClient();
ByteArrayOutputStream out = new ByteArrayOutputStream();
// bad options
int exitCode = runTool(conf, jc, new String[] { "-status" }, out);
assertEquals("Exit code", -1, exitCode);
exitCode = runTool(conf, jc, new String[] { "-status", jobId }, out);
assertEquals("Exit code", 0, exitCode);
String line;
BufferedReader br = new BufferedReader(new InputStreamReader(
new ByteArrayInputStream(out.toByteArray())));
while ((line = br.readLine()) != null) {
LOG.info("line = " + line);
if (!line.contains("Job state:")) {
continue;
}
break;
}
assertNotNull(line);
assertTrue(line.contains("SUCCEEDED"));
}
/**
* print counters
*/
public void testGetCounter(String jobId, Configuration conf) throws Exception {
ByteArrayOutputStream out = new ByteArrayOutputStream();
// bad command
int exitCode = runTool(conf, createJobClient(),
new String[] { "-counter", }, out);
assertEquals("Exit code", -1, exitCode);
exitCode = runTool(conf, createJobClient(),
new String[] { "-counter", jobId,
"org.apache.hadoop.mapreduce.TaskCounter", "MAP_INPUT_RECORDS" },
out);
assertEquals("Exit code", 0, exitCode);
assertEquals("Counter", "3", out.toString().trim());
}
/**
* print a job list
*/
protected void testAllJobList(String jobId, Configuration conf)
throws Exception {
ByteArrayOutputStream out = new ByteArrayOutputStream();
// bad options
int exitCode = runTool(conf, createJobClient(), new String[] { "-list",
"alldata" }, out);
assertEquals("Exit code", -1, exitCode);
exitCode = runTool(conf, createJobClient(),
// all jobs
new String[] { "-list", "all" }, out);
assertEquals("Exit code", 0, exitCode);
BufferedReader br = new BufferedReader(new InputStreamReader(
new ByteArrayInputStream(out.toByteArray())));
String line;
int counter = 0;
while ((line = br.readLine()) != null) {
LOG.info("line = " + line);
if (line.contains(jobId)) {
counter++;
}
}
assertEquals(1, counter);
out.reset();
}
protected void testSubmittedJobList(Configuration conf) throws Exception {
Job job = runJobInBackGround(conf);
ByteArrayOutputStream out = new ByteArrayOutputStream();
String line;
int counter = 0;
// only submitted
int exitCode =
runTool(conf, createJobClient(), new String[] { "-list" }, out);
assertEquals("Exit code", 0, exitCode);
BufferedReader br =
new BufferedReader(new InputStreamReader(new ByteArrayInputStream(
out.toByteArray())));
counter = 0;
while ((line = br.readLine()) != null) {
LOG.info("line = " + line);
if (line.contains(job.getJobID().toString())) {
counter++;
}
}
    // the submitted job should be listed exactly once
assertEquals(1, counter);
}
protected void verifyJobPriority(String jobId, String priority,
Configuration conf, CLI jc) throws Exception {
PipedInputStream pis = new PipedInputStream();
PipedOutputStream pos = new PipedOutputStream(pis);
int exitCode = runTool(conf, jc, new String[] { "-list", "all" }, pos);
assertEquals("Exit code", 0, exitCode);
BufferedReader br = new BufferedReader(new InputStreamReader(pis));
String line;
while ((line = br.readLine()) != null) {
LOG.info("line = " + line);
if (!line.contains(jobId)) {
continue;
}
assertTrue(line.contains(priority));
break;
}
pis.close();
}
public void testChangingJobPriority(String jobId, Configuration conf)
throws Exception {
int exitCode = runTool(conf, createJobClient(),
new String[] { "-set-priority" }, new ByteArrayOutputStream());
assertEquals("Exit code", -1, exitCode);
exitCode = runTool(conf, createJobClient(), new String[] { "-set-priority",
jobId, "VERY_LOW" }, new ByteArrayOutputStream());
assertEquals("Exit code", 0, exitCode);
    // the priority remains NORMAL because set-priority is not implemented yet
verifyJobPriority(jobId, "NORMAL", conf, createJobClient());
}
protected CLI createJobClient() throws IOException {
return new CLI();
}
}
| 18,435 | 33.204082 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMROutputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapred.JobConf;
import org.junit.Test;
import static org.junit.Assert.assertTrue;
public class TestMROutputFormat {
@Test
public void testJobSubmission() throws Exception {
JobConf conf = new JobConf();
Job job = new Job(conf);
job.setInputFormatClass(TestInputFormat.class);
job.setMapperClass(TestMapper.class);
job.setOutputFormatClass(TestOutputFormat.class);
job.setOutputKeyClass(IntWritable.class);
job.setOutputValueClass(IntWritable.class);
job.waitForCompletion(true);
assertTrue(job.isSuccessful());
}
public static class TestMapper
extends Mapper<IntWritable, IntWritable, IntWritable, IntWritable> {
public void map(IntWritable key, IntWritable value, Context context)
throws IOException, InterruptedException {
context.write(key, value);
}
}
}
class TestInputFormat extends InputFormat<IntWritable, IntWritable> {
@Override
public RecordReader<IntWritable, IntWritable> createRecordReader(
InputSplit split, TaskAttemptContext context) throws IOException,
InterruptedException {
return new RecordReader<IntWritable, IntWritable>() {
private boolean done = false;
@Override
public void close() throws IOException {
}
@Override
public IntWritable getCurrentKey() throws IOException,
InterruptedException {
return new IntWritable(0);
}
@Override
public IntWritable getCurrentValue() throws IOException,
InterruptedException {
return new IntWritable(0);
}
@Override
public float getProgress() throws IOException, InterruptedException {
        return done ? 1 : 0; // report completion once the single record has been consumed
}
@Override
public void initialize(InputSplit split, TaskAttemptContext context)
throws IOException, InterruptedException {
}
@Override
public boolean nextKeyValue() throws IOException, InterruptedException {
if (!done) {
done = true;
return true;
}
return false;
}
};
}
@Override
public List<InputSplit> getSplits(JobContext context) throws IOException,
InterruptedException {
List<InputSplit> list = new ArrayList<InputSplit>();
list.add(new TestInputSplit());
return list;
}
}
class TestInputSplit extends InputSplit implements Writable {
@Override
public long getLength() throws IOException, InterruptedException {
return 1;
}
@Override
public String[] getLocations() throws IOException, InterruptedException {
String[] hosts = {"localhost"};
return hosts;
}
@Override
public void readFields(DataInput in) throws IOException {
}
@Override
public void write(DataOutput out) throws IOException {
}
}
class TestOutputFormat extends OutputFormat<IntWritable, IntWritable>
implements Configurable {
public static final String TEST_CONFIG_NAME = "mapred.test.jobsubmission";
private Configuration conf;
@Override
public void checkOutputSpecs(JobContext context) throws IOException,
InterruptedException {
conf.setBoolean(TEST_CONFIG_NAME, true);
}
@Override
public OutputCommitter getOutputCommitter(TaskAttemptContext context)
throws IOException, InterruptedException {
return new OutputCommitter() {
@Override
public void abortTask(TaskAttemptContext taskContext) throws IOException {
}
@Override
public void commitTask(TaskAttemptContext taskContext) throws IOException {
}
@Override
public boolean needsTaskCommit(TaskAttemptContext taskContext)
throws IOException {
return false;
}
@Override
public void setupJob(JobContext jobContext) throws IOException {
}
@Override
public void setupTask(TaskAttemptContext taskContext) throws IOException {
}
};
}
@Override
public RecordWriter<IntWritable, IntWritable> getRecordWriter(
TaskAttemptContext context) throws IOException, InterruptedException {
assertTrue(context.getConfiguration().getBoolean(TEST_CONFIG_NAME, false));
return new RecordWriter<IntWritable, IntWritable>() {
@Override
public void close(TaskAttemptContext context) throws IOException,
InterruptedException {
}
@Override
public void write(IntWritable key, IntWritable value) throws IOException,
InterruptedException {
}
};
}
@Override
public Configuration getConf() {
return conf;
}
@Override
public void setConf(Configuration conf) {
this.conf = conf;
}
}
| 5,722 | 26.781553 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/GenericMRLoadGenerator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Random;
import java.util.Stack;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapred.ClusterStatus;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
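/**
 * Synthetic load generator built on the new MapReduce API: it replays data
 * from an input directory (directly or via an indirect file list), generates
 * random text when no input is given, and lets map/reduce output be sampled
 * down to a configured "keep" percentage.
 */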
public class GenericMRLoadGenerator extends Configured implements Tool {
public static String MAP_PRESERVE_PERCENT =
"mapreduce.loadgen.sort.map.preserve.percent";
public static String REDUCE_PRESERVE_PERCENT =
"mapreduce.loadgen.sort.reduce.preserve.percent";
public static String INDIRECT_INPUT_FORMAT =
"mapreduce.loadgen.indirect.input.format";
public static String INDIRECT_INPUT_FILE =
"mapreduce.loadgen.indirect.input.file";
protected static int printUsage() {
    System.err.println(
        "Usage: [-m <maps>] [-r <reduces>]\n" +
        " [-keepmap <percent>] [-keepred <percent>]\n" +
        " [-indir <path>] [-outdir <path>]\n" +
" [-inFormat[Indirect] <InputFormat>] [-outFormat <OutputFormat>]\n" +
" [-outKey <WritableComparable>] [-outValue <Writable>]\n");
GenericOptionsParser.printGenericCommandUsage(System.err);
return -1;
}
/**
* Configure a job given argv.
*/
public static boolean parseArgs(String[] argv, Job job) throws IOException {
if (argv.length < 1) {
return 0 == printUsage();
}
for(int i=0; i < argv.length; ++i) {
if (argv.length == i + 1) {
System.out.println("ERROR: Required parameter missing from " +
argv[i]);
return 0 == printUsage();
}
try {
if ("-r".equals(argv[i])) {
job.setNumReduceTasks(Integer.parseInt(argv[++i]));
} else if ("-inFormat".equals(argv[i])) {
job.setInputFormatClass(
Class.forName(argv[++i]).asSubclass(InputFormat.class));
} else if ("-outFormat".equals(argv[i])) {
job.setOutputFormatClass(
Class.forName(argv[++i]).asSubclass(OutputFormat.class));
} else if ("-outKey".equals(argv[i])) {
job.setOutputKeyClass(
Class.forName(argv[++i]).asSubclass(WritableComparable.class));
} else if ("-outValue".equals(argv[i])) {
job.setOutputValueClass(
Class.forName(argv[++i]).asSubclass(Writable.class));
} else if ("-keepmap".equals(argv[i])) {
job.getConfiguration().set(MAP_PRESERVE_PERCENT, argv[++i]);
} else if ("-keepred".equals(argv[i])) {
job.getConfiguration().set(REDUCE_PRESERVE_PERCENT, argv[++i]);
} else if ("-outdir".equals(argv[i])) {
FileOutputFormat.setOutputPath(job, new Path(argv[++i]));
} else if ("-indir".equals(argv[i])) {
FileInputFormat.addInputPaths(job, argv[++i]);
} else if ("-inFormatIndirect".equals(argv[i])) {
job.getConfiguration().setClass(INDIRECT_INPUT_FORMAT,
Class.forName(argv[++i]).asSubclass(InputFormat.class),
InputFormat.class);
job.setInputFormatClass(IndirectInputFormat.class);
} else {
System.out.println("Unexpected argument: " + argv[i]);
return 0 == printUsage();
}
} catch (NumberFormatException except) {
System.out.println("ERROR: Integer expected instead of " + argv[i]);
return 0 == printUsage();
} catch (Exception e) {
throw (IOException)new IOException().initCause(e);
}
}
return true;
}
public int run(String [] argv) throws Exception {
Job job = Job.getInstance(getConf());
job.setJarByClass(GenericMRLoadGenerator.class);
job.setMapperClass(SampleMapper.class);
job.setReducerClass(SampleReducer.class);
if (!parseArgs(argv, job)) {
return -1;
}
Configuration conf = job.getConfiguration();
if (null == FileOutputFormat.getOutputPath(job)) {
// No output dir? No writes
job.setOutputFormatClass(NullOutputFormat.class);
}
if (0 == FileInputFormat.getInputPaths(job).length) {
// No input dir? Generate random data
System.err.println("No input path; ignoring InputFormat");
confRandom(job);
} else if (null != conf.getClass(INDIRECT_INPUT_FORMAT, null)) {
// specified IndirectInputFormat? Build src list
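      // Every regular file found under the input paths is recorded as a
      // (length, URI) pair in a temporary SequenceFile; IndirectInputFormat
      // below turns each record into one IndirectSplit.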
JobClient jClient = new JobClient(conf);
Path tmpDir = new Path("/tmp");
Random r = new Random();
Path indirInputFile = new Path(tmpDir,
Integer.toString(r.nextInt(Integer.MAX_VALUE), 36) + "_files");
conf.set(INDIRECT_INPUT_FILE, indirInputFile.toString());
SequenceFile.Writer writer = SequenceFile.createWriter(
tmpDir.getFileSystem(conf), conf, indirInputFile,
LongWritable.class, Text.class,
SequenceFile.CompressionType.NONE);
try {
for (Path p : FileInputFormat.getInputPaths(job)) {
FileSystem fs = p.getFileSystem(conf);
Stack<Path> pathstack = new Stack<Path>();
pathstack.push(p);
while (!pathstack.empty()) {
for (FileStatus stat : fs.listStatus(pathstack.pop())) {
if (stat.isDirectory()) {
if (!stat.getPath().getName().startsWith("_")) {
pathstack.push(stat.getPath());
}
} else {
writer.sync();
writer.append(new LongWritable(stat.getLen()),
new Text(stat.getPath().toUri().toString()));
}
}
}
}
} finally {
writer.close();
}
}
Date startTime = new Date();
System.out.println("Job started: " + startTime);
int ret = job.waitForCompletion(true) ? 0 : 1;
Date endTime = new Date();
System.out.println("Job ended: " + endTime);
System.out.println("The job took " +
(endTime.getTime() - startTime.getTime()) /1000 +
" seconds.");
return ret;
}
/**
* Main driver/hook into ToolRunner.
*/
public static void main(String[] argv) throws Exception {
int res =
ToolRunner.run(new Configuration(), new GenericMRLoadGenerator(), argv);
System.exit(res);
}
static class RandomInputFormat extends InputFormat<Text, Text> {
public List<InputSplit> getSplits(JobContext job) {
int numSplits = job.getConfiguration().getInt(MRJobConfig.NUM_MAPS, 1);
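      // One dummy split per requested map task; the fake path only serves as
      // the record key and is never opened.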
List<InputSplit> splits = new ArrayList<InputSplit>();
for (int i = 0; i < numSplits; ++i) {
splits.add(new IndirectInputFormat.IndirectSplit(
new Path("ignore" + i), 1));
}
return splits;
}
public RecordReader<Text,Text> createRecordReader(InputSplit split,
TaskAttemptContext context) throws IOException {
final IndirectInputFormat.IndirectSplit clSplit =
(IndirectInputFormat.IndirectSplit)split;
return new RecordReader<Text,Text>() {
boolean once = true;
Text key = new Text();
Text value = new Text();
public boolean nextKeyValue() {
if (once) {
key.set(clSplit.getPath().toString());
once = false;
return true;
}
return false;
}
public void initialize(InputSplit split, TaskAttemptContext context)
throws IOException, InterruptedException {}
public Text getCurrentKey() { return key; }
public Text getCurrentValue() { return value; }
public void close() { }
public float getProgress() { return 0.0f; }
};
}
}
static enum Counters { RECORDS_WRITTEN, BYTES_WRITTEN }
static class RandomMapOutput extends Mapper<Text,Text,Text,Text> {
StringBuilder sentence = new StringBuilder();
int keymin;
int keymax;
int valmin;
int valmax;
long bytesToWrite;
Random r = new Random();
private int generateSentence(Text t, int noWords) {
sentence.setLength(0);
--noWords;
for (int i = 0; i < noWords; ++i) {
sentence.append(words[r.nextInt(words.length)]);
sentence.append(" ");
}
if (noWords >= 0) sentence.append(words[r.nextInt(words.length)]);
t.set(sentence.toString());
return sentence.length();
}
public void setup(Context context) {
      // Read the limits from the job's configuration; a fresh Configuration()
      // would ignore per-job settings such as BYTES_PER_MAP.
      Configuration conf = context.getConfiguration();
bytesToWrite = conf.getLong(RandomTextWriter.BYTES_PER_MAP,
1*1024*1024*1024);
keymin = conf.getInt(RandomTextWriter.MIN_KEY, 5);
keymax = conf.getInt(RandomTextWriter.MAX_KEY, 10);
valmin = conf.getInt(RandomTextWriter.MIN_VALUE, 5);
valmax = conf.getInt(RandomTextWriter.MAX_VALUE, 10);
}
public void map(Text key, Text val, Context context)
throws IOException, InterruptedException {
long acc = 0L;
long recs = 0;
final int keydiff = keymax - keymin;
final int valdiff = valmax - valmin;
for (long i = 0L; acc < bytesToWrite; ++i) {
int recacc = 0;
recacc += generateSentence(key, keymin +
(0 == keydiff ? 0 : r.nextInt(keydiff)));
recacc += generateSentence(val, valmin +
(0 == valdiff ? 0 : r.nextInt(valdiff)));
context.write(key, val);
++recs;
acc += recacc;
context.getCounter(Counters.BYTES_WRITTEN).increment(recacc);
context.getCounter(Counters.RECORDS_WRITTEN).increment(1);
context.setStatus(acc + "/" + (bytesToWrite - acc) + " bytes");
}
context.setStatus("Wrote " + recs + " records");
}
}
/**
* When no input dir is specified, generate random data.
*/
protected static void confRandom(Job job)
throws IOException {
// from RandomWriter
job.setInputFormatClass(RandomInputFormat.class);
job.setMapperClass(RandomMapOutput.class);
Configuration conf = job.getConfiguration();
final ClusterStatus cluster = new JobClient(conf).getClusterStatus();
int numMapsPerHost = conf.getInt(RandomTextWriter.MAPS_PER_HOST, 10);
long numBytesToWritePerMap =
conf.getLong(RandomTextWriter.BYTES_PER_MAP, 1*1024*1024*1024);
if (numBytesToWritePerMap == 0) {
throw new IOException(
"Cannot have " + RandomTextWriter.BYTES_PER_MAP + " set to 0");
}
long totalBytesToWrite = conf.getLong(RandomTextWriter.TOTAL_BYTES,
numMapsPerHost * numBytesToWritePerMap * cluster.getTaskTrackers());
int numMaps = (int)(totalBytesToWrite / numBytesToWritePerMap);
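    // If the requested total is smaller than one map's quota, still run a
    // single map and shrink its per-map byte count to the total.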
if (numMaps == 0 && totalBytesToWrite > 0) {
numMaps = 1;
conf.setLong(RandomTextWriter.BYTES_PER_MAP, totalBytesToWrite);
}
conf.setInt(MRJobConfig.NUM_MAPS, numMaps);
}
// Sampling //
static abstract class SampleMapBase<K extends WritableComparable<?>,
V extends Writable> extends Mapper<K, V, K, V> {
private long total;
private long kept = 0;
private float keep;
public void setup(Context context) {
this.keep = context.getConfiguration().
getFloat(MAP_PRESERVE_PERCENT, (float)100.0) / (float)100.0;
}
protected void emit(K key, V val, Context context)
throws IOException, InterruptedException {
++total;
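      // Emit until the kept/total ratio reaches the preserve fraction:
      // keep == 1.0 writes each record exactly once, smaller values sample,
      // larger values duplicate.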
while((float) kept / total < keep) {
++kept;
context.write(key, val);
}
}
}
static abstract class SampleReduceBase<K extends WritableComparable<?>,
V extends Writable> extends Reducer<K, V, K, V> {
private long total;
private long kept = 0;
private float keep;
public void setup(Context context) {
this.keep = context.getConfiguration().getFloat(
REDUCE_PRESERVE_PERCENT, (float)100.0) / (float)100.0;
}
protected void emit(K key, V val, Context context)
throws IOException, InterruptedException {
++total;
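      // Same preserve-ratio logic as SampleMapBase.emit(), applied on the
      // reduce side.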
while((float) kept / total < keep) {
++kept;
context.write(key, val);
}
}
}
public static class SampleMapper<K extends WritableComparable<?>,
V extends Writable>
extends SampleMapBase<K,V> {
public void map(K key, V val, Context context)
throws IOException, InterruptedException {
emit(key, val, context);
}
}
public static class SampleReducer<K extends WritableComparable<?>,
V extends Writable>
extends SampleReduceBase<K,V> {
public void reduce(K key, Iterable<V> values, Context context)
throws IOException, InterruptedException {
for (V value : values) {
emit(key, value, context);
}
}
}
// Indirect reads //
/**
* Obscures the InputFormat and location information to simulate maps
* reading input from arbitrary locations ("indirect" reads).
*/
static class IndirectInputFormat<K, V> extends InputFormat<K, V> {
static class IndirectSplit extends InputSplit {
Path file;
long len;
public IndirectSplit() { }
public IndirectSplit(Path file, long len) {
this.file = file;
this.len = len;
}
public Path getPath() { return file; }
public long getLength() { return len; }
public String[] getLocations() throws IOException {
return new String[]{};
}
public void write(DataOutput out) throws IOException {
WritableUtils.writeString(out, file.toString());
WritableUtils.writeVLong(out, len);
}
public void readFields(DataInput in) throws IOException {
file = new Path(WritableUtils.readString(in));
len = WritableUtils.readVLong(in);
}
}
public List<InputSplit> getSplits(JobContext job)
throws IOException {
Configuration conf = job.getConfiguration();
Path src = new Path(conf.get(INDIRECT_INPUT_FILE, null));
FileSystem fs = src.getFileSystem(conf);
List<InputSplit> splits = new ArrayList<InputSplit>();
LongWritable key = new LongWritable();
Text value = new Text();
for (SequenceFile.Reader sl = new SequenceFile.Reader(fs, src, conf);
sl.next(key, value);) {
splits.add(new IndirectSplit(new Path(value.toString()), key.get()));
}
return splits;
}
@SuppressWarnings("unchecked")
public RecordReader<K, V> createRecordReader(InputSplit split,
TaskAttemptContext context) throws IOException, InterruptedException {
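      // Delegate record reading to the configured underlying InputFormat,
      // handing it a FileSplit that covers the whole indirect file.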
Configuration conf = context.getConfiguration();
InputFormat<K, V> indirIF = (InputFormat)ReflectionUtils.newInstance(
conf.getClass(INDIRECT_INPUT_FORMAT,
SequenceFileInputFormat.class), conf);
IndirectSplit is = ((IndirectSplit)split);
return indirIF.createRecordReader(new FileSplit(is.getPath(), 0,
is.getLength(), (String[])null), context);
}
}
/**
* A random list of 1000 words from /usr/share/dict/words
*/
private static final String[] words = {
"diurnalness", "Homoiousian", "spiranthic", "tetragynian",
"silverhead", "ungreat", "lithograph", "exploiter",
"physiologian", "by", "hellbender", "Filipendula",
"undeterring", "antiscolic", "pentagamist", "hypoid",
"cacuminal", "sertularian", "schoolmasterism", "nonuple",
"gallybeggar", "phytonic", "swearingly", "nebular",
"Confervales", "thermochemically", "characinoid", "cocksuredom",
"fallacious", "feasibleness", "debromination", "playfellowship",
"tramplike", "testa", "participatingly", "unaccessible",
"bromate", "experientialist", "roughcast", "docimastical",
"choralcelo", "blightbird", "peptonate", "sombreroed",
"unschematized", "antiabolitionist", "besagne", "mastication",
"bromic", "sviatonosite", "cattimandoo", "metaphrastical",
"endotheliomyoma", "hysterolysis", "unfulminated", "Hester",
"oblongly", "blurredness", "authorling", "chasmy",
"Scorpaenidae", "toxihaemia", "Dictograph", "Quakerishly",
"deaf", "timbermonger", "strammel", "Thraupidae",
"seditious", "plerome", "Arneb", "eristically",
"serpentinic", "glaumrie", "socioromantic", "apocalypst",
"tartrous", "Bassaris", "angiolymphoma", "horsefly",
"kenno", "astronomize", "euphemious", "arsenide",
"untongued", "parabolicness", "uvanite", "helpless",
"gemmeous", "stormy", "templar", "erythrodextrin",
"comism", "interfraternal", "preparative", "parastas",
"frontoorbital", "Ophiosaurus", "diopside", "serosanguineous",
"ununiformly", "karyological", "collegian", "allotropic",
"depravity", "amylogenesis", "reformatory", "epidymides",
"pleurotropous", "trillium", "dastardliness", "coadvice",
"embryotic", "benthonic", "pomiferous", "figureheadship",
"Megaluridae", "Harpa", "frenal", "commotion",
"abthainry", "cobeliever", "manilla", "spiciferous",
"nativeness", "obispo", "monilioid", "biopsic",
"valvula", "enterostomy", "planosubulate", "pterostigma",
"lifter", "triradiated", "venialness", "tum",
"archistome", "tautness", "unswanlike", "antivenin",
"Lentibulariaceae", "Triphora", "angiopathy", "anta",
"Dawsonia", "becomma", "Yannigan", "winterproof",
"antalgol", "harr", "underogating", "ineunt",
"cornberry", "flippantness", "scyphostoma", "approbation",
"Ghent", "Macraucheniidae", "scabbiness", "unanatomized",
"photoelasticity", "eurythermal", "enation", "prepavement",
"flushgate", "subsequentially", "Edo", "antihero",
"Isokontae", "unforkedness", "porriginous", "daytime",
"nonexecutive", "trisilicic", "morphiomania", "paranephros",
"botchedly", "impugnation", "Dodecatheon", "obolus",
"unburnt", "provedore", "Aktistetae", "superindifference",
"Alethea", "Joachimite", "cyanophilous", "chorograph",
"brooky", "figured", "periclitation", "quintette",
"hondo", "ornithodelphous", "unefficient", "pondside",
"bogydom", "laurinoxylon", "Shiah", "unharmed",
"cartful", "noncrystallized", "abusiveness", "cromlech",
"japanned", "rizzomed", "underskin", "adscendent",
"allectory", "gelatinousness", "volcano", "uncompromisingly",
"cubit", "idiotize", "unfurbelowed", "undinted",
"magnetooptics", "Savitar", "diwata", "ramosopalmate",
"Pishquow", "tomorn", "apopenptic", "Haversian",
"Hysterocarpus", "ten", "outhue", "Bertat",
"mechanist", "asparaginic", "velaric", "tonsure",
"bubble", "Pyrales", "regardful", "glyphography",
"calabazilla", "shellworker", "stradametrical", "havoc",
"theologicopolitical", "sawdust", "diatomaceous", "jajman",
"temporomastoid", "Serrifera", "Ochnaceae", "aspersor",
"trailmaking", "Bishareen", "digitule", "octogynous",
"epididymitis", "smokefarthings", "bacillite", "overcrown",
"mangonism", "sirrah", "undecorated", "psychofugal",
"bismuthiferous", "rechar", "Lemuridae", "frameable",
"thiodiazole", "Scanic", "sportswomanship", "interruptedness",
"admissory", "osteopaedion", "tingly", "tomorrowness",
"ethnocracy", "trabecular", "vitally", "fossilism",
"adz", "metopon", "prefatorial", "expiscate",
"diathermacy", "chronist", "nigh", "generalizable",
"hysterogen", "aurothiosulphuric", "whitlowwort", "downthrust",
"Protestantize", "monander", "Itea", "chronographic",
"silicize", "Dunlop", "eer", "componental",
"spot", "pamphlet", "antineuritic", "paradisean",
"interruptor", "debellator", "overcultured", "Florissant",
"hyocholic", "pneumatotherapy", "tailoress", "rave",
"unpeople", "Sebastian", "thermanesthesia", "Coniferae",
"swacking", "posterishness", "ethmopalatal", "whittle",
"analgize", "scabbardless", "naught", "symbiogenetically",
"trip", "parodist", "columniform", "trunnel",
"yawler", "goodwill", "pseudohalogen", "swangy",
"cervisial", "mediateness", "genii", "imprescribable",
"pony", "consumptional", "carposporangial", "poleax",
"bestill", "subfebrile", "sapphiric", "arrowworm",
"qualminess", "ultraobscure", "thorite", "Fouquieria",
"Bermudian", "prescriber", "elemicin", "warlike",
"semiangle", "rotular", "misthread", "returnability",
"seraphism", "precostal", "quarried", "Babylonism",
"sangaree", "seelful", "placatory", "pachydermous",
"bozal", "galbulus", "spermaphyte", "cumbrousness",
"pope", "signifier", "Endomycetaceae", "shallowish",
"sequacity", "periarthritis", "bathysphere", "pentosuria",
"Dadaism", "spookdom", "Consolamentum", "afterpressure",
"mutter", "louse", "ovoviviparous", "corbel",
"metastoma", "biventer", "Hydrangea", "hogmace",
"seizing", "nonsuppressed", "oratorize", "uncarefully",
"benzothiofuran", "penult", "balanocele", "macropterous",
"dishpan", "marten", "absvolt", "jirble",
"parmelioid", "airfreighter", "acocotl", "archesporial",
"hypoplastral", "preoral", "quailberry", "cinque",
"terrestrially", "stroking", "limpet", "moodishness",
"canicule", "archididascalian", "pompiloid", "overstaid",
"introducer", "Italical", "Christianopaganism", "prescriptible",
"subofficer", "danseuse", "cloy", "saguran",
"frictionlessly", "deindividualization", "Bulanda", "ventricous",
"subfoliar", "basto", "scapuloradial", "suspend",
"stiffish", "Sphenodontidae", "eternal", "verbid",
"mammonish", "upcushion", "barkometer", "concretion",
"preagitate", "incomprehensible", "tristich", "visceral",
"hemimelus", "patroller", "stentorophonic", "pinulus",
"kerykeion", "brutism", "monstership", "merciful",
"overinstruct", "defensibly", "bettermost", "splenauxe",
"Mormyrus", "unreprimanded", "taver", "ell",
"proacquittal", "infestation", "overwoven", "Lincolnlike",
"chacona", "Tamil", "classificational", "lebensraum",
"reeveland", "intuition", "Whilkut", "focaloid",
"Eleusinian", "micromembrane", "byroad", "nonrepetition",
"bacterioblast", "brag", "ribaldrous", "phytoma",
"counteralliance", "pelvimetry", "pelf", "relaster",
"thermoresistant", "aneurism", "molossic", "euphonym",
"upswell", "ladhood", "phallaceous", "inertly",
"gunshop", "stereotypography", "laryngic", "refasten",
"twinling", "oflete", "hepatorrhaphy", "electrotechnics",
"cockal", "guitarist", "topsail", "Cimmerianism",
"larklike", "Llandovery", "pyrocatechol", "immatchable",
"chooser", "metrocratic", "craglike", "quadrennial",
"nonpoisonous", "undercolored", "knob", "ultratense",
"balladmonger", "slait", "sialadenitis", "bucketer",
"magnificently", "unstipulated", "unscourged", "unsupercilious",
"packsack", "pansophism", "soorkee", "percent",
"subirrigate", "champer", "metapolitics", "spherulitic",
"involatile", "metaphonical", "stachyuraceous", "speckedness",
"bespin", "proboscidiform", "gul", "squit",
"yeelaman", "peristeropode", "opacousness", "shibuichi",
"retinize", "yote", "misexposition", "devilwise",
"pumpkinification", "vinny", "bonze", "glossing",
"decardinalize", "transcortical", "serphoid", "deepmost",
"guanajuatite", "wemless", "arval", "lammy",
"Effie", "Saponaria", "tetrahedral", "prolificy",
"excerpt", "dunkadoo", "Spencerism", "insatiately",
"Gilaki", "oratorship", "arduousness", "unbashfulness",
"Pithecolobium", "unisexuality", "veterinarian", "detractive",
"liquidity", "acidophile", "proauction", "sural",
"totaquina", "Vichyite", "uninhabitedness", "allegedly",
"Gothish", "manny", "Inger", "flutist",
"ticktick", "Ludgatian", "homotransplant", "orthopedical",
"diminutively", "monogoneutic", "Kenipsim", "sarcologist",
"drome", "stronghearted", "Fameuse", "Swaziland",
"alen", "chilblain", "beatable", "agglomeratic",
"constitutor", "tendomucoid", "porencephalous", "arteriasis",
"boser", "tantivy", "rede", "lineamental",
"uncontradictableness", "homeotypical", "masa", "folious",
"dosseret", "neurodegenerative", "subtransverse", "Chiasmodontidae",
"palaeotheriodont", "unstressedly", "chalcites", "piquantness",
"lampyrine", "Aplacentalia", "projecting", "elastivity",
"isopelletierin", "bladderwort", "strander", "almud",
"iniquitously", "theologal", "bugre", "chargeably",
"imperceptivity", "meriquinoidal", "mesophyte", "divinator",
"perfunctory", "counterappellant", "synovial", "charioteer",
"crystallographical", "comprovincial", "infrastapedial", "pleasurehood",
"inventurous", "ultrasystematic", "subangulated", "supraoesophageal",
"Vaishnavism", "transude", "chrysochrous", "ungrave",
"reconciliable", "uninterpleaded", "erlking", "wherefrom",
"aprosopia", "antiadiaphorist", "metoxazine", "incalculable",
"umbellic", "predebit", "foursquare", "unimmortal",
"nonmanufacture", "slangy", "predisputant", "familist",
"preaffiliate", "friarhood", "corelysis", "zoonitic",
"halloo", "paunchy", "neuromimesis", "aconitine",
"hackneyed", "unfeeble", "cubby", "autoschediastical",
"naprapath", "lyrebird", "inexistency", "leucophoenicite",
"ferrogoslarite", "reperuse", "uncombable", "tambo",
"propodiale", "diplomatize", "Russifier", "clanned",
"corona", "michigan", "nonutilitarian", "transcorporeal",
"bought", "Cercosporella", "stapedius", "glandularly",
"pictorially", "weism", "disilane", "rainproof",
"Caphtor", "scrubbed", "oinomancy", "pseudoxanthine",
"nonlustrous", "redesertion", "Oryzorictinae", "gala",
"Mycogone", "reappreciate", "cyanoguanidine", "seeingness",
"breadwinner", "noreast", "furacious", "epauliere",
"omniscribent", "Passiflorales", "uninductive", "inductivity",
"Orbitolina", "Semecarpus", "migrainoid", "steprelationship",
"phlogisticate", "mesymnion", "sloped", "edificator",
"beneficent", "culm", "paleornithology", "unurban",
"throbless", "amplexifoliate", "sesquiquintile", "sapience",
"astucious", "dithery", "boor", "ambitus",
"scotching", "uloid", "uncompromisingness", "hoove",
"waird", "marshiness", "Jerusalem", "mericarp",
"unevoked", "benzoperoxide", "outguess", "pyxie",
"hymnic", "euphemize", "mendacity", "erythremia",
"rosaniline", "unchatteled", "lienteria", "Bushongo",
"dialoguer", "unrepealably", "rivethead", "antideflation",
"vinegarish", "manganosiderite", "doubtingness", "ovopyriform",
"Cephalodiscus", "Muscicapa", "Animalivora", "angina",
"planispheric", "ipomoein", "cuproiodargyrite", "sandbox",
"scrat", "Munnopsidae", "shola", "pentafid",
"overstudiousness", "times", "nonprofession", "appetible",
"valvulotomy", "goladar", "uniarticular", "oxyterpene",
"unlapsing", "omega", "trophonema", "seminonflammable",
"circumzenithal", "starer", "depthwise", "liberatress",
"unleavened", "unrevolting", "groundneedle", "topline",
"wandoo", "umangite", "ordinant", "unachievable",
"oversand", "snare", "avengeful", "unexplicit",
"mustafina", "sonable", "rehabilitative", "eulogization",
"papery", "technopsychology", "impressor", "cresylite",
"entame", "transudatory", "scotale", "pachydermatoid",
"imaginary", "yeat", "slipped", "stewardship",
"adatom", "cockstone", "skyshine", "heavenful",
"comparability", "exprobratory", "dermorhynchous", "parquet",
"cretaceous", "vesperal", "raphis", "undangered",
"Glecoma", "engrain", "counteractively", "Zuludom",
"orchiocatabasis", "Auriculariales", "warriorwise", "extraorganismal",
"overbuilt", "alveolite", "tetchy", "terrificness",
"widdle", "unpremonished", "rebilling", "sequestrum",
"equiconvex", "heliocentricism", "catabaptist", "okonite",
"propheticism", "helminthagogic", "calycular", "giantly",
"wingable", "golem", "unprovided", "commandingness",
"greave", "haply", "doina", "depressingly",
"subdentate", "impairment", "decidable", "neurotrophic",
"unpredict", "bicorporeal", "pendulant", "flatman",
"intrabred", "toplike", "Prosobranchiata", "farrantly",
"toxoplasmosis", "gorilloid", "dipsomaniacal", "aquiline",
"atlantite", "ascitic", "perculsive", "prospectiveness",
"saponaceous", "centrifugalization", "dinical", "infravaginal",
"beadroll", "affaite", "Helvidian", "tickleproof",
"abstractionism", "enhedge", "outwealth", "overcontribute",
"coldfinch", "gymnastic", "Pincian", "Munychian",
"codisjunct", "quad", "coracomandibular", "phoenicochroite",
"amender", "selectivity", "putative", "semantician",
"lophotrichic", "Spatangoidea", "saccharogenic", "inferent",
"Triconodonta", "arrendation", "sheepskin", "taurocolla",
"bunghole", "Machiavel", "triakistetrahedral", "dehairer",
"prezygapophysial", "cylindric", "pneumonalgia", "sleigher",
"emir", "Socraticism", "licitness", "massedly",
"instructiveness", "sturdied", "redecrease", "starosta",
"evictor", "orgiastic", "squdge", "meloplasty",
"Tsonecan", "repealableness", "swoony", "myesthesia",
"molecule", "autobiographist", "reciprocation", "refective",
"unobservantness", "tricae", "ungouged", "floatability",
"Mesua", "fetlocked", "chordacentrum", "sedentariness",
"various", "laubanite", "nectopod", "zenick",
"sequentially", "analgic", "biodynamics", "posttraumatic",
"nummi", "pyroacetic", "bot", "redescend",
"dispermy", "undiffusive", "circular", "trillion",
"Uraniidae", "ploration", "discipular", "potentness",
"sud", "Hu", "Eryon", "plugger",
"subdrainage", "jharal", "abscission", "supermarket",
"countergabion", "glacierist", "lithotresis", "minniebush",
"zanyism", "eucalypteol", "sterilely", "unrealize",
"unpatched", "hypochondriacism", "critically", "cheesecutter",
};
}
| 31,407 | 42.865922 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMapperReducerCleanup.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.BufferedWriter;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.LineRecordReader;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.mapreduce.lib.reduce.IntSumReducer;
import org.junit.Assert;
import org.junit.Test;
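/**
 * Verifies that Mapper/Reducer cleanup() and RecordReader/RecordWriter
 * close() run even when the map or reduce phase throws: each tracking class
 * flips a static flag that the tests assert on afterwards.
 */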
public class TestMapperReducerCleanup {
static boolean mapCleanup = false;
static boolean reduceCleanup = false;
static boolean recordReaderCleanup = false;
static boolean recordWriterCleanup = false;
static void reset() {
mapCleanup = false;
reduceCleanup = false;
recordReaderCleanup = false;
recordWriterCleanup = false;
}
private static class FailingMapper
extends Mapper<LongWritable, Text, LongWritable, Text> {
    /** Always throws, so the cleanup-on-failure path gets exercised. */
public void map(LongWritable key, Text val, Context c)
throws IOException, InterruptedException {
throw new IOException("TestMapperReducerCleanup");
}
protected void cleanup(Context context)
throws IOException, InterruptedException {
mapCleanup = true;
super.cleanup(context);
}
}
private static class TrackingTokenizerMapper
extends Mapper<Object, Text, Text, IntWritable> {
private final static IntWritable one = new IntWritable(1);
private Text word = new Text();
public void map(Object key, Text value, Context context
) throws IOException, InterruptedException {
StringTokenizer itr = new StringTokenizer(value.toString());
while (itr.hasMoreTokens()) {
word.set(itr.nextToken());
context.write(word, one);
}
}
@SuppressWarnings({ "rawtypes", "unchecked" })
@Override
protected void cleanup(org.apache.hadoop.mapreduce.Mapper.Context context)
throws IOException, InterruptedException {
mapCleanup = true;
super.cleanup(context);
}
}
private static class FailingReducer
extends Reducer<LongWritable, Text, LongWritable, LongWritable> {
public void reduce(LongWritable key, Iterable<Text> vals, Context context)
throws IOException, InterruptedException {
throw new IOException("TestMapperReducerCleanup");
}
protected void cleanup(Context context)
throws IOException, InterruptedException {
reduceCleanup = true;
super.cleanup(context);
}
}
@SuppressWarnings("rawtypes")
private static class TrackingIntSumReducer extends IntSumReducer {
@SuppressWarnings("unchecked")
protected void cleanup(Context context)
throws IOException, InterruptedException {
reduceCleanup = true;
super.cleanup(context);
}
}
public static class TrackingTextInputFormat extends TextInputFormat {
public static class TrackingRecordReader extends LineRecordReader {
@Override
public synchronized void close() throws IOException {
recordReaderCleanup = true;
super.close();
}
}
@Override
public RecordReader<LongWritable, Text> createRecordReader(
InputSplit split, TaskAttemptContext context) {
return new TrackingRecordReader();
}
}
@SuppressWarnings("rawtypes")
public static class TrackingTextOutputFormat extends TextOutputFormat {
public static class TrackingRecordWriter extends LineRecordWriter {
public TrackingRecordWriter(DataOutputStream out) {
super(out);
}
@Override
public synchronized void close(TaskAttemptContext context)
throws IOException {
recordWriterCleanup = true;
super.close(context);
}
}
@Override
public RecordWriter getRecordWriter(TaskAttemptContext job)
throws IOException, InterruptedException {
Configuration conf = job.getConfiguration();
Path file = getDefaultWorkFile(job, "");
FileSystem fs = file.getFileSystem(conf);
FSDataOutputStream fileOut = fs.create(file, false);
return new TrackingRecordWriter(fileOut);
}
}
/**
* Create a single input file in the input directory.
* @param dirPath the directory in which the file resides
* @param id the file id number
* @param numRecords how many records to write to each file.
*/
private void createInputFile(Path dirPath, int id, int numRecords)
throws IOException {
final String MESSAGE = "This is a line in a file: ";
Path filePath = new Path(dirPath, "" + id);
Configuration conf = new Configuration();
FileSystem fs = FileSystem.getLocal(conf);
OutputStream os = fs.create(filePath);
BufferedWriter w = new BufferedWriter(new OutputStreamWriter(os));
for (int i = 0; i < numRecords; i++) {
w.write(MESSAGE + id + " " + i + "\n");
}
w.close();
}
private final String INPUT_DIR = "input";
private final String OUTPUT_DIR = "output";
private Path getInputPath() {
String dataDir = System.getProperty("test.build.data");
if (null == dataDir) {
return new Path(INPUT_DIR);
} else {
return new Path(new Path(dataDir), INPUT_DIR);
}
}
private Path getOutputPath() {
String dataDir = System.getProperty("test.build.data");
if (null == dataDir) {
return new Path(OUTPUT_DIR);
} else {
return new Path(new Path(dataDir), OUTPUT_DIR);
}
}
private Path createInput() throws IOException {
Configuration conf = new Configuration();
FileSystem fs = FileSystem.getLocal(conf);
Path inputPath = getInputPath();
// Clear the input directory if it exists, first.
if (fs.exists(inputPath)) {
fs.delete(inputPath, true);
}
// Create an input file
createInputFile(inputPath, 0, 10);
return inputPath;
}
@Test
public void testMapCleanup() throws Exception {
reset();
Job job = Job.getInstance();
Path inputPath = createInput();
Path outputPath = getOutputPath();
Configuration conf = new Configuration();
FileSystem fs = FileSystem.getLocal(conf);
if (fs.exists(outputPath)) {
fs.delete(outputPath, true);
}
job.setMapperClass(FailingMapper.class);
job.setInputFormatClass(TrackingTextInputFormat.class);
job.setOutputFormatClass(TrackingTextOutputFormat.class);
job.setNumReduceTasks(0);
FileInputFormat.addInputPath(job, inputPath);
FileOutputFormat.setOutputPath(job, outputPath);
job.waitForCompletion(true);
Assert.assertTrue(mapCleanup);
Assert.assertTrue(recordReaderCleanup);
Assert.assertTrue(recordWriterCleanup);
}
@Test
public void testReduceCleanup() throws Exception {
reset();
Job job = Job.getInstance();
Path inputPath = createInput();
Path outputPath = getOutputPath();
Configuration conf = new Configuration();
FileSystem fs = FileSystem.getLocal(conf);
if (fs.exists(outputPath)) {
fs.delete(outputPath, true);
}
job.setMapperClass(TrackingTokenizerMapper.class);
job.setReducerClass(FailingReducer.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(IntWritable.class);
job.setInputFormatClass(TrackingTextInputFormat.class);
job.setOutputFormatClass(TrackingTextOutputFormat.class);
job.setNumReduceTasks(1);
FileInputFormat.addInputPath(job, inputPath);
FileOutputFormat.setOutputPath(job, outputPath);
job.waitForCompletion(true);
Assert.assertTrue(mapCleanup);
Assert.assertTrue(reduceCleanup);
Assert.assertTrue(recordReaderCleanup);
Assert.assertTrue(recordWriterCleanup);
}
@Test
public void testJobSuccessCleanup() throws Exception {
reset();
Job job = Job.getInstance();
Path inputPath = createInput();
Path outputPath = getOutputPath();
Configuration conf = new Configuration();
FileSystem fs = FileSystem.getLocal(conf);
if (fs.exists(outputPath)) {
fs.delete(outputPath, true);
}
job.setMapperClass(TrackingTokenizerMapper.class);
job.setReducerClass(TrackingIntSumReducer.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(IntWritable.class);
job.setInputFormatClass(TrackingTextInputFormat.class);
job.setOutputFormatClass(TrackingTextOutputFormat.class);
job.setNumReduceTasks(1);
FileInputFormat.addInputPath(job, inputPath);
FileOutputFormat.setOutputPath(job, outputPath);
job.waitForCompletion(true);
Assert.assertTrue(mapCleanup);
Assert.assertTrue(reduceCleanup);
Assert.assertTrue(recordReaderCleanup);
Assert.assertTrue(recordWriterCleanup);
}
}
| 10,072 | 29.068657 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestLocalRunner.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.mapred.LocalJobRunner;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.ReflectionUtils;
import org.junit.Test;
import junit.framework.TestCase;
/**
* Stress tests for the LocalJobRunner
*/
public class TestLocalRunner extends TestCase {
private static final Log LOG = LogFactory.getLog(TestLocalRunner.class);
private static int INPUT_SIZES[] =
new int[] { 50000, 500, 500, 20, 5000, 500};
private static int OUTPUT_SIZES[] =
new int[] { 1, 500, 500, 500, 500, 500};
private static int SLEEP_INTERVALS[] =
new int[] { 10000, 15, 15, 20, 250, 60 };
private static class StressMapper
extends Mapper<LongWritable, Text, LongWritable, Text> {
// Different map tasks operate at different speeds.
// We define behavior for 6 threads.
private int threadId;
// Used to ensure that the compiler doesn't optimize away
// some code.
public long exposedState;
protected void setup(Context context) {
// Get the thread num from the file number.
FileSplit split = (FileSplit) context.getInputSplit();
Path filePath = split.getPath();
String name = filePath.getName();
this.threadId = Integer.valueOf(name);
LOG.info("Thread " + threadId + " : "
+ context.getInputSplit());
}
/** Map method with different behavior based on the thread id */
public void map(LongWritable key, Text val, Context c)
throws IOException, InterruptedException {
// Write many values quickly.
for (int i = 0; i < OUTPUT_SIZES[threadId]; i++) {
c.write(new LongWritable(0), val);
if (i % SLEEP_INTERVALS[threadId] == 1) {
Thread.sleep(1);
}
}
}
protected void cleanup(Context context) {
      // Log exposedState so that any updates to it cannot be optimized away
      // by the compiler.
LOG.debug("Busy loop counter: " + this.exposedState);
}
}
private static class CountingReducer
extends Reducer<LongWritable, Text, LongWritable, LongWritable> {
public void reduce(LongWritable key, Iterable<Text> vals, Context context)
throws IOException, InterruptedException {
long out = 0;
for (Text val : vals) {
out++;
}
context.write(key, new LongWritable(out));
}
}
private static class GCMapper
extends Mapper<LongWritable, Text, LongWritable, Text> {
public void map(LongWritable key, Text val, Context c)
throws IOException, InterruptedException {
// Create a whole bunch of objects.
List<Integer> lst = new ArrayList<Integer>();
for (int i = 0; i < 20000; i++) {
lst.add(new Integer(i));
}
// Actually use this list, to ensure that it isn't just optimized away.
int sum = 0;
for (int x : lst) {
sum += x;
}
// throw away the list and run a GC.
lst = null;
System.gc();
c.write(new LongWritable(sum), val);
}
}
/**
* Create a single input file in the input directory.
* @param dirPath the directory in which the file resides
* @param id the file id number
* @param numRecords how many records to write to each file.
*/
private void createInputFile(Path dirPath, int id, int numRecords)
throws IOException {
final String MESSAGE = "This is a line in a file: ";
Path filePath = new Path(dirPath, "" + id);
Configuration conf = new Configuration();
FileSystem fs = FileSystem.getLocal(conf);
OutputStream os = fs.create(filePath);
BufferedWriter w = new BufferedWriter(new OutputStreamWriter(os));
for (int i = 0; i < numRecords; i++) {
w.write(MESSAGE + id + " " + i + "\n");
}
w.close();
}
// This is the total number of map output records we expect to generate,
// based on input file sizes (see createMultiMapsInput()) and the behavior
// of the different StressMapper threads.
private static int TOTAL_RECORDS = 0;
static {
for (int i = 0; i < 6; i++) {
TOTAL_RECORDS += INPUT_SIZES[i] * OUTPUT_SIZES[i];
}
}
private final String INPUT_DIR = "multiMapInput";
private final String OUTPUT_DIR = "multiMapOutput";
private Path getInputPath() {
String dataDir = System.getProperty("test.build.data");
if (null == dataDir) {
return new Path(INPUT_DIR);
} else {
return new Path(new Path(dataDir), INPUT_DIR);
}
}
private Path getOutputPath() {
String dataDir = System.getProperty("test.build.data");
if (null == dataDir) {
return new Path(OUTPUT_DIR);
} else {
return new Path(new Path(dataDir), OUTPUT_DIR);
}
}
/**
* Create the inputs for the MultiMaps test.
* @return the path to the input directory.
*/
private Path createMultiMapsInput() throws IOException {
Configuration conf = new Configuration();
FileSystem fs = FileSystem.getLocal(conf);
Path inputPath = getInputPath();
// Clear the input directory if it exists, first.
if (fs.exists(inputPath)) {
fs.delete(inputPath, true);
}
// Create input files, with sizes calibrated based on
// the amount of work done in each mapper.
for (int i = 0; i < 6; i++) {
createInputFile(inputPath, i, INPUT_SIZES[i]);
}
return inputPath;
}
/**
* Verify that we got the correct amount of output.
*/
private void verifyOutput(Path outputPath) throws IOException {
Configuration conf = new Configuration();
FileSystem fs = FileSystem.getLocal(conf);
Path outputFile = new Path(outputPath, "part-r-00000");
InputStream is = fs.open(outputFile);
BufferedReader r = new BufferedReader(new InputStreamReader(is));
// Should get a single line of the form "0\t(count)"
String line = r.readLine().trim();
assertTrue("Line does not have correct key", line.startsWith("0\t"));
int count = Integer.valueOf(line.substring(2));
assertEquals("Incorrect count generated!", TOTAL_RECORDS, count);
r.close();
}
/**
* Test that the GC counter actually increments when we know that we've
* spent some time in the GC during the mapper.
*/
@Test
public void testGcCounter() throws Exception {
Path inputPath = getInputPath();
Path outputPath = getOutputPath();
Configuration conf = new Configuration();
FileSystem fs = FileSystem.getLocal(conf);
// Clear input/output dirs.
if (fs.exists(outputPath)) {
fs.delete(outputPath, true);
}
if (fs.exists(inputPath)) {
fs.delete(inputPath, true);
}
// Create one input file
createInputFile(inputPath, 0, 20);
// Now configure and run the job.
Job job = Job.getInstance();
job.setMapperClass(GCMapper.class);
job.setNumReduceTasks(0);
job.getConfiguration().set(MRJobConfig.IO_SORT_MB, "25");
FileInputFormat.addInputPath(job, inputPath);
FileOutputFormat.setOutputPath(job, outputPath);
boolean ret = job.waitForCompletion(true);
assertTrue("job failed", ret);
// This job should have done *some* gc work.
// It had to clean up 400,000 objects.
// We strongly suspect this will result in a few milliseconds effort.
Counter gcCounter = job.getCounters().findCounter(
TaskCounter.GC_TIME_MILLIS);
assertNotNull(gcCounter);
assertTrue("No time spent in gc", gcCounter.getValue() > 0);
}
/**
* Run a test with several mappers in parallel, operating at different
* speeds. Verify that the correct amount of output is created.
*/
@Test(timeout=120*1000)
public void testMultiMaps() throws Exception {
Job job = Job.getInstance();
Path inputPath = createMultiMapsInput();
Path outputPath = getOutputPath();
Configuration conf = new Configuration();
FileSystem fs = FileSystem.getLocal(conf);
if (fs.exists(outputPath)) {
fs.delete(outputPath, true);
}
job.setMapperClass(StressMapper.class);
job.setReducerClass(CountingReducer.class);
job.setNumReduceTasks(1);
LocalJobRunner.setLocalMaxRunningMaps(job, 6);
job.getConfiguration().set(MRJobConfig.IO_SORT_MB, "25");
FileInputFormat.addInputPath(job, inputPath);
FileOutputFormat.setOutputPath(job, outputPath);
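    // Watchdog: if the job hangs, interrupt this (main) thread after two
    // minutes so the test dumps thread stacks and fails instead of blocking.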
final Thread toInterrupt = Thread.currentThread();
Thread interrupter = new Thread() {
public void run() {
try {
Thread.sleep(120*1000); // 2m
toInterrupt.interrupt();
} catch (InterruptedException ie) {}
}
};
LOG.info("Submitting job...");
job.submit();
LOG.info("Starting thread to interrupt main thread in 2 minutes");
interrupter.start();
LOG.info("Waiting for job to complete...");
try {
job.waitForCompletion(true);
} catch (InterruptedException ie) {
LOG.fatal("Interrupted while waiting for job completion", ie);
for (int i = 0; i < 10; i++) {
LOG.fatal("Dumping stacks");
ReflectionUtils.logThreadInfo(LOG, "multimap threads", 0);
Thread.sleep(1000);
}
throw ie;
}
LOG.info("Job completed, stopping interrupter");
interrupter.interrupt();
try {
interrupter.join();
} catch (InterruptedException ie) {
// it might interrupt us right as we interrupt it
}
LOG.info("Verifying output");
verifyOutput(outputPath);
}
/**
* Run a test with a misconfigured number of mappers.
* Expect failure.
*/
@Test
public void testInvalidMultiMapParallelism() throws Exception {
Job job = Job.getInstance();
Path inputPath = createMultiMapsInput();
Path outputPath = getOutputPath();
Configuration conf = new Configuration();
FileSystem fs = FileSystem.getLocal(conf);
if (fs.exists(outputPath)) {
fs.delete(outputPath, true);
}
job.setMapperClass(StressMapper.class);
job.setReducerClass(CountingReducer.class);
job.setNumReduceTasks(1);
LocalJobRunner.setLocalMaxRunningMaps(job, -6);
FileInputFormat.addInputPath(job, inputPath);
FileOutputFormat.setOutputPath(job, outputPath);
boolean success = job.waitForCompletion(true);
assertFalse("Job succeeded somehow", success);
}
/** An IF that creates no splits */
private static class EmptyInputFormat extends InputFormat<Object, Object> {
public List<InputSplit> getSplits(JobContext context) {
return new ArrayList<InputSplit>();
}
public RecordReader<Object, Object> createRecordReader(InputSplit split,
TaskAttemptContext context) {
return new EmptyRecordReader();
}
}
private static class EmptyRecordReader extends RecordReader<Object, Object> {
public void initialize(InputSplit split, TaskAttemptContext context) {
}
public Object getCurrentKey() {
return new Object();
}
public Object getCurrentValue() {
return new Object();
}
public float getProgress() {
return 0.0f;
}
public void close() {
}
public boolean nextKeyValue() {
return false;
}
}
/** Test case for zero mappers */
@Test
public void testEmptyMaps() throws Exception {
Job job = Job.getInstance();
Path outputPath = getOutputPath();
Configuration conf = new Configuration();
FileSystem fs = FileSystem.getLocal(conf);
if (fs.exists(outputPath)) {
fs.delete(outputPath, true);
}
job.setInputFormatClass(EmptyInputFormat.class);
job.setNumReduceTasks(1);
FileOutputFormat.setOutputPath(job, outputPath);
boolean success = job.waitForCompletion(true);
assertTrue("Empty job should work", success);
}
/** @return the directory where numberfiles are written (mapper inputs) */
private Path getNumberDirPath() {
return new Path(getInputPath(), "numberfiles");
}
/**
* Write out an input file containing an integer.
*
* @param fileNum the file number to write to.
* @param value the value to write to the file
* @return the path of the written file.
*/
private Path makeNumberFile(int fileNum, int value) throws IOException {
Path workDir = getNumberDirPath();
Path filePath = new Path(workDir, "file" + fileNum);
Configuration conf = new Configuration();
FileSystem fs = FileSystem.getLocal(conf);
OutputStream os = fs.create(filePath);
BufferedWriter w = new BufferedWriter(new OutputStreamWriter(os));
w.write("" + value);
w.close();
return filePath;
}
/**
* Each record received by this mapper is a number 'n'.
* Emit the values [0..n-1]
*/
public static class SequenceMapper
extends Mapper<LongWritable, Text, Text, NullWritable> {
public void map(LongWritable k, Text v, Context c)
throws IOException, InterruptedException {
int max = Integer.valueOf(v.toString());
for (int i = 0; i < max; i++) {
c.write(new Text("" + i), NullWritable.get());
}
}
}
private final static int NUMBER_FILE_VAL = 100;
/**
* Tally up the values and ensure that we got as much data
* out as we put in.
* Each mapper generated 'NUMBER_FILE_VAL' values (0..NUMBER_FILE_VAL-1).
* Verify that across all our reducers we got exactly this much
* data back.
*/
private void verifyNumberJob(int numMaps) throws Exception {
Path outputDir = getOutputPath();
Configuration conf = new Configuration();
FileSystem fs = FileSystem.getLocal(conf);
FileStatus [] stats = fs.listStatus(outputDir);
int valueSum = 0;
for (FileStatus f : stats) {
FSDataInputStream istream = fs.open(f.getPath());
BufferedReader r = new BufferedReader(new InputStreamReader(istream));
String line = null;
while ((line = r.readLine()) != null) {
valueSum += Integer.valueOf(line.trim());
}
r.close();
}
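    // Each mapper emits the values 0..NUMBER_FILE_VAL-1, whose sum is the
    // triangular number maxVal * (maxVal + 1) / 2.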
int maxVal = NUMBER_FILE_VAL - 1;
int expectedPerMapper = maxVal * (maxVal + 1) / 2;
int expectedSum = expectedPerMapper * numMaps;
LOG.info("expected sum: " + expectedSum + ", got " + valueSum);
assertEquals("Didn't get all our results back", expectedSum, valueSum);
}
/**
* Run a test which creates a SequenceMapper / IdentityReducer
* job over a set of generated number files.
*/
private void doMultiReducerTest(int numMaps, int numReduces,
int parallelMaps, int parallelReduces) throws Exception {
Path in = getNumberDirPath();
Path out = getOutputPath();
// Clear data from any previous tests.
Configuration conf = new Configuration();
FileSystem fs = FileSystem.getLocal(conf);
if (fs.exists(out)) {
fs.delete(out, true);
}
if (fs.exists(in)) {
fs.delete(in, true);
}
for (int i = 0; i < numMaps; i++) {
makeNumberFile(i, 100);
}
Job job = Job.getInstance();
job.setNumReduceTasks(numReduces);
job.setMapperClass(SequenceMapper.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(NullWritable.class);
FileInputFormat.addInputPath(job, in);
FileOutputFormat.setOutputPath(job, out);
LocalJobRunner.setLocalMaxRunningMaps(job, parallelMaps);
LocalJobRunner.setLocalMaxRunningReduces(job, parallelReduces);
boolean result = job.waitForCompletion(true);
assertTrue("Job failed!!", result);
verifyNumberJob(numMaps);
}
@Test
public void testOneMapMultiReduce() throws Exception {
doMultiReducerTest(1, 2, 1, 1);
}
@Test
public void testOneMapMultiParallelReduce() throws Exception {
doMultiReducerTest(1, 2, 1, 2);
}
@Test
public void testMultiMapOneReduce() throws Exception {
doMultiReducerTest(4, 1, 2, 1);
}
@Test
public void testMultiMapMultiReduce() throws Exception {
doMultiReducerTest(4, 4, 2, 2);
}
}
| 17,300 | 29.141115 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/SleepJob.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.io.IOException;
import java.io.DataInput;
import java.io.DataOutput;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
 * Dummy class for testing the MR framework. Sleeps for a defined period
 * of time in the mapper and reducer. Generates fake input for map / reduce
 * jobs. Note that the number of generated input pairs is on the order
* of <code>numMappers * mapSleepTime / 100</code>, so the job uses
* some disk space.
*/
public class SleepJob extends Configured implements Tool {
public static String MAP_SLEEP_COUNT = "mapreduce.sleepjob.map.sleep.count";
public static String REDUCE_SLEEP_COUNT =
"mapreduce.sleepjob.reduce.sleep.count";
public static String MAP_SLEEP_TIME = "mapreduce.sleepjob.map.sleep.time";
public static String REDUCE_SLEEP_TIME =
"mapreduce.sleepjob.reduce.sleep.time";
public static class SleepJobPartitioner extends
Partitioner<IntWritable, NullWritable> {
public int getPartition(IntWritable k, NullWritable v, int numPartitions) {
return k.get() % numPartitions;
}
}
public static class EmptySplit extends InputSplit implements Writable {
public void write(DataOutput out) throws IOException { }
public void readFields(DataInput in) throws IOException { }
public long getLength() { return 0L; }
public String[] getLocations() { return new String[0]; }
}
public static class SleepInputFormat
extends InputFormat<IntWritable,IntWritable> {
public List<InputSplit> getSplits(JobContext jobContext) {
List<InputSplit> ret = new ArrayList<InputSplit>();
int numSplits = jobContext.getConfiguration().
getInt(MRJobConfig.NUM_MAPS, 1);
for (int i = 0; i < numSplits; ++i) {
ret.add(new EmptySplit());
}
return ret;
}
public RecordReader<IntWritable,IntWritable> createRecordReader(
InputSplit ignored, TaskAttemptContext taskContext)
throws IOException {
Configuration conf = taskContext.getConfiguration();
final int count = conf.getInt(MAP_SLEEP_COUNT, 1);
if (count < 0) throw new IOException("Invalid map count: " + count);
final int redcount = conf.getInt(REDUCE_SLEEP_COUNT, 1);
if (redcount < 0)
throw new IOException("Invalid reduce count: " + redcount);
final int emitPerMapTask = (redcount * taskContext.getNumReduceTasks());
return new RecordReader<IntWritable,IntWritable>() {
private int records = 0;
private int emitCount = 0;
private IntWritable key = null;
private IntWritable value = null;
public void initialize(InputSplit split, TaskAttemptContext context) {
}
public boolean nextKeyValue()
throws IOException {
if (count == 0) {
return false;
}
key = new IntWritable();
key.set(emitCount);
int emit = emitPerMapTask / count;
if ((emitPerMapTask) % count > records) {
++emit;
}
emitCount += emit;
value = new IntWritable();
value.set(emit);
return records++ < count;
}
public IntWritable getCurrentKey() { return key; }
public IntWritable getCurrentValue() { return value; }
public void close() throws IOException { }
public float getProgress() throws IOException {
          // a reader with no records to produce is trivially complete
          return count == 0 ? 1.0f : records / ((float)count);
}
};
}
}
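  // Worked example (illustrative values, not in the original source): with
  // MAP_SLEEP_COUNT = 3, REDUCE_SLEEP_COUNT = 2 and two reduce tasks,
  // emitPerMapTask = 2 * 2 = 4, so the three records carry values 2, 1 and 1
  // (base 4 / 3 = 1, plus one extra for the first 4 % 3 = 1 record). The mapper
  // below then emits 4 keys in total, and SleepJobPartitioner's modulo spreads
  // them so each reducer receives REDUCE_SLEEP_COUNT keys.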
public static class SleepMapper
extends Mapper<IntWritable, IntWritable, IntWritable, NullWritable> {
private long mapSleepDuration = 100;
private int mapSleepCount = 1;
private int count = 0;
protected void setup(Context context)
throws IOException, InterruptedException {
Configuration conf = context.getConfiguration();
this.mapSleepCount =
conf.getInt(MAP_SLEEP_COUNT, mapSleepCount);
this.mapSleepDuration = mapSleepCount == 0 ? 0 :
conf.getLong(MAP_SLEEP_TIME , 100) / mapSleepCount;
}
public void map(IntWritable key, IntWritable value, Context context
) throws IOException, InterruptedException {
      // it is expected that every map processes mapSleepCount records.
try {
context.setStatus("Sleeping... (" +
(mapSleepDuration * (mapSleepCount - count)) + ") ms left");
Thread.sleep(mapSleepDuration);
}
catch (InterruptedException ex) {
throw (IOException)new IOException(
"Interrupted while sleeping").initCause(ex);
}
++count;
// output reduceSleepCount * numReduce number of random values, so that
// each reducer will get reduceSleepCount number of keys.
int k = key.get();
for (int i = 0; i < value.get(); ++i) {
context.write(new IntWritable(k + i), NullWritable.get());
}
}
}
public static class SleepReducer
extends Reducer<IntWritable, NullWritable, NullWritable, NullWritable> {
private long reduceSleepDuration = 100;
private int reduceSleepCount = 1;
private int count = 0;
protected void setup(Context context)
throws IOException, InterruptedException {
Configuration conf = context.getConfiguration();
this.reduceSleepCount =
conf.getInt(REDUCE_SLEEP_COUNT, reduceSleepCount);
this.reduceSleepDuration = reduceSleepCount == 0 ? 0 :
conf.getLong(REDUCE_SLEEP_TIME , 100) / reduceSleepCount;
}
public void reduce(IntWritable key, Iterable<NullWritable> values,
Context context)
throws IOException {
try {
context.setStatus("Sleeping... (" +
(reduceSleepDuration * (reduceSleepCount - count)) + ") ms left");
Thread.sleep(reduceSleepDuration);
}
catch (InterruptedException ex) {
throw (IOException)new IOException(
"Interrupted while sleeping").initCause(ex);
}
count++;
}
}
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new Configuration(), new SleepJob(), args);
System.exit(res);
}
public Job createJob(int numMapper, int numReducer,
long mapSleepTime, int mapSleepCount,
long reduceSleepTime, int reduceSleepCount)
throws IOException {
Configuration conf = getConf();
conf.setLong(MAP_SLEEP_TIME, mapSleepTime);
conf.setLong(REDUCE_SLEEP_TIME, reduceSleepTime);
conf.setInt(MAP_SLEEP_COUNT, mapSleepCount);
conf.setInt(REDUCE_SLEEP_COUNT, reduceSleepCount);
conf.setInt(MRJobConfig.NUM_MAPS, numMapper);
Job job = Job.getInstance(conf, "sleep");
job.setNumReduceTasks(numReducer);
job.setJarByClass(SleepJob.class);
job.setMapperClass(SleepMapper.class);
job.setMapOutputKeyClass(IntWritable.class);
job.setMapOutputValueClass(NullWritable.class);
job.setReducerClass(SleepReducer.class);
job.setOutputFormatClass(NullOutputFormat.class);
job.setInputFormatClass(SleepInputFormat.class);
job.setPartitionerClass(SleepJobPartitioner.class);
job.setSpeculativeExecution(false);
job.setJobName("Sleep job");
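    // The path below is never actually read: SleepInputFormat builds EmptySplits
    // from MRJobConfig.NUM_MAPS, so "ignored" only satisfies FileInputFormat.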
FileInputFormat.addInputPath(job, new Path("ignored"));
return job;
}
public int run(String[] args) throws Exception {
if(args.length < 1) {
return printUsage("number of arguments must be > 0");
}
int numMapper = 1, numReducer = 1;
long mapSleepTime = 100, reduceSleepTime = 100, recSleepTime = 100;
int mapSleepCount = 1, reduceSleepCount = 1;
for(int i=0; i < args.length; i++ ) {
if(args[i].equals("-m")) {
numMapper = Integer.parseInt(args[++i]);
if (numMapper < 0) {
return printUsage(numMapper + ": numMapper must be >= 0");
}
}
else if(args[i].equals("-r")) {
numReducer = Integer.parseInt(args[++i]);
if (numReducer < 0) {
return printUsage(numReducer + ": numReducer must be >= 0");
}
}
else if(args[i].equals("-mt")) {
mapSleepTime = Long.parseLong(args[++i]);
if (mapSleepTime < 0) {
return printUsage(mapSleepTime + ": mapSleepTime must be >= 0");
}
}
else if(args[i].equals("-rt")) {
reduceSleepTime = Long.parseLong(args[++i]);
if (reduceSleepTime < 0) {
return printUsage(
reduceSleepTime + ": reduceSleepTime must be >= 0");
}
}
else if (args[i].equals("-recordt")) {
recSleepTime = Long.parseLong(args[++i]);
if (recSleepTime < 0) {
return printUsage(recSleepTime + ": recordSleepTime must be >= 0");
}
}
}
    // derive per-record sleep counts: each task sleeps recSleepTime per record,
    // for a total of roughly *SleepTime per task
mapSleepCount = (int)Math.ceil(mapSleepTime / ((double)recSleepTime));
reduceSleepCount = (int)Math.ceil(reduceSleepTime / ((double)recSleepTime));
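    // For example (hypothetical values): -mt 10000 -rt 5000 -recordt 100 yields
    // mapSleepCount = ceil(10000 / 100.0) = 100 and
    // reduceSleepCount = ceil(5000 / 100.0) = 50, i.e. each map record sleeps
    // ~100 ms for a total of ~10 s per map task.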
Job job = createJob(numMapper, numReducer, mapSleepTime,
mapSleepCount, reduceSleepTime, reduceSleepCount);
return job.waitForCompletion(true) ? 0 : 1;
}
private int printUsage(String error) {
if (error != null) {
System.err.println("ERROR: " + error);
}
System.err.println("SleepJob [-m numMapper] [-r numReducer]" +
" [-mt mapSleepTime (msec)] [-rt reduceSleepTime (msec)]" +
" [-recordt recordSleepTime (msec)]");
ToolRunner.printGenericCommandUsage(System.err);
return 2;
}
}
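// Usage sketch (illustrative, not part of the original source): drives SleepJob
// programmatically with made-up sizes; normally the job is launched through
// ToolRunner via main() above.
class SleepJobUsageSketch {
  public static void main(String[] args) throws Exception {
    SleepJob sleepJob = new SleepJob();
    sleepJob.setConf(new Configuration());
    // 10 mappers / 2 reducers; each mapper sleeps ~2000 ms in 20 x 100 ms slices,
    // each reducer ~1000 ms in 10 x 100 ms slices.
    Job job = sleepJob.createJob(10, 2, 2000L, 20, 1000L, 10);
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}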
| 10,824 | 36.71777 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/filecache/TestURIFragments.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.filecache;
import java.net.URI;
import java.net.URISyntaxException;
import static org.junit.Assert.*;
import org.junit.Test;
public class TestURIFragments {
/**
   * Tests {@link DistributedCache#checkURIs(URI[], URI[])}: every URI must carry
   * a fragment, and fragments must be unique across cache files and archives,
   * ignoring case.
*/
@Test
public void testURIs() throws URISyntaxException {
assertTrue(DistributedCache.checkURIs(null, null));
// uris with no fragments
assertFalse(DistributedCache.checkURIs(new URI[] { new URI(
"file://foo/bar/myCacheFile.txt") }, null));
assertFalse(DistributedCache.checkURIs(null,
new URI[] { new URI("file://foo/bar/myCacheArchive.txt") }));
assertFalse(DistributedCache.checkURIs(new URI[] {
new URI("file://foo/bar/myCacheFile1.txt#file"),
new URI("file://foo/bar/myCacheFile2.txt") }, null));
assertFalse(DistributedCache.checkURIs(null, new URI[] {
new URI("file://foo/bar/myCacheArchive1.txt"),
new URI("file://foo/bar/myCacheArchive2.txt#archive") }));
assertFalse(DistributedCache.checkURIs(new URI[] { new URI(
"file://foo/bar/myCacheFile.txt") }, new URI[] { new URI(
"file://foo/bar/myCacheArchive.txt") }));
// conflicts in fragment names
assertFalse(DistributedCache.checkURIs(new URI[] {
new URI("file://foo/bar/myCacheFile1.txt#file"),
new URI("file://foo/bar/myCacheFile2.txt#file") }, null));
assertFalse(DistributedCache.checkURIs(null, new URI[] {
new URI("file://foo/bar/myCacheArchive1.txt#archive"),
new URI("file://foo/bar/myCacheArchive2.txt#archive") }));
assertFalse(DistributedCache.checkURIs(new URI[] { new URI(
"file://foo/bar/myCacheFile.txt#cache") }, new URI[] { new URI(
"file://foo/bar/myCacheArchive.txt#cache") }));
assertFalse(DistributedCache.checkURIs(new URI[] {
new URI("file://foo/bar/myCacheFile1.txt#file1"),
new URI("file://foo/bar/myCacheFile2.txt#file2") }, new URI[] {
new URI("file://foo/bar/myCacheArchive1.txt#archive"),
new URI("file://foo/bar/myCacheArchive2.txt#archive") }));
assertFalse(DistributedCache.checkURIs(new URI[] {
new URI("file://foo/bar/myCacheFile1.txt#file"),
new URI("file://foo/bar/myCacheFile2.txt#file") }, new URI[] {
new URI("file://foo/bar/myCacheArchive1.txt#archive1"),
new URI("file://foo/bar/myCacheArchive2.txt#archive2") }));
assertFalse(DistributedCache.checkURIs(new URI[] {
new URI("file://foo/bar/myCacheFile1.txt#file1"),
new URI("file://foo/bar/myCacheFile2.txt#cache") }, new URI[] {
new URI("file://foo/bar/myCacheArchive1.txt#cache"),
new URI("file://foo/bar/myCacheArchive2.txt#archive2") }));
// test ignore case
assertFalse(DistributedCache.checkURIs(new URI[] {
new URI("file://foo/bar/myCacheFile1.txt#file"),
new URI("file://foo/bar/myCacheFile2.txt#FILE") }, null));
assertFalse(DistributedCache.checkURIs(null, new URI[] {
new URI("file://foo/bar/myCacheArchive1.txt#archive"),
new URI("file://foo/bar/myCacheArchive2.txt#ARCHIVE") }));
assertFalse(DistributedCache.checkURIs(new URI[] { new URI(
"file://foo/bar/myCacheFile.txt#cache") }, new URI[] { new URI(
"file://foo/bar/myCacheArchive.txt#CACHE") }));
assertFalse(DistributedCache.checkURIs(new URI[] {
new URI("file://foo/bar/myCacheFile1.txt#file1"),
new URI("file://foo/bar/myCacheFile2.txt#file2") }, new URI[] {
new URI("file://foo/bar/myCacheArchive1.txt#ARCHIVE"),
new URI("file://foo/bar/myCacheArchive2.txt#archive") }));
assertFalse(DistributedCache.checkURIs(new URI[] {
new URI("file://foo/bar/myCacheFile1.txt#FILE"),
new URI("file://foo/bar/myCacheFile2.txt#file") }, new URI[] {
new URI("file://foo/bar/myCacheArchive1.txt#archive1"),
new URI("file://foo/bar/myCacheArchive2.txt#archive2") }));
assertFalse(DistributedCache.checkURIs(new URI[] {
new URI("file://foo/bar/myCacheFile1.txt#file1"),
new URI("file://foo/bar/myCacheFile2.txt#CACHE") }, new URI[] {
new URI("file://foo/bar/myCacheArchive1.txt#cache"),
new URI("file://foo/bar/myCacheArchive2.txt#archive2") }));
// allowed uri combinations
assertTrue(DistributedCache.checkURIs(new URI[] {
new URI("file://foo/bar/myCacheFile1.txt#file1"),
new URI("file://foo/bar/myCacheFile2.txt#file2") }, null));
assertTrue(DistributedCache.checkURIs(null, new URI[] {
new URI("file://foo/bar/myCacheArchive1.txt#archive1"),
new URI("file://foo/bar/myCacheArchive2.txt#archive2") }));
assertTrue(DistributedCache.checkURIs(new URI[] {
new URI("file://foo/bar/myCacheFile1.txt#file1"),
new URI("file://foo/bar/myCacheFile2.txt#file2") }, new URI[] {
new URI("file://foo/bar/myCacheArchive1.txt#archive1"),
new URI("file://foo/bar/myCacheArchive2.txt#archive2") }));
}
}
| 5,797 | 48.982759 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/CredentialsTestJob.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.security;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.SleepJob;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
 * Class for testing transport of keys via Credentials.
 * The client passes a list of keys in the Credentials object.
 * The mapper and reducer check whether they can access the keys
 * from Credentials.
*/
public class CredentialsTestJob extends Configured implements Tool {
private static final int NUM_OF_KEYS = 10;
private static void checkSecrets(Credentials ts) {
if ( ts == null){
throw new RuntimeException("The credentials are not available");
// fail the test
}
for(int i=0; i<NUM_OF_KEYS; i++) {
String secretName = "alias"+i;
// get token storage and a key
byte[] secretValue = ts.getSecretKey(new Text(secretName));
System.out.println(secretValue);
if (secretValue == null){
throw new RuntimeException("The key "+ secretName + " is not available. ");
// fail the test
}
String secretValueStr = new String (secretValue);
if ( !("password"+i).equals(secretValueStr)){
throw new RuntimeException("The key "+ secretName +
" is not correct. Expected value is "+ ("password"+i) +
". Actual value is " + secretValueStr); // fail the test
}
}
}
public static class CredentialsTestMapper
extends Mapper<IntWritable, IntWritable, IntWritable, NullWritable> {
Credentials ts;
protected void setup(Context context)
throws IOException, InterruptedException {
ts = context.getCredentials();
}
public void map(IntWritable key, IntWritable value, Context context
) throws IOException, InterruptedException {
checkSecrets(ts);
}
}
public static class CredentialsTestReducer
extends Reducer<IntWritable, NullWritable, NullWritable, NullWritable> {
Credentials ts;
protected void setup(Context context)
throws IOException, InterruptedException {
ts = context.getCredentials();
}
public void reduce(IntWritable key, Iterable<NullWritable> values,
Context context)
throws IOException {
checkSecrets(ts);
}
}
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new Configuration(), new CredentialsTestJob(), args);
System.exit(res);
}
public Job createJob()
throws IOException {
Configuration conf = getConf();
conf.setInt(MRJobConfig.NUM_MAPS, 1);
Job job = Job.getInstance(conf, "test");
job.setNumReduceTasks(1);
job.setJarByClass(CredentialsTestJob.class);
job.setMapperClass(CredentialsTestJob.CredentialsTestMapper.class);
job.setMapOutputKeyClass(IntWritable.class);
job.setMapOutputValueClass(NullWritable.class);
job.setReducerClass(CredentialsTestJob.CredentialsTestReducer.class);
job.setInputFormatClass(SleepJob.SleepInputFormat.class);
job.setPartitionerClass(SleepJob.SleepJobPartitioner.class);
job.setOutputFormatClass(NullOutputFormat.class);
job.setSpeculativeExecution(false);
job.setJobName("test job");
FileInputFormat.addInputPath(job, new Path("ignored"));
return job;
}
public int run(String[] args) throws Exception {
Job job = createJob();
return job.waitForCompletion(true) ? 0 : 1;
}
}
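// Client-side sketch (illustrative, not part of the original source): shows how a
// driver might load the secrets that checkSecrets() expects into the job's
// Credentials before submission; the alias/password pairs mirror the values
// verified above.
class CredentialsTestJobClientSketch {
  public static void main(String[] args) throws Exception {
    CredentialsTestJob testJob = new CredentialsTestJob();
    testJob.setConf(new Configuration());
    Job job = testJob.createJob();
    Credentials creds = job.getCredentials();
    for (int i = 0; i < 10; i++) { // NUM_OF_KEYS in the class above is 10
      creds.addSecretKey(new Text("alias" + i), ("password" + i).getBytes());
    }
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}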
| 4,833 | 32.804196 | 84 |
java
|