| repo (stringlengths 1-191 ⌀) | file (stringlengths 23-351) | code (stringlengths 0-5.32M) | file_length (int64 0-5.32M) | avg_line_length (float64 0-2.9k) | max_line_length (int64 0-288k) | extension_type (stringclasses 1 value) |
---|---|---|---|---|---|---|
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/Phase.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.hdfs.server.namenode.startupprogress;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* Indicates a particular phase of the namenode startup sequence. The phases
* are listed here in their execution order.
*/
@InterfaceAudience.Private
public enum Phase {
/**
* The namenode is loading the fsimage file into memory.
*/
LOADING_FSIMAGE("LoadingFsImage", "Loading fsimage"),
/**
* The namenode is loading the edits file and applying its operations to the
* in-memory metadata.
*/
LOADING_EDITS("LoadingEdits", "Loading edits"),
/**
* The namenode is saving a new checkpoint.
*/
SAVING_CHECKPOINT("SavingCheckpoint", "Saving checkpoint"),
/**
* The namenode has entered safemode, awaiting block reports from data nodes.
*/
SAFEMODE("SafeMode", "Safe mode");
private final String name, description;
/**
* Returns phase description.
*
* @return String description
*/
public String getDescription() {
return description;
}
/**
* Returns phase name.
*
* @return String phase name
*/
public String getName() {
return name;
}
/**
* Private constructor of enum.
*
* @param name String phase name
* @param description String phase description
*/
private Phase(String name, String description) {
this.name = name;
this.description = description;
}
}
| 2,212 | 27.012658 | 80 | java |
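Each Phase pairs a camel-case name (used to build metric names in StartupProgressMetrics, further down in this dump) with a human-readable description. A minimal sketch of reading both, assuming only the enum above; the class and main method are illustrative and not part of the Hadoop sources:

import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;

public class PhaseNamesSketch {
  public static void main(String[] args) {
    // The enum constants are declared in execution order, so values()
    // yields LOADING_FSIMAGE, LOADING_EDITS, SAVING_CHECKPOINT, SAFEMODE.
    for (Phase phase : Phase.values()) {
      System.out.println(phase.getName() + " -> " + phase.getDescription());
    }
  }
}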
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/Status.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.hdfs.server.namenode.startupprogress;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* Indicates run status of a {@link Phase}.
*/
@InterfaceAudience.Private
public enum Status {
/**
* The phase has not yet started running.
*/
PENDING,
/**
* The phase is running right now.
*/
RUNNING,
/**
* The phase has already completed.
*/
COMPLETE
}
| 1,216 | 28.682927 | 80 | java |
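Status is not stored directly; as StartupProgressView.getStatus(Phase) later in this dump shows, it is derived from the begin/end timestamps, with Long.MIN_VALUE as the "undefined" sentinel. A hedged restatement of that mapping (the helper class below is illustrative, not a Hadoop method):

import org.apache.hadoop.hdfs.server.namenode.startupprogress.Status;

public class StatusSketch {
  // Mirrors the derivation in StartupProgressView#getStatus(Phase).
  static Status statusOf(long beginTime, long endTime) {
    if (beginTime == Long.MIN_VALUE) {
      return Status.PENDING;    // phase never started
    } else if (endTime == Long.MIN_VALUE) {
      return Status.RUNNING;    // started, end time not yet recorded
    } else {
      return Status.COMPLETE;   // both timestamps recorded
    }
  }
}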
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/StepType.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.hdfs.server.namenode.startupprogress;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* Indicates a particular type of {@link Step}.
*/
@InterfaceAudience.Private
public enum StepType {
/**
* The namenode has entered safemode and is awaiting block reports from
* datanodes.
*/
AWAITING_REPORTED_BLOCKS("AwaitingReportedBlocks", "awaiting reported blocks"),
/**
* The namenode is performing an operation related to delegation keys.
*/
DELEGATION_KEYS("DelegationKeys", "delegation keys"),
/**
* The namenode is performing an operation related to delegation tokens.
*/
DELEGATION_TOKENS("DelegationTokens", "delegation tokens"),
/**
* The namenode is performing an operation related to inodes.
*/
INODES("Inodes", "inodes"),
/**
* The namenode is performing an operation related to cache pools.
*/
CACHE_POOLS("CachePools", "cache pools"),
/**
* The namenode is performing an operation related to cache entries.
*/
CACHE_ENTRIES("CacheEntries", "cache entries");
private final String name, description;
/**
* Private constructor of enum.
*
* @param name String step type name
* @param description String step type description
*/
private StepType(String name, String description) {
this.name = name;
this.description = description;
}
/**
* Returns step type description.
*
* @return String step type description
*/
public String getDescription() {
return description;
}
/**
* Returns step type name.
*
* @return String step type name
*/
public String getName() {
return name;
}
}
| 2,471 | 27.090909 | 81 | java |
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/StepTracking.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.hdfs.server.namenode.startupprogress;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* Internal data structure used to track progress of a {@link Step}.
*/
@InterfaceAudience.Private
final class StepTracking extends AbstractTracking {
AtomicLong count = new AtomicLong();
long total = Long.MIN_VALUE;
@Override
public StepTracking clone() {
StepTracking clone = new StepTracking();
super.copy(clone);
clone.count = new AtomicLong(count.get());
clone.total = total;
return clone;
}
}
| 1,407 | 34.2 | 80 | java |
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/Step.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.hdfs.server.namenode.startupprogress;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.lang.builder.CompareToBuilder;
import org.apache.commons.lang.builder.EqualsBuilder;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* A step performed by the namenode during a {@link Phase} of startup.
*/
@InterfaceAudience.Private
public class Step implements Comparable<Step> {
private static final AtomicInteger SEQUENCE = new AtomicInteger();
private final String file;
private final int sequenceNumber;
private final long size;
private final StepType type;
/**
* Creates a new Step.
*
* @param type StepType type of step
*/
public Step(StepType type) {
this(type, null, Long.MIN_VALUE);
}
/**
* Creates a new Step.
*
* @param file String file
*/
public Step(String file) {
this(null, file, Long.MIN_VALUE);
}
/**
* Creates a new Step.
*
* @param file String file
* @param size long size in bytes
*/
public Step(String file, long size) {
this(null, file, size);
}
/**
* Creates a new Step.
*
* @param type StepType type of step
* @param file String file
*/
public Step(StepType type, String file) {
this(type, file, Long.MIN_VALUE);
}
/**
* Creates a new Step.
*
* @param type StepType type of step
* @param file String file
* @param size long size in bytes
*/
public Step(StepType type, String file, long size) {
this.file = file;
this.sequenceNumber = SEQUENCE.incrementAndGet();
this.size = size;
this.type = type;
}
@Override
public int compareTo(Step other) {
// Sort steps by file and then sequentially within the file to achieve the
// desired order. There is no concurrent map structure in the JDK that
// maintains insertion order, so instead we attach a sequence number to each
// step and sort on read.
return new CompareToBuilder().append(file, other.file)
.append(sequenceNumber, other.sequenceNumber).toComparison();
}
@Override
public boolean equals(Object otherObj) {
if (otherObj == null || otherObj.getClass() != getClass()) {
return false;
}
Step other = (Step)otherObj;
return new EqualsBuilder().append(this.file, other.file)
.append(this.size, other.size).append(this.type, other.type).isEquals();
}
/**
* Returns the optional file name, possibly null.
*
* @return String optional file name, possibly null
*/
public String getFile() {
return file;
}
/**
* Returns the optional size in bytes, possibly Long.MIN_VALUE if undefined.
*
* @return long optional size in bytes, possibly Long.MIN_VALUE
*/
public long getSize() {
return size;
}
/**
* Returns the optional step type, possibly null.
*
* @return StepType optional step type, possibly null
*/
public StepType getType() {
return type;
}
@Override
public int hashCode() {
return new HashCodeBuilder().append(file).append(size).append(type)
.toHashCode();
}
}
| 3,967 | 26.748252 | 80 | java |
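The compareTo comment above explains the design: because no JDK concurrent map preserves insertion order, each Step takes a process-wide sequence number at construction time and sorted reads fall back to it within a file. A hedged sketch of the resulting ordering; the class and sample fsimage file name are illustrative only:

import java.util.TreeSet;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType;

public class StepOrderingSketch {
  public static void main(String[] args) {
    Step first = new Step(StepType.INODES, "fsimage_0000000000000000001");
    Step second = new Step(StepType.DELEGATION_KEYS, "fsimage_0000000000000000001");
    TreeSet<Step> sorted = new TreeSet<Step>();
    sorted.add(second);
    sorted.add(first);
    // Same file, so ordering falls back to the construction sequence:
    // "first" comes out ahead of "second" regardless of insertion order.
    System.out.println(sorted.first().getType()); // INODES
  }
}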
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/AbstractTracking.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.hdfs.server.namenode.startupprogress;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* Abstract base of internal data structures used for tracking progress. For
* primitive long properties, {@link Long#MIN_VALUE} is used as a sentinel value
* to indicate that the property is undefined.
*/
@InterfaceAudience.Private
abstract class AbstractTracking implements Cloneable {
long beginTime = Long.MIN_VALUE;
long endTime = Long.MIN_VALUE;
/**
* Subclass instances may call this method during cloning to copy the values of
* all properties stored in this base class.
*
* @param dest AbstractTracking destination for copying properties
*/
protected void copy(AbstractTracking dest) {
dest.beginTime = beginTime;
dest.endTime = endTime;
}
}
| 1,618 | 37.547619 | 81 | java |
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/StartupProgressMetrics.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.startupprogress;
import static org.apache.hadoop.metrics2.lib.Interns.info;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgressView;
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
/**
* Links {@link StartupProgress} to a {@link MetricsSource} to expose its
* information via JMX.
*/
@InterfaceAudience.Private
public class StartupProgressMetrics implements MetricsSource {
private static final MetricsInfo STARTUP_PROGRESS_METRICS_INFO =
info("StartupProgress", "NameNode startup progress");
private final StartupProgress startupProgress;
/**
* Registers StartupProgressMetrics linked to the given StartupProgress.
*
* @param prog StartupProgress to link
*/
public static void register(StartupProgress prog) {
new StartupProgressMetrics(prog);
}
/**
* Creates a new StartupProgressMetrics registered with the metrics system.
*
* @param startupProgress StartupProgress to link
*/
public StartupProgressMetrics(StartupProgress startupProgress) {
this.startupProgress = startupProgress;
DefaultMetricsSystem.instance().register(
STARTUP_PROGRESS_METRICS_INFO.name(),
STARTUP_PROGRESS_METRICS_INFO.description(), this);
}
@Override
public void getMetrics(MetricsCollector collector, boolean all) {
StartupProgressView prog = startupProgress.createView();
MetricsRecordBuilder builder = collector.addRecord(
STARTUP_PROGRESS_METRICS_INFO);
builder.addCounter(info("ElapsedTime", "overall elapsed time"),
prog.getElapsedTime());
builder.addGauge(info("PercentComplete", "overall percent complete"),
prog.getPercentComplete());
for (Phase phase: prog.getPhases()) {
addCounter(builder, phase, "Count", " count", prog.getCount(phase));
addCounter(builder, phase, "ElapsedTime", " elapsed time",
prog.getElapsedTime(phase));
addCounter(builder, phase, "Total", " total", prog.getTotal(phase));
addGauge(builder, phase, "PercentComplete", " percent complete",
prog.getPercentComplete(phase));
}
}
/**
* Adds a counter with a name built by using the specified phase's name as
* prefix and then appending the specified suffix.
*
* @param builder MetricsRecordBuilder to receive counter
* @param phase Phase to add
* @param nameSuffix String suffix of metric name
* @param descSuffix String suffix of metric description
* @param value long counter value
*/
private static void addCounter(MetricsRecordBuilder builder, Phase phase,
String nameSuffix, String descSuffix, long value) {
MetricsInfo metricsInfo = info(phase.getName() + nameSuffix,
phase.getDescription() + descSuffix);
builder.addCounter(metricsInfo, value);
}
/**
* Adds a gauge with a name built by using the specified phase's name as prefix
* and then appending the specified suffix.
*
* @param builder MetricsRecordBuilder to receive counter
* @param phase Phase to add
* @param nameSuffix String suffix of metric name
* @param descSuffix String suffix of metric description
* @param value float gauge value
*/
private static void addGauge(MetricsRecordBuilder builder, Phase phase,
String nameSuffix, String descSuffix, float value) {
MetricsInfo metricsInfo = info(phase.getName() + nameSuffix,
phase.getDescription() + descSuffix);
builder.addGauge(metricsInfo, value);
}
}
| 4,637 | 37.97479 | 82 | java |
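The addCounter/addGauge helpers above derive metric names by concatenating Phase.getName() with a fixed suffix, so each phase contributes Count, ElapsedTime, Total and PercentComplete entries under the StartupProgress record. A hedged enumeration of those names, computed from the enum rather than copied from a JMX dump:

import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;

public class MetricNamesSketch {
  public static void main(String[] args) {
    String[] suffixes = {"Count", "ElapsedTime", "Total", "PercentComplete"};
    for (Phase phase : Phase.values()) {
      for (String suffix : suffixes) {
        // e.g. LoadingFsImageCount, LoadingEditsElapsedTime, SafeModePercentComplete
        System.out.println(phase.getName() + suffix);
      }
    }
  }
}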
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/StartupProgressView.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.hdfs.server.namenode.startupprogress;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.Map;
import java.util.TreeSet;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.util.Time;
/**
* StartupProgressView is an immutable, consistent, read-only view of namenode
* startup progress. Callers obtain an instance by calling
* {@link StartupProgress#createView()} to clone current startup progress state.
* Subsequent updates to startup progress will not alter the view. This isolates
* the reader from ongoing updates and establishes a guarantee that the values
* returned by the view are consistent and unchanging across multiple related
* read operations. Calculations that require aggregation, such as overall
* percent complete, will not be impacted by mutations performed in other threads
* mid-way through the calculation.
*
* Methods that return primitive long may return {@link Long#MIN_VALUE} as a
* sentinel value to indicate that the property is undefined.
*/
@InterfaceAudience.Private
public class StartupProgressView {
private final Map<Phase, PhaseTracking> phases;
/**
* Returns the sum of the counter values for all steps in the specified phase.
*
* @param phase Phase to get
* @return long sum of counter values for all steps
*/
public long getCount(Phase phase) {
long sum = 0;
for (Step step: getSteps(phase)) {
sum += getCount(phase, step);
}
return sum;
}
/**
* Returns the counter value for the specified phase and step.
*
* @param phase Phase to get
* @param step Step to get
* @return long counter value for phase and step
*/
public long getCount(Phase phase, Step step) {
StepTracking tracking = getStepTracking(phase, step);
return tracking != null ? tracking.count.get() : 0;
}
/**
* Returns overall elapsed time, calculated as time between start of loading
* fsimage and end of safemode.
*
* @return long elapsed time
*/
public long getElapsedTime() {
return getElapsedTime(phases.get(Phase.LOADING_FSIMAGE),
phases.get(Phase.SAFEMODE));
}
/**
* Returns elapsed time for the specified phase, calculated as (end - begin) if
* phase is complete or (now - begin) if phase is running or 0 if the phase is
* still pending.
*
* @param phase Phase to get
* @return long elapsed time
*/
public long getElapsedTime(Phase phase) {
return getElapsedTime(phases.get(phase));
}
/**
* Returns elapsed time for the specified phase and step, calculated as
* (end - begin) if step is complete or (now - begin) if step is running or 0
* if the step is still pending.
*
* @param phase Phase to get
* @param step Step to get
* @return long elapsed time
*/
public long getElapsedTime(Phase phase, Step step) {
return getElapsedTime(getStepTracking(phase, step));
}
/**
* Returns the optional file name associated with the specified phase, possibly
* null.
*
* @param phase Phase to get
* @return String optional file name, possibly null
*/
public String getFile(Phase phase) {
return phases.get(phase).file;
}
/**
* Returns overall percent complete, calculated by aggregating percent complete
* of all phases. This is an approximation that assumes all phases have equal
* running time. In practice, this isn't true, but there isn't sufficient
* information available to predict proportional weights for each phase.
*
* @return float percent complete
*/
public float getPercentComplete() {
if (getStatus(Phase.SAFEMODE) == Status.COMPLETE) {
return 1.0f;
} else {
float total = 0.0f;
int numPhases = 0;
for (Phase phase: phases.keySet()) {
++numPhases;
total += getPercentComplete(phase);
}
return getBoundedPercent(total / numPhases);
}
}
/**
* Returns percent complete for the specified phase, calculated by aggregating
* the counter values and totals for all steps within the phase.
*
* @param phase Phase to get
* @return float percent complete
*/
public float getPercentComplete(Phase phase) {
if (getStatus(phase) == Status.COMPLETE) {
return 1.0f;
} else {
long total = getTotal(phase);
long count = 0;
for (Step step: getSteps(phase)) {
count += getCount(phase, step);
}
return total > 0 ? getBoundedPercent(1.0f * count / total) : 0.0f;
}
}
/**
* Returns percent complete for the specified phase and step, calculated as
* counter value divided by total.
*
* @param phase Phase to get
* @param step Step to get
* @return float percent complete
*/
public float getPercentComplete(Phase phase, Step step) {
if (getStatus(phase) == Status.COMPLETE) {
return 1.0f;
} else {
long total = getTotal(phase, step);
long count = getCount(phase, step);
return total > 0 ? getBoundedPercent(1.0f * count / total) : 0.0f;
}
}
/**
* Returns all phases.
*
* @return Iterable<Phase> containing all phases
*/
public Iterable<Phase> getPhases() {
return EnumSet.allOf(Phase.class);
}
/**
* Returns all steps within a phase.
*
* @param phase Phase to get
* @return Iterable<Step> all steps
*/
public Iterable<Step> getSteps(Phase phase) {
return new TreeSet<Step>(phases.get(phase).steps.keySet());
}
/**
* Returns the optional size in bytes associated with the specified phase,
* possibly Long.MIN_VALUE if undefined.
*
* @param phase Phase to get
* @return long optional size in bytes, possibly Long.MIN_VALUE
*/
public long getSize(Phase phase) {
return phases.get(phase).size;
}
/**
* Returns the current run status of the specified phase.
*
* @param phase Phase to get
* @return Status run status of phase
*/
public Status getStatus(Phase phase) {
PhaseTracking tracking = phases.get(phase);
if (tracking.beginTime == Long.MIN_VALUE) {
return Status.PENDING;
} else if (tracking.endTime == Long.MIN_VALUE) {
return Status.RUNNING;
} else {
return Status.COMPLETE;
}
}
/**
* Returns the sum of the totals for all steps in the specified phase.
*
* @param phase Phase to get
* @return long sum of totals for all steps
*/
public long getTotal(Phase phase) {
long sum = 0;
for (StepTracking tracking: phases.get(phase).steps.values()) {
if (tracking.total != Long.MIN_VALUE) {
sum += tracking.total;
}
}
return sum;
}
/**
* Returns the total for the specified phase and step.
*
* @param phase Phase to get
* @param step Step to get
* @return long total
*/
public long getTotal(Phase phase, Step step) {
StepTracking tracking = getStepTracking(phase, step);
return tracking != null && tracking.total != Long.MIN_VALUE ?
tracking.total : 0;
}
/**
* Creates a new StartupProgressView by cloning data from the specified
* StartupProgress.
*
* @param prog StartupProgress to clone
*/
StartupProgressView(StartupProgress prog) {
phases = new HashMap<Phase, PhaseTracking>();
for (Map.Entry<Phase, PhaseTracking> entry: prog.phases.entrySet()) {
phases.put(entry.getKey(), entry.getValue().clone());
}
}
/**
* Returns elapsed time, calculated as (end - begin) if both are defined or
* (now - begin) if end is undefined or 0 if both are undefined. Begin and end
* time come from the same AbstractTracking instance.
*
* @param tracking AbstractTracking containing begin and end time
* @return long elapsed time
*/
private long getElapsedTime(AbstractTracking tracking) {
return getElapsedTime(tracking, tracking);
}
/**
* Returns elapsed time, calculated as (end - begin) if both are defined or
* (now - begin) if end is undefined or 0 if both are undefined. Begin and end
* time may come from different AbstractTracking instances.
*
* @param beginTracking AbstractTracking containing begin time
* @param endTracking AbstractTracking containing end time
* @return long elapsed time
*/
private long getElapsedTime(AbstractTracking beginTracking,
AbstractTracking endTracking) {
final long elapsed;
if (beginTracking != null && beginTracking.beginTime != Long.MIN_VALUE &&
endTracking != null && endTracking.endTime != Long.MIN_VALUE) {
elapsed = endTracking.endTime - beginTracking.beginTime;
} else if (beginTracking != null &&
beginTracking.beginTime != Long.MIN_VALUE) {
elapsed = Time.monotonicNow() - beginTracking.beginTime;
} else {
elapsed = 0;
}
return Math.max(0, elapsed);
}
/**
* Returns the StepTracking internal data structure for the specified phase
* and step, possibly null if not found.
*
* @param phase Phase to get
* @param step Step to get
* @return StepTracking for phase and step, possibly null
*/
private StepTracking getStepTracking(Phase phase, Step step) {
PhaseTracking phaseTracking = phases.get(phase);
Map<Step, StepTracking> steps = phaseTracking != null ?
phaseTracking.steps : null;
return steps != null ? steps.get(step) : null;
}
/**
* Returns the given value restricted to the range [0.0, 1.0].
*
* @param percent float value to restrict
* @return float value restricted to range [0.0, 1.0]
*/
private static float getBoundedPercent(float percent) {
return Math.max(0.0f, Math.min(1.0f, percent));
}
}
| 10,462 | 30.802432 | 81 | java |
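Because the view is an immutable clone, a reporting thread can take one view and make several related reads without racing the startup threads. A minimal reader sketch, assuming access to the NameNode's StartupProgress instance (obtained elsewhere); the class and output formatting are illustrative:

import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgressView;

public class ProgressReportSketch {
  static void report(StartupProgress prog) {
    // One consistent snapshot; later updates by startup threads won't change it.
    StartupProgressView view = prog.createView();
    System.out.println("overall: " + (view.getPercentComplete() * 100) + "%");
    for (Phase phase : view.getPhases()) {
      System.out.println(phase.getName() + " " + view.getStatus(phase) + " "
          + (view.getPercentComplete(phase) * 100) + "%");
    }
  }
}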
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/PhaseTracking.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.hdfs.server.namenode.startupprogress;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* Internal data structure used to track progress of a {@link Phase}.
*/
@InterfaceAudience.Private
final class PhaseTracking extends AbstractTracking {
String file;
long size = Long.MIN_VALUE;
final ConcurrentMap<Step, StepTracking> steps =
new ConcurrentHashMap<Step, StepTracking>();
@Override
public PhaseTracking clone() {
PhaseTracking clone = new PhaseTracking();
super.copy(clone);
clone.file = file;
clone.size = size;
for (Map.Entry<Step, StepTracking> entry: steps.entrySet()) {
clone.steps.put(entry.getKey(), entry.getValue().clone());
}
return clone;
}
}
| 1,662 | 34.382979 | 80 | java |
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import static org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode.Loader.loadINodeDirectory;
import static org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode.Loader.loadPermission;
import static org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode.Loader.updateBlocksMap;
import static org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode.Saver.buildINodeDirectory;
import static org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode.Saver.buildINodeFile;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import com.google.common.collect.ImmutableList;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.namenode.AclEntryStatusFormat;
import org.apache.hadoop.hdfs.server.namenode.AclFeature;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.LoaderContext;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SectionName;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.Type;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryAttributes;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes;
import org.apache.hadoop.hdfs.server.namenode.INodeMap;
import org.apache.hadoop.hdfs.server.namenode.INodeReference;
import org.apache.hadoop.hdfs.server.namenode.INodeReference.DstReference;
import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount;
import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithName;
import org.apache.hadoop.hdfs.server.namenode.INodeWithAdditionalFields;
import org.apache.hadoop.hdfs.server.namenode.QuotaByStorageTypeEntry;
import org.apache.hadoop.hdfs.server.namenode.SaveNamespaceContext;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiffList;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.Root;
import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
import org.apache.hadoop.hdfs.util.Diff.ListType;
import org.apache.hadoop.hdfs.util.EnumCounters;
import com.google.common.base.Preconditions;
import com.google.protobuf.ByteString;
@InterfaceAudience.Private
public class FSImageFormatPBSnapshot {
/**
* Loading snapshot related information from protobuf based FSImage
*/
public final static class Loader {
private final FSNamesystem fsn;
private final FSDirectory fsDir;
private final FSImageFormatProtobuf.Loader parent;
private final Map<Integer, Snapshot> snapshotMap;
public Loader(FSNamesystem fsn, FSImageFormatProtobuf.Loader parent) {
this.fsn = fsn;
this.fsDir = fsn.getFSDirectory();
this.snapshotMap = new HashMap<Integer, Snapshot>();
this.parent = parent;
}
/**
* The sequence of the ref nodes in refList must be strictly the same as
* their sequence in the fsimage.
*/
public void loadINodeReferenceSection(InputStream in) throws IOException {
final List<INodeReference> refList = parent.getLoaderContext()
.getRefList();
while (true) {
INodeReferenceSection.INodeReference e = INodeReferenceSection
.INodeReference.parseDelimitedFrom(in);
if (e == null) {
break;
}
INodeReference ref = loadINodeReference(e);
refList.add(ref);
}
}
private INodeReference loadINodeReference(
INodeReferenceSection.INodeReference r) throws IOException {
long referredId = r.getReferredId();
INode referred = fsDir.getInode(referredId);
WithCount withCount = (WithCount) referred.getParentReference();
if (withCount == null) {
withCount = new INodeReference.WithCount(null, referred);
}
final INodeReference ref;
if (r.hasDstSnapshotId()) { // DstReference
ref = new INodeReference.DstReference(null, withCount,
r.getDstSnapshotId());
} else {
ref = new INodeReference.WithName(null, withCount, r.getName()
.toByteArray(), r.getLastSnapshotId());
}
return ref;
}
/**
* Load the snapshots section from fsimage. Also add snapshottable feature
* to snapshottable directories.
*/
public void loadSnapshotSection(InputStream in) throws IOException {
SnapshotManager sm = fsn.getSnapshotManager();
SnapshotSection section = SnapshotSection.parseDelimitedFrom(in);
int snum = section.getNumSnapshots();
sm.setNumSnapshots(snum);
sm.setSnapshotCounter(section.getSnapshotCounter());
for (long sdirId : section.getSnapshottableDirList()) {
INodeDirectory dir = fsDir.getInode(sdirId).asDirectory();
if (!dir.isSnapshottable()) {
dir.addSnapshottableFeature();
} else {
// dir is root, and admin set root to snapshottable before
dir.setSnapshotQuota(DirectorySnapshottableFeature.SNAPSHOT_LIMIT);
}
sm.addSnapshottable(dir);
}
loadSnapshots(in, snum);
}
private void loadSnapshots(InputStream in, int size) throws IOException {
for (int i = 0; i < size; i++) {
SnapshotSection.Snapshot pbs = SnapshotSection.Snapshot
.parseDelimitedFrom(in);
INodeDirectory root = loadINodeDirectory(pbs.getRoot(),
parent.getLoaderContext());
int sid = pbs.getSnapshotId();
INodeDirectory parent = fsDir.getInode(root.getId()).asDirectory();
Snapshot snapshot = new Snapshot(sid, root, parent);
// add the snapshot to parent, since we follow the sequence of
// snapshotsByNames when saving, we do not need to sort when loading
parent.getDirectorySnapshottableFeature().addSnapshot(snapshot);
snapshotMap.put(sid, snapshot);
}
}
/**
* Load the snapshot diff section from fsimage.
*/
public void loadSnapshotDiffSection(InputStream in) throws IOException {
final List<INodeReference> refList = parent.getLoaderContext()
.getRefList();
while (true) {
SnapshotDiffSection.DiffEntry entry = SnapshotDiffSection.DiffEntry
.parseDelimitedFrom(in);
if (entry == null) {
break;
}
long inodeId = entry.getInodeId();
INode inode = fsDir.getInode(inodeId);
SnapshotDiffSection.DiffEntry.Type type = entry.getType();
switch (type) {
case FILEDIFF:
loadFileDiffList(in, inode.asFile(), entry.getNumOfDiff());
break;
case DIRECTORYDIFF:
loadDirectoryDiffList(in, inode.asDirectory(), entry.getNumOfDiff(),
refList);
break;
}
}
}
/** Load FileDiff list for a file with snapshot feature */
private void loadFileDiffList(InputStream in, INodeFile file, int size)
throws IOException {
final FileDiffList diffs = new FileDiffList();
final LoaderContext state = parent.getLoaderContext();
for (int i = 0; i < size; i++) {
SnapshotDiffSection.FileDiff pbf = SnapshotDiffSection.FileDiff
.parseDelimitedFrom(in);
INodeFileAttributes copy = null;
if (pbf.hasSnapshotCopy()) {
INodeSection.INodeFile fileInPb = pbf.getSnapshotCopy();
PermissionStatus permission = loadPermission(
fileInPb.getPermission(), state.getStringTable());
AclFeature acl = null;
if (fileInPb.hasAcl()) {
int[] entries = AclEntryStatusFormat
.toInt(FSImageFormatPBINode.Loader.loadAclEntries(
fileInPb.getAcl(), state.getStringTable()));
acl = new AclFeature(entries);
}
XAttrFeature xAttrs = null;
if (fileInPb.hasXAttrs()) {
xAttrs = new XAttrFeature(FSImageFormatPBINode.Loader.loadXAttrs(
fileInPb.getXAttrs(), state.getStringTable()));
}
copy = new INodeFileAttributes.SnapshotCopy(pbf.getName()
.toByteArray(), permission, acl, fileInPb.getModificationTime(),
fileInPb.getAccessTime(), (short) fileInPb.getReplication(),
fileInPb.getPreferredBlockSize(),
(byte)fileInPb.getStoragePolicyID(),
xAttrs);
}
FileDiff diff = new FileDiff(pbf.getSnapshotId(), copy, null,
pbf.getFileSize());
List<BlockProto> bpl = pbf.getBlocksList();
BlockInfo[] blocks = new BlockInfo[bpl.size()];
for(int j = 0, e = bpl.size(); j < e; ++j) {
Block blk = PBHelper.convert(bpl.get(j));
BlockInfo storedBlock = fsn.getBlockManager().getStoredBlock(blk);
if(storedBlock == null) {
storedBlock = fsn.getBlockManager().addBlockCollection(
new BlockInfoContiguous(blk, copy.getFileReplication()), file);
}
blocks[j] = storedBlock;
}
if(blocks.length > 0) {
diff.setBlocks(blocks);
}
diffs.addFirst(diff);
}
file.addSnapshotFeature(diffs);
}
/** Load the created list in a DirectoryDiff */
private List<INode> loadCreatedList(InputStream in, INodeDirectory dir,
int size) throws IOException {
List<INode> clist = new ArrayList<INode>(size);
for (long c = 0; c < size; c++) {
CreatedListEntry entry = CreatedListEntry.parseDelimitedFrom(in);
INode created = SnapshotFSImageFormat.loadCreated(entry.getName()
.toByteArray(), dir);
clist.add(created);
}
return clist;
}
private void addToDeletedList(INode dnode, INodeDirectory parent) {
dnode.setParent(parent);
if (dnode.isFile()) {
updateBlocksMap(dnode.asFile(), fsn.getBlockManager());
}
}
/**
* Load the deleted list in a DirectoryDiff
*/
private List<INode> loadDeletedList(final List<INodeReference> refList,
InputStream in, INodeDirectory dir, List<Long> deletedNodes,
List<Integer> deletedRefNodes)
throws IOException {
List<INode> dlist = new ArrayList<INode>(deletedRefNodes.size()
+ deletedNodes.size());
// load non-reference inodes
for (long deletedId : deletedNodes) {
INode deleted = fsDir.getInode(deletedId);
dlist.add(deleted);
addToDeletedList(deleted, dir);
}
// load reference nodes in the deleted list
for (int refId : deletedRefNodes) {
INodeReference deletedRef = refList.get(refId);
dlist.add(deletedRef);
addToDeletedList(deletedRef, dir);
}
Collections.sort(dlist, new Comparator<INode>() {
@Override
public int compare(INode n1, INode n2) {
return n1.compareTo(n2.getLocalNameBytes());
}
});
return dlist;
}
/** Load DirectoryDiff list for a directory with snapshot feature */
private void loadDirectoryDiffList(InputStream in, INodeDirectory dir,
int size, final List<INodeReference> refList) throws IOException {
if (!dir.isWithSnapshot()) {
dir.addSnapshotFeature(null);
}
DirectoryDiffList diffs = dir.getDiffs();
final LoaderContext state = parent.getLoaderContext();
for (int i = 0; i < size; i++) {
// load a directory diff
SnapshotDiffSection.DirectoryDiff diffInPb = SnapshotDiffSection.
DirectoryDiff.parseDelimitedFrom(in);
final int snapshotId = diffInPb.getSnapshotId();
final Snapshot snapshot = snapshotMap.get(snapshotId);
int childrenSize = diffInPb.getChildrenSize();
boolean useRoot = diffInPb.getIsSnapshotRoot();
INodeDirectoryAttributes copy = null;
if (useRoot) {
copy = snapshot.getRoot();
} else if (diffInPb.hasSnapshotCopy()) {
INodeSection.INodeDirectory dirCopyInPb = diffInPb.getSnapshotCopy();
final byte[] name = diffInPb.getName().toByteArray();
PermissionStatus permission = loadPermission(
dirCopyInPb.getPermission(), state.getStringTable());
AclFeature acl = null;
if (dirCopyInPb.hasAcl()) {
int[] entries = AclEntryStatusFormat
.toInt(FSImageFormatPBINode.Loader.loadAclEntries(
dirCopyInPb.getAcl(), state.getStringTable()));
acl = new AclFeature(entries);
}
XAttrFeature xAttrs = null;
if (dirCopyInPb.hasXAttrs()) {
xAttrs = new XAttrFeature(FSImageFormatPBINode.Loader.loadXAttrs(
dirCopyInPb.getXAttrs(), state.getStringTable()));
}
long modTime = dirCopyInPb.getModificationTime();
boolean noQuota = dirCopyInPb.getNsQuota() == -1
&& dirCopyInPb.getDsQuota() == -1
&& (!dirCopyInPb.hasTypeQuotas());
if (noQuota) {
copy = new INodeDirectoryAttributes.SnapshotCopy(name,
permission, acl, modTime, xAttrs);
} else {
EnumCounters<StorageType> typeQuotas = null;
if (dirCopyInPb.hasTypeQuotas()) {
ImmutableList<QuotaByStorageTypeEntry> qes =
FSImageFormatPBINode.Loader.loadQuotaByStorageTypeEntries(
dirCopyInPb.getTypeQuotas());
typeQuotas = new EnumCounters<StorageType>(StorageType.class,
HdfsConstants.QUOTA_RESET);
for (QuotaByStorageTypeEntry qe : qes) {
if (qe.getQuota() >= 0 && qe.getStorageType() != null &&
qe.getStorageType().supportTypeQuota()) {
typeQuotas.set(qe.getStorageType(), qe.getQuota());
}
}
}
copy = new INodeDirectoryAttributes.CopyWithQuota(name, permission,
acl, modTime, dirCopyInPb.getNsQuota(),
dirCopyInPb.getDsQuota(), typeQuotas, xAttrs);
}
}
// load created list
List<INode> clist = loadCreatedList(in, dir,
diffInPb.getCreatedListSize());
// load deleted list
List<INode> dlist = loadDeletedList(refList, in, dir,
diffInPb.getDeletedINodeList(), diffInPb.getDeletedINodeRefList());
// create the directory diff
DirectoryDiff diff = new DirectoryDiff(snapshotId, copy, null,
childrenSize, clist, dlist, useRoot);
diffs.addFirst(diff);
}
}
}
/**
* Saving snapshot related information to protobuf based FSImage
*/
public final static class Saver {
private final FSNamesystem fsn;
private final FileSummary.Builder headers;
private final FSImageFormatProtobuf.Saver parent;
private final SaveNamespaceContext context;
public Saver(FSImageFormatProtobuf.Saver parent,
FileSummary.Builder headers, SaveNamespaceContext context,
FSNamesystem fsn) {
this.parent = parent;
this.headers = headers;
this.context = context;
this.fsn = fsn;
}
/**
* save all the snapshottable directories and snapshots to fsimage
*/
public void serializeSnapshotSection(OutputStream out) throws IOException {
SnapshotManager sm = fsn.getSnapshotManager();
SnapshotSection.Builder b = SnapshotSection.newBuilder()
.setSnapshotCounter(sm.getSnapshotCounter())
.setNumSnapshots(sm.getNumSnapshots());
INodeDirectory[] snapshottables = sm.getSnapshottableDirs();
for (INodeDirectory sdir : snapshottables) {
b.addSnapshottableDir(sdir.getId());
}
b.build().writeDelimitedTo(out);
int i = 0;
for(INodeDirectory sdir : snapshottables) {
for (Snapshot s : sdir.getDirectorySnapshottableFeature()
.getSnapshotList()) {
Root sroot = s.getRoot();
SnapshotSection.Snapshot.Builder sb = SnapshotSection.Snapshot
.newBuilder().setSnapshotId(s.getId());
INodeSection.INodeDirectory.Builder db = buildINodeDirectory(sroot,
parent.getSaverContext());
INodeSection.INode r = INodeSection.INode.newBuilder()
.setId(sroot.getId())
.setType(INodeSection.INode.Type.DIRECTORY)
.setName(ByteString.copyFrom(sroot.getLocalNameBytes()))
.setDirectory(db).build();
sb.setRoot(r).build().writeDelimitedTo(out);
i++;
if (i % FSImageFormatProtobuf.Saver.CHECK_CANCEL_INTERVAL == 0) {
context.checkCancelled();
}
}
}
Preconditions.checkState(i == sm.getNumSnapshots());
parent.commitSection(headers, FSImageFormatProtobuf.SectionName.SNAPSHOT);
}
/**
* This can only be called after serializing both INode_Dir and SnapshotDiff
*/
public void serializeINodeReferenceSection(OutputStream out)
throws IOException {
final List<INodeReference> refList = parent.getSaverContext()
.getRefList();
for (INodeReference ref : refList) {
INodeReferenceSection.INodeReference.Builder rb = buildINodeReference(ref);
rb.build().writeDelimitedTo(out);
}
parent.commitSection(headers, SectionName.INODE_REFERENCE);
}
private INodeReferenceSection.INodeReference.Builder buildINodeReference(
INodeReference ref) throws IOException {
INodeReferenceSection.INodeReference.Builder rb =
INodeReferenceSection.INodeReference.newBuilder().
setReferredId(ref.getId());
if (ref instanceof WithName) {
rb.setLastSnapshotId(((WithName) ref).getLastSnapshotId()).setName(
ByteString.copyFrom(ref.getLocalNameBytes()));
} else if (ref instanceof DstReference) {
rb.setDstSnapshotId(ref.getDstSnapshotId());
}
return rb;
}
/**
* save all the snapshot diff to fsimage
*/
public void serializeSnapshotDiffSection(OutputStream out)
throws IOException {
INodeMap inodesMap = fsn.getFSDirectory().getINodeMap();
final List<INodeReference> refList = parent.getSaverContext()
.getRefList();
int i = 0;
Iterator<INodeWithAdditionalFields> iter = inodesMap.getMapIterator();
while (iter.hasNext()) {
INodeWithAdditionalFields inode = iter.next();
if (inode.isFile()) {
serializeFileDiffList(inode.asFile(), out);
} else if (inode.isDirectory()) {
serializeDirDiffList(inode.asDirectory(), refList, out);
}
++i;
if (i % FSImageFormatProtobuf.Saver.CHECK_CANCEL_INTERVAL == 0) {
context.checkCancelled();
}
}
parent.commitSection(headers,
FSImageFormatProtobuf.SectionName.SNAPSHOT_DIFF);
}
private void serializeFileDiffList(INodeFile file, OutputStream out)
throws IOException {
FileWithSnapshotFeature sf = file.getFileWithSnapshotFeature();
if (sf != null) {
List<FileDiff> diffList = sf.getDiffs().asList();
SnapshotDiffSection.DiffEntry entry = SnapshotDiffSection.DiffEntry
.newBuilder().setInodeId(file.getId()).setType(Type.FILEDIFF)
.setNumOfDiff(diffList.size()).build();
entry.writeDelimitedTo(out);
for (int i = diffList.size() - 1; i >= 0; i--) {
FileDiff diff = diffList.get(i);
SnapshotDiffSection.FileDiff.Builder fb = SnapshotDiffSection.FileDiff
.newBuilder().setSnapshotId(diff.getSnapshotId())
.setFileSize(diff.getFileSize());
if(diff.getBlocks() != null) {
for(Block block : diff.getBlocks()) {
fb.addBlocks(PBHelper.convert(block));
}
}
INodeFileAttributes copy = diff.snapshotINode;
if (copy != null) {
fb.setName(ByteString.copyFrom(copy.getLocalNameBytes()))
.setSnapshotCopy(buildINodeFile(copy, parent.getSaverContext()));
}
fb.build().writeDelimitedTo(out);
}
}
}
private void saveCreatedList(List<INode> created, OutputStream out)
throws IOException {
// local names of the created list member
for (INode c : created) {
SnapshotDiffSection.CreatedListEntry.newBuilder()
.setName(ByteString.copyFrom(c.getLocalNameBytes())).build()
.writeDelimitedTo(out);
}
}
private void serializeDirDiffList(INodeDirectory dir,
final List<INodeReference> refList, OutputStream out)
throws IOException {
DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
if (sf != null) {
List<DirectoryDiff> diffList = sf.getDiffs().asList();
SnapshotDiffSection.DiffEntry entry = SnapshotDiffSection.DiffEntry
.newBuilder().setInodeId(dir.getId()).setType(Type.DIRECTORYDIFF)
.setNumOfDiff(diffList.size()).build();
entry.writeDelimitedTo(out);
for (int i = diffList.size() - 1; i >= 0; i--) { // reverse order!
DirectoryDiff diff = diffList.get(i);
SnapshotDiffSection.DirectoryDiff.Builder db = SnapshotDiffSection.
DirectoryDiff.newBuilder().setSnapshotId(diff.getSnapshotId())
.setChildrenSize(diff.getChildrenSize())
.setIsSnapshotRoot(diff.isSnapshotRoot());
INodeDirectoryAttributes copy = diff.snapshotINode;
if (!diff.isSnapshotRoot() && copy != null) {
db.setName(ByteString.copyFrom(copy.getLocalNameBytes()))
.setSnapshotCopy(
buildINodeDirectory(copy, parent.getSaverContext()));
}
// process created list and deleted list
List<INode> created = diff.getChildrenDiff()
.getList(ListType.CREATED);
db.setCreatedListSize(created.size());
List<INode> deleted = diff.getChildrenDiff().getList(ListType.DELETED);
for (INode d : deleted) {
if (d.isReference()) {
refList.add(d.asReference());
db.addDeletedINodeRef(refList.size() - 1);
} else {
db.addDeletedINode(d.getId());
}
}
db.build().writeDelimitedTo(out);
saveCreatedList(created, out);
}
}
}
}
private FSImageFormatPBSnapshot(){}
}
| 24,905 | 41.429302 | 102 | java |
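All of the loader methods above follow one shape: keep calling parseDelimitedFrom on the section's input stream until it returns null, then apply each message to the in-memory namespace. A stripped-down sketch of that loop using the DiffEntry message already imported above; counting entries stands in for the real per-entry processing:

import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection;

public class DelimitedSectionSketch {
  // Illustrative only: the real loadSnapshotDiffSection dispatches each entry
  // to loadFileDiffList or loadDirectoryDiffList instead of counting it.
  static int countDiffEntries(InputStream in) throws IOException {
    int entries = 0;
    while (true) {
      SnapshotDiffSection.DiffEntry entry =
          SnapshotDiffSection.DiffEntry.parseDelimitedFrom(in);
      if (entry == null) {   // end of the length-delimited section
        break;
      }
      entries++;
    }
    return entries;
  }
}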
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.protocol.SnapshotException;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.hadoop.hdfs.server.namenode.Content;
import org.apache.hadoop.hdfs.server.namenode.ContentSummaryComputationContext;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.SnapshotAndINode;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.INodeReference;
import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount;
import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithName;
import org.apache.hadoop.hdfs.util.Diff.ListType;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
import org.apache.hadoop.util.Time;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
/**
* A directory with this feature is a snapshottable directory, where snapshots
* can be taken. This feature extends {@link DirectoryWithSnapshotFeature}, and
* maintains extra information about all the snapshots taken on this directory.
*/
@InterfaceAudience.Private
public class DirectorySnapshottableFeature extends DirectoryWithSnapshotFeature {
/** Limit the number of snapshot per snapshottable directory. */
static final int SNAPSHOT_LIMIT = 1 << 16;
/**
* Snapshots of this directory in ascending order of snapshot names.
* Note that snapshots in ascending order of snapshot id are stored in
* {@link DirectoryWithSnapshotFeature}.diffs (a private field).
*/
private final List<Snapshot> snapshotsByNames = new ArrayList<Snapshot>();
/** Number of snapshots allowed. */
private int snapshotQuota = SNAPSHOT_LIMIT;
public DirectorySnapshottableFeature(DirectoryWithSnapshotFeature feature) {
super(feature == null ? null : feature.getDiffs());
}
/** @return the number of existing snapshots. */
public int getNumSnapshots() {
return snapshotsByNames.size();
}
private int searchSnapshot(byte[] snapshotName) {
return Collections.binarySearch(snapshotsByNames, snapshotName);
}
/** @return the snapshot with the given name. */
public Snapshot getSnapshot(byte[] snapshotName) {
final int i = searchSnapshot(snapshotName);
return i < 0? null: snapshotsByNames.get(i);
}
public Snapshot getSnapshotById(int sid) {
for (Snapshot s : snapshotsByNames) {
if (s.getId() == sid) {
return s;
}
}
return null;
}
/** @return {@link #snapshotsByNames} as a {@link ReadOnlyList} */
public ReadOnlyList<Snapshot> getSnapshotList() {
return ReadOnlyList.Util.asReadOnlyList(snapshotsByNames);
}
/**
* Rename a snapshot
* @param path
* The directory path where the snapshot was taken. Used for
* generating exception message.
* @param oldName
* Old name of the snapshot
* @param newName
* New name the snapshot will be renamed to
* @throws SnapshotException
* Throw SnapshotException when either the snapshot with the old
* name does not exist or a snapshot with the new name already
* exists
*/
public void renameSnapshot(String path, String oldName, String newName)
throws SnapshotException {
if (newName.equals(oldName)) {
return;
}
final int indexOfOld = searchSnapshot(DFSUtil.string2Bytes(oldName));
if (indexOfOld < 0) {
throw new SnapshotException("The snapshot " + oldName
+ " does not exist for directory " + path);
} else {
final byte[] newNameBytes = DFSUtil.string2Bytes(newName);
int indexOfNew = searchSnapshot(newNameBytes);
if (indexOfNew >= 0) {
throw new SnapshotException("The snapshot " + newName
+ " already exists for directory " + path);
}
// remove the one with old name from snapshotsByNames
Snapshot snapshot = snapshotsByNames.remove(indexOfOld);
final INodeDirectory ssRoot = snapshot.getRoot();
ssRoot.setLocalName(newNameBytes);
indexOfNew = -indexOfNew - 1;
if (indexOfNew <= indexOfOld) {
snapshotsByNames.add(indexOfNew, snapshot);
} else { // indexOfNew > indexOfOld
snapshotsByNames.add(indexOfNew - 1, snapshot);
}
}
}
public int getSnapshotQuota() {
return snapshotQuota;
}
public void setSnapshotQuota(int snapshotQuota) {
if (snapshotQuota < 0) {
throw new HadoopIllegalArgumentException(
"Cannot set snapshot quota to " + snapshotQuota + " < 0");
}
this.snapshotQuota = snapshotQuota;
}
/**
* Simply add a snapshot into the {@link #snapshotsByNames}. Used when loading
* fsimage.
*/
void addSnapshot(Snapshot snapshot) {
this.snapshotsByNames.add(snapshot);
}
/** Add a snapshot. */
public Snapshot addSnapshot(INodeDirectory snapshotRoot, int id, String name)
throws SnapshotException, QuotaExceededException {
//check snapshot quota
final int n = getNumSnapshots();
if (n + 1 > snapshotQuota) {
throw new SnapshotException("Failed to add snapshot: there are already "
+ n + " snapshot(s) and the snapshot quota is "
+ snapshotQuota);
}
final Snapshot s = new Snapshot(id, name, snapshotRoot);
final byte[] nameBytes = s.getRoot().getLocalNameBytes();
final int i = searchSnapshot(nameBytes);
if (i >= 0) {
throw new SnapshotException("Failed to add snapshot: there is already a "
+ "snapshot with the same name \"" + Snapshot.getSnapshotName(s) + "\".");
}
final DirectoryDiff d = getDiffs().addDiff(id, snapshotRoot);
d.setSnapshotRoot(s.getRoot());
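// i is negative here (no snapshot with this name exists yet), so -i - 1 is
// the insertion point that keeps snapshotsByNames sorted by snapshot name.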
snapshotsByNames.add(-i - 1, s);
// set modification time
final long now = Time.now();
snapshotRoot.updateModificationTime(now, Snapshot.CURRENT_STATE_ID);
s.getRoot().setModificationTime(now, Snapshot.CURRENT_STATE_ID);
return s;
}
/**
* Remove the snapshot with the given name from {@link #snapshotsByNames},
* and delete all the corresponding DirectoryDiff.
*
* @param reclaimContext records blocks and inodes that need to be reclaimed
* @param snapshotRoot The directory where we take snapshots
* @param snapshotName The name of the snapshot to be removed
* @return The removed snapshot. Null if no snapshot with the given name
* exists.
*/
public Snapshot removeSnapshot(
INode.ReclaimContext reclaimContext, INodeDirectory snapshotRoot,
String snapshotName) throws SnapshotException {
final int i = searchSnapshot(DFSUtil.string2Bytes(snapshotName));
if (i < 0) {
throw new SnapshotException("Cannot delete snapshot " + snapshotName
+ " from path " + snapshotRoot.getFullPathName()
+ ": the snapshot does not exist.");
} else {
final Snapshot snapshot = snapshotsByNames.get(i);
int prior = Snapshot.findLatestSnapshot(snapshotRoot, snapshot.getId());
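// Clean the subtree changes recorded for the removed snapshot, using the
// latest snapshot taken before it (if any) as the prior reference point.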
snapshotRoot.cleanSubtree(reclaimContext, snapshot.getId(), prior);
// remove from snapshotsByNames after successfully cleaning the subtree
snapshotsByNames.remove(i);
return snapshot;
}
}
public ContentSummaryComputationContext computeContentSummary(
final BlockStoragePolicySuite bsps,
final INodeDirectory snapshotRoot,
final ContentSummaryComputationContext summary) {
snapshotRoot.computeContentSummary(summary);
summary.getCounts().addContent(Content.SNAPSHOT, snapshotsByNames.size());
summary.getCounts().addContent(Content.SNAPSHOTTABLE_DIRECTORY, 1);
return summary;
}
/**
* Compute the difference between two snapshots (or a snapshot and the current
* directory) of the directory.
*
* @param from The name of the start point of the comparison. Null indicating
* the current tree.
* @param to The name of the end point. Null indicating the current tree.
* @return The difference between the start/end points.
* @throws SnapshotException If there is no snapshot matching the starting
* point, or if {@code to} is not null but cannot be identified
* as a previous snapshot.
*/
SnapshotDiffInfo computeDiff(final INodeDirectory snapshotRoot,
final String from, final String to) throws SnapshotException {
Snapshot fromSnapshot = getSnapshotByName(snapshotRoot, from);
Snapshot toSnapshot = getSnapshotByName(snapshotRoot, to);
// if the start point is equal to the end point, return null
if (from.equals(to)) {
return null;
}
SnapshotDiffInfo diffs = new SnapshotDiffInfo(snapshotRoot, fromSnapshot,
toSnapshot);
computeDiffRecursively(snapshotRoot, snapshotRoot, new ArrayList<byte[]>(),
diffs);
return diffs;
}
/**
* Find the snapshot matching the given name.
*
* @param snapshotRoot The directory where snapshots were taken.
* @param snapshotName The name of the snapshot.
* @return The corresponding snapshot. Null if snapshotName is null or empty.
* @throws SnapshotException If snapshotName is neither null nor empty, but there
* is no snapshot matching the name.
*/
private Snapshot getSnapshotByName(INodeDirectory snapshotRoot,
String snapshotName) throws SnapshotException {
Snapshot s = null;
if (snapshotName != null && !snapshotName.isEmpty()) {
final int index = searchSnapshot(DFSUtil.string2Bytes(snapshotName));
if (index < 0) {
throw new SnapshotException("Cannot find the snapshot of directory "
+ snapshotRoot.getFullPathName() + " with name " + snapshotName);
}
s = snapshotsByNames.get(index);
}
return s;
}
/**
* Recursively compute the difference between snapshots under a given
* directory/file.
* @param snapshotRoot The directory where snapshots were taken.
* @param node The directory/file under which the diff is computed.
* @param parentPath Relative path (corresponding to the snapshot root) of
* the node's parent.
* @param diffReport data structure used to store the diff.
*/
private void computeDiffRecursively(final INodeDirectory snapshotRoot,
INode node, List<byte[]> parentPath, SnapshotDiffInfo diffReport) {
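// Normalize the two end points so that earlierSnapshot always has the
// smaller snapshot id, regardless of the order given by the caller.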
final Snapshot earlierSnapshot = diffReport.isFromEarlier() ?
diffReport.getFrom() : diffReport.getTo();
final Snapshot laterSnapshot = diffReport.isFromEarlier() ?
diffReport.getTo() : diffReport.getFrom();
byte[][] relativePath = parentPath.toArray(new byte[parentPath.size()][]);
if (node.isDirectory()) {
final ChildrenDiff diff = new ChildrenDiff();
INodeDirectory dir = node.asDirectory();
DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
if (sf != null) {
boolean change = sf.computeDiffBetweenSnapshots(earlierSnapshot,
laterSnapshot, diff, dir);
if (change) {
diffReport.addDirDiff(dir, relativePath, diff);
}
}
ReadOnlyList<INode> children = dir.getChildrenList(earlierSnapshot
.getId());
for (INode child : children) {
final byte[] name = child.getLocalNameBytes();
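// A child in the deleted list was removed or renamed away in the later
// snapshot; only recurse into it if it is the source of a rename whose
// target is still under the same snapshottable directory.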
boolean toProcess = diff.searchIndex(ListType.DELETED, name) < 0;
if (!toProcess && child instanceof INodeReference.WithName) {
byte[][] renameTargetPath = findRenameTargetPath(
snapshotRoot, (WithName) child,
laterSnapshot == null ? Snapshot.CURRENT_STATE_ID :
laterSnapshot.getId());
if (renameTargetPath != null) {
toProcess = true;
diffReport.setRenameTarget(child.getId(), renameTargetPath);
}
}
if (toProcess) {
parentPath.add(name);
computeDiffRecursively(snapshotRoot, child, parentPath, diffReport);
parentPath.remove(parentPath.size() - 1);
}
}
} else if (node.isFile() && node.asFile().isWithSnapshot()) {
INodeFile file = node.asFile();
boolean change = file.getFileWithSnapshotFeature()
.changedBetweenSnapshots(file, earlierSnapshot, laterSnapshot);
if (change) {
diffReport.addFileDiff(file, relativePath);
}
}
}
/**
* We just found a deleted WithName node as the source of a rename operation.
* However, we should include it in our snapshot diff report as rename only
* if the rename target is also under the same snapshottable directory.
*/
private byte[][] findRenameTargetPath(final INodeDirectory snapshotRoot,
INodeReference.WithName wn, final int snapshotId) {
INode inode = wn.getReferredINode();
final LinkedList<byte[]> ancestors = Lists.newLinkedList();
while (inode != null) {
if (inode == snapshotRoot) {
return ancestors.toArray(new byte[ancestors.size()][]);
}
if (inode instanceof INodeReference.WithCount) {
inode = ((WithCount) inode).getParentRef(snapshotId);
} else {
INode parent = inode.getParentReference() != null ? inode
.getParentReference() : inode.getParent();
if (parent != null && parent instanceof INodeDirectory) {
int sid = parent.asDirectory().searchChild(inode);
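// If the child was already removed in a snapshot older than the one being
// diffed against, the rename target is not visible in this diff.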
if (sid < snapshotId) {
return null;
}
}
if (!(parent instanceof WithCount)) {
ancestors.addFirst(inode.getLocalNameBytes());
}
inode = parent;
}
}
return null;
}
@Override
public String toString() {
return "snapshotsByNames=" + snapshotsByNames;
}
@VisibleForTesting
public void dumpTreeRecursively(INodeDirectory snapshotRoot, PrintWriter out,
StringBuilder prefix, int snapshot) {
if (snapshot == Snapshot.CURRENT_STATE_ID) {
out.println();
out.print(prefix);
out.print("Snapshot of ");
final String name = snapshotRoot.getLocalName();
out.print(name.isEmpty()? "/": name);
out.print(": quota=");
out.print(getSnapshotQuota());
int n = 0;
for(DirectoryDiff diff : getDiffs()) {
if (diff.isSnapshotRoot()) {
n++;
}
}
Preconditions.checkState(n == snapshotsByNames.size(), "#n=" + n
+ ", snapshotsByNames.size()=" + snapshotsByNames.size());
out.print(", #snapshot=");
out.println(n);
INodeDirectory.dumpTreeRecursively(out, prefix,
new Iterable<SnapshotAndINode>() {
@Override
public Iterator<SnapshotAndINode> iterator() {
return new Iterator<SnapshotAndINode>() {
final Iterator<DirectoryDiff> i = getDiffs().iterator();
private DirectoryDiff next = findNext();
private DirectoryDiff findNext() {
for(; i.hasNext(); ) {
final DirectoryDiff diff = i.next();
if (diff.isSnapshotRoot()) {
return diff;
}
}
return null;
}
@Override
public boolean hasNext() {
return next != null;
}
@Override
public SnapshotAndINode next() {
final SnapshotAndINode pair = new SnapshotAndINode(next
.getSnapshotId(), getSnapshotById(next.getSnapshotId())
.getRoot());
next = findNext();
return pair;
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
};
}
});
}
}
}
| 17,041 | 37.382883 | 84 | java |
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes;
/** A list of FileDiffs for storing snapshot data. */
public class FileDiffList extends
AbstractINodeDiffList<INodeFile, INodeFileAttributes, FileDiff> {
@Override
FileDiff createDiff(int snapshotId, INodeFile file) {
return new FileDiff(snapshotId, file);
}
@Override
INodeFileAttributes createSnapshotCopy(INodeFile currentINode) {
return new INodeFileAttributes.SnapshotCopy(currentINode);
}
public void destroyAndCollectSnapshotBlocks(
BlocksMapUpdateInfo collectedBlocks) {
for (FileDiff d : asList()) {
d.destroyAndCollectSnapshotBlocks(collectedBlocks);
}
}
public void saveSelf2Snapshot(int latestSnapshotId, INodeFile iNodeFile,
INodeFileAttributes snapshotCopy, boolean withBlocks) {
final FileDiff diff =
super.saveSelf2Snapshot(latestSnapshotId, iNodeFile, snapshotCopy);
if (withBlocks) { // Store blocks if this is the first update
diff.setBlocks(iNodeFile.getBlocks());
}
}
public BlockInfo[] findEarlierSnapshotBlocks(int snapshotId) {
assert snapshotId != Snapshot.NO_SNAPSHOT_ID : "Wrong snapshot id";
if (snapshotId == Snapshot.CURRENT_STATE_ID) {
return null;
}
List<FileDiff> diffs = this.asList();
int i = Collections.binarySearch(diffs, snapshotId);
BlockInfo[] blocks = null;
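// Start from the diff matching snapshotId, or from the nearest earlier diff
// if there is no exact match, and walk backwards until a diff that recorded
// a block list is found.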
for(i = i >= 0 ? i : -i-2; i >= 0; i--) {
blocks = diffs.get(i).getBlocks();
if(blocks != null) {
break;
}
}
return blocks;
}
public BlockInfo[] findLaterSnapshotBlocks(int snapshotId) {
assert snapshotId != Snapshot.NO_SNAPSHOT_ID : "Wrong snapshot id";
if (snapshotId == Snapshot.CURRENT_STATE_ID) {
return null;
}
List<FileDiff> diffs = this.asList();
int i = Collections.binarySearch(diffs, snapshotId);
BlockInfo[] blocks = null;
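// Start from the first diff strictly later than snapshotId and walk forward
// until a diff that recorded a block list is found.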
for (i = i >= 0 ? i+1 : -i-1; i < diffs.size(); i++) {
blocks = diffs.get(i).getBlocks();
if (blocks != null) {
break;
}
}
return blocks;
}
/**
* Copy blocks from the removed snapshot into the previous snapshot
* up to the file length of the latter.
* Collect unused blocks of the removed snapshot.
*/
void combineAndCollectSnapshotBlocks(
INode.ReclaimContext reclaimContext, INodeFile file, FileDiff removed) {
BlockInfo[] removedBlocks = removed.getBlocks();
if (removedBlocks == null) {
FileWithSnapshotFeature sf = file.getFileWithSnapshotFeature();
assert sf != null : "FileWithSnapshotFeature is null";
if(sf.isCurrentFileDeleted())
sf.collectBlocksAndClear(reclaimContext, file);
return;
}
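// Find the id of the latest snapshot strictly before the removed one, or
// NO_SNAPSHOT_ID if the removed snapshot was the earliest.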
int p = getPrior(removed.getSnapshotId(), true);
FileDiff earlierDiff = p == Snapshot.NO_SNAPSHOT_ID ? null : getDiffById(p);
// Copy blocks to the previous snapshot if not set already
if (earlierDiff != null) {
earlierDiff.setBlocks(removedBlocks);
}
BlockInfo[] earlierBlocks =
(earlierDiff == null ? new BlockInfo[]{} : earlierDiff.getBlocks());
// Find later snapshot (or file itself) with blocks
BlockInfo[] laterBlocks = findLaterSnapshotBlocks(removed.getSnapshotId());
laterBlocks = (laterBlocks==null) ? file.getBlocks() : laterBlocks;
// Skip blocks, which belong to either the earlier or the later lists
int i = 0;
for(; i < removedBlocks.length; i++) {
if(i < earlierBlocks.length && removedBlocks[i] == earlierBlocks[i])
continue;
if(i < laterBlocks.length && removedBlocks[i] == laterBlocks[i])
continue;
break;
}
// Check if last block is part of truncate recovery
BlockInfo lastBlock = file.getLastBlock();
Block dontRemoveBlock = null;
if (lastBlock != null && lastBlock.getBlockUCState().equals(
HdfsServerConstants.BlockUCState.UNDER_RECOVERY)) {
dontRemoveBlock = ((BlockInfoContiguousUnderConstruction) lastBlock)
.getTruncateBlock();
}
// Collect the remaining blocks of the file, ignoring truncate block
for (;i < removedBlocks.length; i++) {
if(dontRemoveBlock == null || !removedBlocks[i].equals(dontRemoveBlock)) {
reclaimContext.collectedBlocks().addDeleteBlock(removedBlocks[i]);
}
}
}
}
| 5,669 | 37.571429 | 90 | java |
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffInfo.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.INodeReference;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.ChildrenDiff;
import org.apache.hadoop.hdfs.util.Diff.ListType;
import com.google.common.base.Preconditions;
import com.google.common.primitives.SignedBytes;
/**
* A class describing the difference between snapshots of a snapshottable
* directory.
*/
class SnapshotDiffInfo {
/** Compare two inodes based on their full names */
public static final Comparator<INode> INODE_COMPARATOR =
new Comparator<INode>() {
@Override
public int compare(INode left, INode right) {
if (left == null) {
return right == null ? 0 : -1;
} else {
if (right == null) {
return 1;
} else {
int cmp = compare(left.getParent(), right.getParent());
return cmp == 0 ? SignedBytes.lexicographicalComparator().compare(
left.getLocalNameBytes(), right.getLocalNameBytes()) : cmp;
}
}
}
};
static class RenameEntry {
private byte[][] sourcePath;
private byte[][] targetPath;
void setSource(INode source, byte[][] sourceParentPath) {
Preconditions.checkState(sourcePath == null);
sourcePath = new byte[sourceParentPath.length + 1][];
System.arraycopy(sourceParentPath, 0, sourcePath, 0,
sourceParentPath.length);
sourcePath[sourcePath.length - 1] = source.getLocalNameBytes();
}
void setTarget(INode target, byte[][] targetParentPath) {
targetPath = new byte[targetParentPath.length + 1][];
System.arraycopy(targetParentPath, 0, targetPath, 0,
targetParentPath.length);
targetPath[targetPath.length - 1] = target.getLocalNameBytes();
}
void setTarget(byte[][] targetPath) {
this.targetPath = targetPath;
}
boolean isRename() {
return sourcePath != null && targetPath != null;
}
byte[][] getSourcePath() {
return sourcePath;
}
byte[][] getTargetPath() {
return targetPath;
}
}
/** The root directory of the snapshots */
private final INodeDirectory snapshotRoot;
/** The starting point of the difference */
private final Snapshot from;
/** The end point of the difference */
private final Snapshot to;
/**
* A map recording modified INodeFile and INodeDirectory and their relative
* path corresponding to the snapshot root. Sorted based on their names.
*/
private final SortedMap<INode, byte[][]> diffMap =
new TreeMap<INode, byte[][]>(INODE_COMPARATOR);
/**
* A map capturing the detailed difference about file creation/deletion.
* Each key indicates a directory whose children have been changed between
* the two snapshots, while its associated value is a {@link ChildrenDiff}
* storing the changes (creation/deletion) happened to the children (files).
*/
private final Map<INodeDirectory, ChildrenDiff> dirDiffMap =
new HashMap<INodeDirectory, ChildrenDiff>();
private final Map<Long, RenameEntry> renameMap =
new HashMap<Long, RenameEntry>();
SnapshotDiffInfo(INodeDirectory snapshotRoot, Snapshot start, Snapshot end) {
Preconditions.checkArgument(snapshotRoot.isSnapshottable());
this.snapshotRoot = snapshotRoot;
this.from = start;
this.to = end;
}
/** Add a dir-diff pair */
void addDirDiff(INodeDirectory dir, byte[][] relativePath, ChildrenDiff diff) {
dirDiffMap.put(dir, diff);
diffMap.put(dir, relativePath);
// detect rename
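// A created entry that is a reference marks the target of a rename, while a
// deleted WithName reference marks its source.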
for (INode created : diff.getList(ListType.CREATED)) {
if (created.isReference()) {
RenameEntry entry = getEntry(created.getId());
if (entry.getTargetPath() == null) {
entry.setTarget(created, relativePath);
}
}
}
for (INode deleted : diff.getList(ListType.DELETED)) {
if (deleted instanceof INodeReference.WithName) {
RenameEntry entry = getEntry(deleted.getId());
entry.setSource(deleted, relativePath);
}
}
}
Snapshot getFrom() {
return from;
}
Snapshot getTo() {
return to;
}
private RenameEntry getEntry(long inodeId) {
RenameEntry entry = renameMap.get(inodeId);
if (entry == null) {
entry = new RenameEntry();
renameMap.put(inodeId, entry);
}
return entry;
}
void setRenameTarget(long inodeId, byte[][] path) {
getEntry(inodeId).setTarget(path);
}
/** Add a modified file */
void addFileDiff(INodeFile file, byte[][] relativePath) {
diffMap.put(file, relativePath);
}
/** @return True if {@link #from} is earlier than {@link #to} */
boolean isFromEarlier() {
return Snapshot.ID_COMPARATOR.compare(from, to) < 0;
}
/**
* Generate a {@link SnapshotDiffReport} based on detailed diff information.
* @return A {@link SnapshotDiffReport} describing the difference
*/
public SnapshotDiffReport generateReport() {
List<DiffReportEntry> diffReportList = new ArrayList<DiffReportEntry>();
for (Map.Entry<INode,byte[][]> drEntry : diffMap.entrySet()) {
INode node = drEntry.getKey();
byte[][] path = drEntry.getValue();
diffReportList.add(new DiffReportEntry(DiffType.MODIFY, path, null));
if (node.isDirectory()) {
List<DiffReportEntry> subList = generateReport(dirDiffMap.get(node),
path, isFromEarlier(), renameMap);
diffReportList.addAll(subList);
}
}
return new SnapshotDiffReport(snapshotRoot.getFullPathName(),
Snapshot.getSnapshotName(from), Snapshot.getSnapshotName(to),
diffReportList);
}
/**
* Interpret the ChildrenDiff and generate a list of {@link DiffReportEntry}.
* @param dirDiff The ChildrenDiff.
* @param parentPath The relative path of the parent.
* @param fromEarlier True indicates {@code diff=later-earlier},
* False indicates {@code diff=earlier-later}
* @param renameMap A map containing information about rename operations.
* @return A list of {@link DiffReportEntry} as the diff report.
*/
private List<DiffReportEntry> generateReport(ChildrenDiff dirDiff,
byte[][] parentPath, boolean fromEarlier, Map<Long, RenameEntry> renameMap) {
List<DiffReportEntry> list = new ArrayList<DiffReportEntry>();
List<INode> created = dirDiff.getList(ListType.CREATED);
List<INode> deleted = dirDiff.getList(ListType.DELETED);
byte[][] fullPath = new byte[parentPath.length + 1][];
System.arraycopy(parentPath, 0, fullPath, 0, parentPath.length);
for (INode cnode : created) {
RenameEntry entry = renameMap.get(cnode.getId());
if (entry == null || !entry.isRename()) {
fullPath[fullPath.length - 1] = cnode.getLocalNameBytes();
list.add(new DiffReportEntry(fromEarlier ? DiffType.CREATE
: DiffType.DELETE, fullPath));
}
}
for (INode dnode : deleted) {
RenameEntry entry = renameMap.get(dnode.getId());
if (entry != null && entry.isRename()) {
list.add(new DiffReportEntry(DiffType.RENAME,
fromEarlier ? entry.getSourcePath() : entry.getTargetPath(),
fromEarlier ? entry.getTargetPath() : entry.getSourcePath()));
} else {
fullPath[fullPath.length - 1] = dnode.getLocalNameBytes();
list.add(new DiffReportEntry(fromEarlier ? DiffType.DELETE
: DiffType.CREATE, fullPath));
}
}
return list;
}
}
| 8,861 | 35.319672 | 97 | java |
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiff.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap;
/**
* The difference of an {@link INodeFile} between two snapshots.
*/
public class FileDiff extends
AbstractINodeDiff<INodeFile, INodeFileAttributes, FileDiff> {
/** The file size at snapshot creation time. */
private final long fileSize;
/** A copy of the INodeFile block list. Used in truncate. */
private BlockInfo[] blocks;
FileDiff(int snapshotId, INodeFile file) {
super(snapshotId, null, null);
fileSize = file.computeFileSize();
blocks = null;
}
/** Constructor used by FSImage loading */
FileDiff(int snapshotId, INodeFileAttributes snapshotINode,
FileDiff posteriorDiff, long fileSize) {
super(snapshotId, snapshotINode, posteriorDiff);
this.fileSize = fileSize;
blocks = null;
}
/** @return the file size in the snapshot. */
public long getFileSize() {
return fileSize;
}
/**
* Copy block references into the snapshot
* up to the current {@link #fileSize}.
* Should be done only once.
*/
public void setBlocks(BlockInfo[] blocks) {
if(this.blocks != null)
return;
int numBlocks = 0;
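// Keep only the leading blocks that are needed to cover fileSize bytes.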
for(long s = 0; numBlocks < blocks.length && s < fileSize; numBlocks++)
s += blocks[numBlocks].getNumBytes();
this.blocks = Arrays.copyOf(blocks, numBlocks);
}
public BlockInfo[] getBlocks() {
return blocks;
}
@Override
void combinePosteriorAndCollectBlocks(
INode.ReclaimContext reclaimContext, INodeFile currentINode,
FileDiff posterior) {
FileWithSnapshotFeature sf = currentINode.getFileWithSnapshotFeature();
assert sf != null : "FileWithSnapshotFeature is null";
sf.updateQuotaAndCollectBlocks(reclaimContext, currentINode, posterior);
}
@Override
public String toString() {
return super.toString() + " fileSize=" + fileSize + ", rep="
+ (snapshotINode == null? "?": snapshotINode.getFileReplication());
}
@Override
void write(DataOutput out, ReferenceMap referenceMap) throws IOException {
writeSnapshot(out);
out.writeLong(fileSize);
// write snapshotINode
if (snapshotINode != null) {
out.writeBoolean(true);
FSImageSerialization.writeINodeFileAttributes(snapshotINode, out);
} else {
out.writeBoolean(false);
}
}
@Override
void destroyDiffAndCollectBlocks(INode.ReclaimContext reclaimContext,
INodeFile currentINode) {
currentINode.getFileWithSnapshotFeature().updateQuotaAndCollectBlocks(
reclaimContext, currentINode, this);
}
public void destroyAndCollectSnapshotBlocks(
BlocksMapUpdateInfo collectedBlocks) {
if (blocks == null || collectedBlocks == null) {
return;
}
for (BlockInfo blk : blocks) {
collectedBlocks.addDeleteBlock(blk);
}
blocks = null;
}
}
| 4,180 | 31.92126 | 85 | java |
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiff.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeAttributes;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap;
import com.google.common.base.Preconditions;
/**
* The difference of an inode between two snapshots.
* {@link AbstractINodeDiffList} maintains a list of snapshot diffs,
* <pre>
* d_1 -> d_2 -> ... -> d_n -> null,
* </pre>
* where -> denotes the {@link AbstractINodeDiff#posteriorDiff} reference. The
* current directory state is stored in the field of {@link INode}.
* The snapshot state can be obtained by applying the diffs one-by-one in
* reversed chronological order. Let s_1, s_2, ..., s_n be the corresponding
* snapshots. Then,
* <pre>
* s_n = (current state) - d_n;
* s_{n-1} = s_n - d_{n-1} = (current state) - d_n - d_{n-1};
* ...
* s_k = s_{k+1} - d_k = (current state) - d_n - d_{n-1} - ... - d_k.
* </pre>
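* For example, with three snapshots s_1, s_2 and s_3, the state of s_1 is
* reconstructed from the current state by undoing d_3, then d_2, then d_1.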
*/
abstract class AbstractINodeDiff<N extends INode,
A extends INodeAttributes,
D extends AbstractINodeDiff<N, A, D>>
implements Comparable<Integer> {
/** The id of the corresponding snapshot. */
private int snapshotId;
/** The snapshot inode data. It is null when there is no change. */
A snapshotINode;
/**
* Posterior diff is the diff happened after this diff.
* The posterior diff should be first applied to obtain the posterior
* snapshot and then apply this diff in order to obtain this snapshot.
* If the posterior diff is null, the posterior state is the current state.
*/
private D posteriorDiff;
AbstractINodeDiff(int snapshotId, A snapshotINode, D posteriorDiff) {
this.snapshotId = snapshotId;
this.snapshotINode = snapshotINode;
this.posteriorDiff = posteriorDiff;
}
/** Compare diffs with snapshot ID. */
@Override
public final int compareTo(final Integer that) {
return Snapshot.ID_INTEGER_COMPARATOR.compare(this.snapshotId, that);
}
/** @return the snapshot object of this diff. */
public final int getSnapshotId() {
return snapshotId;
}
final void setSnapshotId(int snapshot) {
this.snapshotId = snapshot;
}
/** @return the posterior diff. */
final D getPosterior() {
return posteriorDiff;
}
final void setPosterior(D posterior) {
posteriorDiff = posterior;
}
/** Save the INode state to the snapshot if it is not done already. */
void saveSnapshotCopy(A snapshotCopy) {
Preconditions.checkState(snapshotINode == null, "Expected snapshotINode to be null");
snapshotINode = snapshotCopy;
}
/** @return the inode corresponding to the snapshot. */
A getSnapshotINode() {
// get from this diff, then the posterior diff
// and then null for the current inode
for(AbstractINodeDiff<N, A, D> d = this; ; d = d.posteriorDiff) {
if (d.snapshotINode != null) {
return d.snapshotINode;
} else if (d.posteriorDiff == null) {
return null;
}
}
}
/** Combine the posterior diff and collect blocks for deletion. */
abstract void combinePosteriorAndCollectBlocks(
INode.ReclaimContext reclaimContext, final N currentINode,
final D posterior);
/**
* Delete and clear self.
* @param reclaimContext blocks and inodes that need to be reclaimed
* @param currentINode The inode where the deletion happens.
*/
abstract void destroyDiffAndCollectBlocks(INode.ReclaimContext reclaimContext,
final N currentINode);
@Override
public String toString() {
return getClass().getSimpleName() + ": " + this.getSnapshotId() + " (post="
+ (posteriorDiff == null? null: posteriorDiff.getSnapshotId()) + ")";
}
void writeSnapshot(DataOutput out) throws IOException {
out.writeInt(snapshotId);
}
abstract void write(DataOutput out, ReferenceMap referenceMap
) throws IOException;
}
| 4,889 | 34.179856 | 90 | java |
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.namenode.AclFeature;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.AclStorage;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes;
import org.apache.hadoop.hdfs.server.namenode.QuotaCounts;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
/**
* Feature for file with snapshot-related information.
*/
@InterfaceAudience.Private
public class FileWithSnapshotFeature implements INode.Feature {
private final FileDiffList diffs;
private boolean isCurrentFileDeleted = false;
public FileWithSnapshotFeature(FileDiffList diffs) {
this.diffs = diffs != null? diffs: new FileDiffList();
}
public boolean isCurrentFileDeleted() {
return isCurrentFileDeleted;
}
/**
* We need to distinguish two scenarios:
* 1) the file is still in the current file directory, it has been modified
* before while it is included in some snapshot
* 2) the file is not in the current file directory (deleted), but it is in
* some snapshot, thus we still keep this inode
* For both scenarios the file has snapshot feature. We set
* {@link #isCurrentFileDeleted} to true for 2).
*/
public void deleteCurrentFile() {
isCurrentFileDeleted = true;
}
public FileDiffList getDiffs() {
return diffs;
}
/** @return the max replication factor in diffs */
public short getMaxBlockRepInDiffs() {
short max = 0;
for(FileDiff d : getDiffs()) {
if (d.snapshotINode != null) {
final short replication = d.snapshotINode.getFileReplication();
if (replication > max) {
max = replication;
}
}
}
return max;
}
boolean changedBetweenSnapshots(INodeFile file, Snapshot from, Snapshot to) {
int[] diffIndexPair = diffs.changedBetweenSnapshots(from, to);
if (diffIndexPair == null) {
return false;
}
int earlierDiffIndex = diffIndexPair[0];
int laterDiffIndex = diffIndexPair[1];
final List<FileDiff> diffList = diffs.asList();
final long earlierLength = diffList.get(earlierDiffIndex).getFileSize();
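// laterDiffIndex == diffList.size() means the later end point is the current
// state of the file rather than a snapshot diff.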
final long laterLength = laterDiffIndex == diffList.size() ? file
.computeFileSize(true, false) : diffList.get(laterDiffIndex)
.getFileSize();
if (earlierLength != laterLength) { // file length has been changed
return true;
}
INodeFileAttributes earlierAttr = null; // check the metadata
for (int i = earlierDiffIndex; i < laterDiffIndex; i++) {
FileDiff diff = diffList.get(i);
if (diff.snapshotINode != null) {
earlierAttr = diff.snapshotINode;
break;
}
}
if (earlierAttr == null) { // no meta-change at all, return false
return false;
}
INodeFileAttributes laterAttr = diffs.getSnapshotINode(
Math.max(Snapshot.getSnapshotId(from), Snapshot.getSnapshotId(to)),
file);
return !earlierAttr.metadataEquals(laterAttr);
}
public String getDetailedString() {
return (isCurrentFileDeleted()? "(DELETED), ": ", ") + diffs;
}
public void cleanFile(INode.ReclaimContext reclaimContext,
final INodeFile file, final int snapshotId, int priorSnapshotId,
byte storagePolicyId) {
if (snapshotId == Snapshot.CURRENT_STATE_ID) {
// delete the current file while the file has snapshot feature
if (!isCurrentFileDeleted()) {
file.recordModification(priorSnapshotId);
deleteCurrentFile();
}
final BlockStoragePolicy policy = reclaimContext.storagePolicySuite()
.getPolicy(storagePolicyId);
QuotaCounts old = file.storagespaceConsumed(policy);
collectBlocksAndClear(reclaimContext, file);
QuotaCounts current = file.storagespaceConsumed(policy);
reclaimContext.quotaDelta().add(old.subtract(current));
} else { // delete the snapshot
priorSnapshotId = getDiffs().updatePrior(snapshotId, priorSnapshotId);
diffs.deleteSnapshotDiff(reclaimContext, snapshotId, priorSnapshotId,
file);
}
}
public void clearDiffs() {
this.diffs.clear();
}
public void updateQuotaAndCollectBlocks(INode.ReclaimContext reclaimContext,
INodeFile file, FileDiff removed) {
byte storagePolicyID = file.getStoragePolicyID();
BlockStoragePolicy bsp = null;
if (storagePolicyID != HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) {
bsp = reclaimContext.storagePolicySuite().getPolicy(file.getStoragePolicyID());
}
QuotaCounts oldCounts = file.storagespaceConsumed(null);
long oldStoragespace;
if (removed.snapshotINode != null) {
short replication = removed.snapshotINode.getFileReplication();
short currentRepl = file.getPreferredBlockReplication();
if (replication > currentRepl) {
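// Recover the un-replicated file size: compute it directly when the current
// replication is 0, otherwise divide the consumed storage space by the
// current preferred replication.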
long oldFileSizeNoRep = currentRepl == 0
? file.computeFileSize(true, true)
: oldCounts.getStorageSpace() /
file.getPreferredBlockReplication();
oldStoragespace = oldFileSizeNoRep * replication;
oldCounts.setStorageSpace(oldStoragespace);
if (bsp != null) {
List<StorageType> oldTypeChosen = bsp.chooseStorageTypes(replication);
for (StorageType t : oldTypeChosen) {
if (t.supportTypeQuota()) {
oldCounts.addTypeSpace(t, oldFileSizeNoRep);
}
}
}
}
AclFeature aclFeature = removed.getSnapshotINode().getAclFeature();
if (aclFeature != null) {
AclStorage.removeAclFeature(aclFeature);
}
}
getDiffs().combineAndCollectSnapshotBlocks(reclaimContext, file, removed);
QuotaCounts current = file.storagespaceConsumed(bsp);
reclaimContext.quotaDelta().add(oldCounts.subtract(current));
}
/**
* If some blocks at the end of the block list no longer belong to
* any inode, collect them and update the block list.
*/
public void collectBlocksAndClear(
INode.ReclaimContext reclaimContext, final INodeFile file) {
// check if everything is deleted.
if (isCurrentFileDeleted() && getDiffs().asList().isEmpty()) {
file.clearFile(reclaimContext);
return;
}
// find max file size.
final long max;
FileDiff diff = getDiffs().getLast();
if (isCurrentFileDeleted()) {
max = diff == null? 0: diff.getFileSize();
} else {
max = file.computeFileSize();
}
// Collect blocks that should be deleted
FileDiff last = diffs.getLast();
BlockInfo[] snapshotBlocks = last == null ? null : last.getBlocks();
if(snapshotBlocks == null)
file.collectBlocksBeyondMax(max, reclaimContext.collectedBlocks());
else
file.collectBlocksBeyondSnapshot(snapshotBlocks,
reclaimContext.collectedBlocks());
}
}
| 8,005 | 35.557078 | 85 | java |
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotStatsMXBean.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import org.apache.hadoop.hdfs.protocol.SnapshotInfo;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
/**
* This is an interface used to retrieve statistic information related to
* snapshots
*/
public interface SnapshotStatsMXBean {
/**
* Return the list of snapshottable directories
*
* @return the list of snapshottable directories
*/
public SnapshottableDirectoryStatus.Bean[] getSnapshottableDirectories();
/**
* Return the list of snapshots
*
* @return the list of snapshots
*/
public SnapshotInfo.Bean[] getSnapshots();
}
| 1,452 | 32.022727 | 75 | java |
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.Arrays;
import java.util.Comparator;
import java.util.Date;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.namenode.AclFeature;
import org.apache.hadoop.hdfs.server.namenode.ContentSummaryComputationContext;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormat;
import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
import com.google.common.base.Predicate;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
/** Snapshot of a sub-tree in the namesystem. */
@InterfaceAudience.Private
public class Snapshot implements Comparable<byte[]> {
/**
* This id is used to indicate the current state (vs. snapshots)
*/
public static final int CURRENT_STATE_ID = Integer.MAX_VALUE - 1;
public static final int NO_SNAPSHOT_ID = -1;
/**
* The pattern for generating the default snapshot name.
* E.g. s20130412-151029.033
*/
private static final String DEFAULT_SNAPSHOT_NAME_PATTERN = "'s'yyyyMMdd-HHmmss.SSS";
public static String generateDefaultSnapshotName() {
return new SimpleDateFormat(DEFAULT_SNAPSHOT_NAME_PATTERN).format(new Date());
}
public static String getSnapshotPath(String snapshottableDir,
String snapshotRelativePath) {
final StringBuilder b = new StringBuilder(snapshottableDir);
if (b.charAt(b.length() - 1) != Path.SEPARATOR_CHAR) {
b.append(Path.SEPARATOR);
}
return b.append(HdfsConstants.DOT_SNAPSHOT_DIR)
.append(Path.SEPARATOR)
.append(snapshotRelativePath)
.toString();
}
/**
* Get the name of the given snapshot.
* @param s The given snapshot.
* @return The name of the snapshot, or an empty string if {@code s} is null
*/
static String getSnapshotName(Snapshot s) {
return s != null ? s.getRoot().getLocalName() : "";
}
public static int getSnapshotId(Snapshot s) {
return s == null ? CURRENT_STATE_ID : s.getId();
}
/**
* Compare snapshot with IDs, where null indicates the current status thus
* is greater than any non-null snapshot.
*/
public static final Comparator<Snapshot> ID_COMPARATOR
= new Comparator<Snapshot>() {
@Override
public int compare(Snapshot left, Snapshot right) {
return ID_INTEGER_COMPARATOR.compare(Snapshot.getSnapshotId(left),
Snapshot.getSnapshotId(right));
}
};
/**
* Compare snapshot with IDs, where null indicates the current status thus
* is greater than any non-null ID.
*/
public static final Comparator<Integer> ID_INTEGER_COMPARATOR
= new Comparator<Integer>() {
@Override
public int compare(Integer left, Integer right) {
// Snapshot.CURRENT_STATE_ID means the current state, thus should be the
// largest
return left - right;
}
};
/**
* Find the latest snapshot that 1) covers the given inode (which means the
* snapshot was either taken on the inode or taken on an ancestor of the
* inode), and 2) was taken before the given snapshot (if the given snapshot
* is not null).
*
* @param inode the given inode that the returned snapshot needs to cover
* @param anchor the returned snapshot should be taken before this given id.
* @return id of the latest snapshot that covers the given inode and was taken
* before the given snapshot (if it is not null).
*/
public static int findLatestSnapshot(INode inode, final int anchor) {
int latest = NO_SNAPSHOT_ID;
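// Walk up the ancestor chain, keeping the largest snapshot id found that is
// still taken before the anchor.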
for(; inode != null; inode = inode.getParent()) {
if (inode.isDirectory()) {
final INodeDirectory dir = inode.asDirectory();
if (dir.isWithSnapshot()) {
latest = dir.getDiffs().updatePrior(anchor, latest);
}
}
}
return latest;
}
static Snapshot read(DataInput in, FSImageFormat.Loader loader)
throws IOException {
final int snapshotId = in.readInt();
final INode root = loader.loadINodeWithLocalName(false, in, false);
return new Snapshot(snapshotId, root.asDirectory(), null);
}
/** The root directory of the snapshot. */
static public class Root extends INodeDirectory {
Root(INodeDirectory other) {
// Always preserve ACL, XAttr.
super(other, false, Lists.newArrayList(
Iterables.filter(Arrays.asList(other.getFeatures()), new Predicate<Feature>() {
@Override
public boolean apply(Feature input) {
if (AclFeature.class.isInstance(input)
|| XAttrFeature.class.isInstance(input)) {
return true;
}
return false;
}
}))
.toArray(new Feature[0]));
}
@Override
public ReadOnlyList<INode> getChildrenList(int snapshotId) {
return getParent().getChildrenList(snapshotId);
}
@Override
public INode getChild(byte[] name, int snapshotId) {
return getParent().getChild(name, snapshotId);
}
@Override
public ContentSummaryComputationContext computeContentSummary(
ContentSummaryComputationContext summary) {
int snapshotId = getParent().getSnapshot(getLocalNameBytes()).getId();
return computeDirectoryContentSummary(summary, snapshotId);
}
@Override
public String getFullPathName() {
return getSnapshotPath(getParent().getFullPathName(), getLocalName());
}
}
/** Snapshot ID. */
private final int id;
/** The root directory of the snapshot. */
private final Root root;
Snapshot(int id, String name, INodeDirectory dir) {
this(id, dir, dir);
this.root.setLocalName(DFSUtil.string2Bytes(name));
}
Snapshot(int id, INodeDirectory dir, INodeDirectory parent) {
this.id = id;
this.root = new Root(dir);
this.root.setParent(parent);
}
public int getId() {
return id;
}
/** @return the root directory of the snapshot. */
public Root getRoot() {
return root;
}
@Override
public int compareTo(byte[] bytes) {
return root.compareTo(bytes);
}
@Override
public boolean equals(Object that) {
if (this == that) {
return true;
} else if (that == null || !(that instanceof Snapshot)) {
return false;
}
return this.id == ((Snapshot)that).id;
}
@Override
public int hashCode() {
return id;
}
@Override
public String toString() {
return getClass().getSimpleName() + "." + root.getLocalName() + "(id=" + id + ")";
}
/** Serialize the fields to out */
void write(DataOutput out) throws IOException {
out.writeInt(id);
// write root
FSImageSerialization.writeINodeDirectory(root, out);
}
}
| 8,006 | 31.417004 | 87 | java |
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import java.io.DataOutput;
import java.io.IOException;
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.hadoop.hdfs.server.namenode.AclStorage;
import org.apache.hadoop.hdfs.server.namenode.Content;
import org.apache.hadoop.hdfs.server.namenode.ContentCounts;
import org.apache.hadoop.hdfs.server.namenode.ContentSummaryComputationContext;
import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryAttributes;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.INodeReference;
import org.apache.hadoop.hdfs.server.namenode.QuotaCounts;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap;
import org.apache.hadoop.hdfs.util.Diff;
import org.apache.hadoop.hdfs.util.Diff.Container;
import org.apache.hadoop.hdfs.util.Diff.ListType;
import org.apache.hadoop.hdfs.util.Diff.UndoInfo;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
import com.google.common.base.Preconditions;
import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.NO_SNAPSHOT_ID;
/**
* Feature used to store and process the snapshot diff information for a
* directory. In particular, it contains a directory diff list recording changes
* made to the directory and its children for each snapshot.
*/
@InterfaceAudience.Private
public class DirectoryWithSnapshotFeature implements INode.Feature {
/**
* The difference between the current state and a previous snapshot
* of the children list of an INodeDirectory.
*/
static class ChildrenDiff extends Diff<byte[], INode> {
ChildrenDiff() {}
private ChildrenDiff(final List<INode> created, final List<INode> deleted) {
super(created, deleted);
}
/**
* Replace the given child from the created/deleted list.
* @return true if the child is replaced; false if the child is not found.
*/
private boolean replace(final ListType type,
final INode oldChild, final INode newChild) {
final List<INode> list = getList(type);
final int i = search(list, oldChild.getLocalNameBytes());
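// The name may match a different inode, so also verify the inode id before
// replacing the entry.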
if (i < 0 || list.get(i).getId() != oldChild.getId()) {
return false;
}
final INode removed = list.set(i, newChild);
Preconditions.checkState(removed == oldChild);
return true;
}
private boolean removeChild(ListType type, final INode child) {
final List<INode> list = getList(type);
final int i = searchIndex(type, child.getLocalNameBytes());
if (i >= 0 && list.get(i) == child) {
list.remove(i);
return true;
}
return false;
}
/** clear the created list */
private void destroyCreatedList(INode.ReclaimContext reclaimContext,
final INodeDirectory currentINode) {
final List<INode> createdList = getList(ListType.CREATED);
for (INode c : createdList) {
c.destroyAndCollectBlocks(reclaimContext);
// c should be contained in the children list, remove it
currentINode.removeChild(c);
}
createdList.clear();
}
/** clear the deleted list */
private void destroyDeletedList(INode.ReclaimContext reclaimContext) {
final List<INode> deletedList = getList(ListType.DELETED);
for (INode d : deletedList) {
d.destroyAndCollectBlocks(reclaimContext);
}
deletedList.clear();
}
/** Serialize {@link #created} */
private void writeCreated(DataOutput out) throws IOException {
final List<INode> created = getList(ListType.CREATED);
out.writeInt(created.size());
for (INode node : created) {
// For INode in created list, we only need to record its local name
byte[] name = node.getLocalNameBytes();
out.writeShort(name.length);
out.write(name);
}
}
/** Serialize {@link #deleted} */
private void writeDeleted(DataOutput out,
ReferenceMap referenceMap) throws IOException {
final List<INode> deleted = getList(ListType.DELETED);
out.writeInt(deleted.size());
for (INode node : deleted) {
FSImageSerialization.saveINode2Image(node, out, true, referenceMap);
}
}
/** Serialize to out */
private void write(DataOutput out, ReferenceMap referenceMap
) throws IOException {
writeCreated(out);
writeDeleted(out, referenceMap);
}
/** Get the list of INodeDirectory contained in the deleted list */
private void getDirsInDeleted(List<INodeDirectory> dirList) {
for (INode node : getList(ListType.DELETED)) {
if (node.isDirectory()) {
dirList.add(node.asDirectory());
}
}
}
}
/**
* The difference of an {@link INodeDirectory} between two snapshots.
*/
public static class DirectoryDiff extends
AbstractINodeDiff<INodeDirectory, INodeDirectoryAttributes, DirectoryDiff> {
/** The size of the children list at snapshot creation time. */
private final int childrenSize;
/** The children list diff. */
private final ChildrenDiff diff;
private boolean isSnapshotRoot = false;
private DirectoryDiff(int snapshotId, INodeDirectory dir) {
super(snapshotId, null, null);
this.childrenSize = dir.getChildrenList(Snapshot.CURRENT_STATE_ID).size();
this.diff = new ChildrenDiff();
}
/** Constructor used by FSImage loading */
DirectoryDiff(int snapshotId, INodeDirectoryAttributes snapshotINode,
DirectoryDiff posteriorDiff, int childrenSize, List<INode> createdList,
List<INode> deletedList, boolean isSnapshotRoot) {
super(snapshotId, snapshotINode, posteriorDiff);
this.childrenSize = childrenSize;
this.diff = new ChildrenDiff(createdList, deletedList);
this.isSnapshotRoot = isSnapshotRoot;
}
public ChildrenDiff getChildrenDiff() {
return diff;
}
void setSnapshotRoot(INodeDirectoryAttributes root) {
this.snapshotINode = root;
this.isSnapshotRoot = true;
}
boolean isSnapshotRoot() {
return isSnapshotRoot;
}
@Override
void combinePosteriorAndCollectBlocks(
final INode.ReclaimContext reclaimContext,
final INodeDirectory currentDir,
final DirectoryDiff posterior) {
diff.combinePosterior(posterior.diff, new Diff.Processor<INode>() {
/** Collect blocks for deleted files. */
@Override
public void process(INode inode) {
if (inode != null) {
inode.destroyAndCollectBlocks(reclaimContext);
}
}
});
}
/**
* @return The children list of a directory in a snapshot.
* Since the snapshot is read-only, the logical view of the list is
* never changed although the internal data structure may mutate.
*/
private ReadOnlyList<INode> getChildrenList(final INodeDirectory currentDir) {
return new ReadOnlyList<INode>() {
private List<INode> children = null;
private List<INode> initChildren() {
if (children == null) {
final ChildrenDiff combined = new ChildrenDiff();
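// Fold this diff together with all posterior diffs, then apply the combined
// diff to the current children list to rebuild the children view as of this
// snapshot.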
for (DirectoryDiff d = DirectoryDiff.this; d != null;
d = d.getPosterior()) {
combined.combinePosterior(d.diff, null);
}
children = combined.apply2Current(ReadOnlyList.Util.asList(
currentDir.getChildrenList(Snapshot.CURRENT_STATE_ID)));
}
return children;
}
@Override
public Iterator<INode> iterator() {
return initChildren().iterator();
}
@Override
public boolean isEmpty() {
return childrenSize == 0;
}
@Override
public int size() {
return childrenSize;
}
@Override
public INode get(int i) {
return initChildren().get(i);
}
};
}
/** @return the child with the given name. */
INode getChild(byte[] name, boolean checkPosterior,
INodeDirectory currentDir) {
for(DirectoryDiff d = this; ; d = d.getPosterior()) {
final Container<INode> returned = d.diff.accessPrevious(name);
if (returned != null) {
// the diff is able to determine the inode
return returned.getElement();
} else if (!checkPosterior) {
// Since checkPosterior is false, return null, i.e. not found.
return null;
} else if (d.getPosterior() == null) {
// no more posterior diff, get from current inode.
return currentDir.getChild(name, Snapshot.CURRENT_STATE_ID);
}
}
}
@Override
public String toString() {
return super.toString() + " childrenSize=" + childrenSize + ", " + diff;
}
int getChildrenSize() {
return childrenSize;
}
@Override
void write(DataOutput out, ReferenceMap referenceMap) throws IOException {
writeSnapshot(out);
out.writeInt(childrenSize);
// Write snapshotINode
out.writeBoolean(isSnapshotRoot);
if (!isSnapshotRoot) {
if (snapshotINode != null) {
out.writeBoolean(true);
FSImageSerialization.writeINodeDirectoryAttributes(snapshotINode, out);
} else {
out.writeBoolean(false);
}
}
// Write diff. No need to write posteriorDiff, since diffs is a list.
diff.write(out, referenceMap);
}
@Override
void destroyDiffAndCollectBlocks(
INode.ReclaimContext reclaimContext, INodeDirectory currentINode) {
// this diff has been deleted
diff.destroyDeletedList(reclaimContext);
INodeDirectoryAttributes snapshotINode = getSnapshotINode();
if (snapshotINode != null && snapshotINode.getAclFeature() != null) {
AclStorage.removeAclFeature(snapshotINode.getAclFeature());
}
}
}
/** A list of directory diffs. */
public static class DirectoryDiffList
extends AbstractINodeDiffList<INodeDirectory, INodeDirectoryAttributes, DirectoryDiff> {
@Override
DirectoryDiff createDiff(int snapshot, INodeDirectory currentDir) {
return new DirectoryDiff(snapshot, currentDir);
}
@Override
INodeDirectoryAttributes createSnapshotCopy(INodeDirectory currentDir) {
return currentDir.isQuotaSet()?
new INodeDirectoryAttributes.CopyWithQuota(currentDir)
: new INodeDirectoryAttributes.SnapshotCopy(currentDir);
}
/** Replace the given child in the created/deleted list, if there is any. */
public boolean replaceChild(final ListType type, final INode oldChild,
final INode newChild) {
final List<DirectoryDiff> diffList = asList();
for(int i = diffList.size() - 1; i >= 0; i--) {
final ChildrenDiff diff = diffList.get(i).diff;
if (diff.replace(type, oldChild, newChild)) {
return true;
}
}
return false;
}
/** Remove the given child in the created/deleted list, if there is any. */
public boolean removeChild(final ListType type, final INode child) {
final List<DirectoryDiff> diffList = asList();
for(int i = diffList.size() - 1; i >= 0; i--) {
final ChildrenDiff diff = diffList.get(i).diff;
if (diff.removeChild(type, child)) {
return true;
}
}
return false;
}
/**
* Find the corresponding snapshot whose deleted list contains the given
* inode.
* @return the id of the snapshot. {@link Snapshot#NO_SNAPSHOT_ID} if the
   *         given inode is not in any of the snapshots.
*/
public int findSnapshotDeleted(final INode child) {
final List<DirectoryDiff> diffList = asList();
for(int i = diffList.size() - 1; i >= 0; i--) {
final ChildrenDiff diff = diffList.get(i).diff;
final int d = diff.searchIndex(ListType.DELETED,
child.getLocalNameBytes());
if (d >= 0 && diff.getList(ListType.DELETED).get(d) == child) {
return diffList.get(i).getSnapshotId();
}
}
return NO_SNAPSHOT_ID;
}
}
private static Map<INode, INode> cloneDiffList(List<INode> diffList) {
if (diffList == null || diffList.size() == 0) {
return null;
}
Map<INode, INode> map = new HashMap<>(diffList.size());
for (INode node : diffList) {
map.put(node, node);
}
return map;
}
/**
* Destroy a subtree under a DstReference node.
*/
public static void destroyDstSubtree(INode.ReclaimContext reclaimContext,
INode inode, final int snapshot, final int prior) {
Preconditions.checkArgument(prior != NO_SNAPSHOT_ID);
if (inode.isReference()) {
if (inode instanceof INodeReference.WithName
&& snapshot != Snapshot.CURRENT_STATE_ID) {
// this inode has been renamed before the deletion of the DstReference
// subtree
inode.cleanSubtree(reclaimContext, snapshot, prior);
} else {
// for DstReference node, continue this process to its subtree
destroyDstSubtree(reclaimContext,
inode.asReference().getReferredINode(), snapshot, prior);
}
} else if (inode.isFile()) {
inode.cleanSubtree(reclaimContext, snapshot, prior);
} else if (inode.isDirectory()) {
Map<INode, INode> excludedNodes = null;
INodeDirectory dir = inode.asDirectory();
DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
if (sf != null) {
DirectoryDiffList diffList = sf.getDiffs();
DirectoryDiff priorDiff = diffList.getDiffById(prior);
if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
List<INode> dList = priorDiff.diff.getList(ListType.DELETED);
excludedNodes = cloneDiffList(dList);
}
if (snapshot != Snapshot.CURRENT_STATE_ID) {
diffList.deleteSnapshotDiff(reclaimContext,
snapshot, prior, dir);
}
priorDiff = diffList.getDiffById(prior);
if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
priorDiff.diff.destroyCreatedList(reclaimContext, dir);
}
}
for (INode child : inode.asDirectory().getChildrenList(prior)) {
if (excludedNodes != null && excludedNodes.containsKey(child)) {
continue;
}
destroyDstSubtree(reclaimContext, child, snapshot, prior);
}
}
}
/**
* Clean an inode while we move it from the deleted list of post to the
* deleted list of prior.
* @param reclaimContext blocks and inodes that need to be reclaimed
* @param inode The inode to clean.
* @param post The post snapshot.
* @param prior The id of the prior snapshot.
*/
private static void cleanDeletedINode(INode.ReclaimContext reclaimContext,
INode inode, final int post, final int prior) {
Deque<INode> queue = new ArrayDeque<>();
queue.addLast(inode);
while (!queue.isEmpty()) {
INode topNode = queue.pollFirst();
if (topNode instanceof INodeReference.WithName) {
INodeReference.WithName wn = (INodeReference.WithName) topNode;
if (wn.getLastSnapshotId() >= post) {
wn.cleanSubtree(reclaimContext, post, prior);
}
// For DstReference node, since the node is not in the created list of
// prior, we should treat it as regular file/dir
} else if (topNode.isFile() && topNode.asFile().isWithSnapshot()) {
INodeFile file = topNode.asFile();
file.getDiffs().deleteSnapshotDiff(reclaimContext, post, prior, file);
} else if (topNode.isDirectory()) {
INodeDirectory dir = topNode.asDirectory();
ChildrenDiff priorChildrenDiff = null;
DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
if (sf != null) {
// delete files/dirs created after prior. Note that these
// files/dirs, along with inode, were deleted right after post.
DirectoryDiff priorDiff = sf.getDiffs().getDiffById(prior);
if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
priorChildrenDiff = priorDiff.getChildrenDiff();
priorChildrenDiff.destroyCreatedList(reclaimContext, dir);
}
}
for (INode child : dir.getChildrenList(prior)) {
if (priorChildrenDiff != null && priorChildrenDiff.search(
ListType.DELETED, child.getLocalNameBytes()) != null) {
continue;
}
queue.addLast(child);
}
}
}
}
/** Diff list sorted by snapshot IDs, i.e. in chronological order. */
private final DirectoryDiffList diffs;
public DirectoryWithSnapshotFeature(DirectoryDiffList diffs) {
this.diffs = diffs != null ? diffs : new DirectoryDiffList();
}
/** @return the last snapshot. */
public int getLastSnapshotId() {
return diffs.getLastSnapshotId();
}
/** @return the snapshot diff list. */
public DirectoryDiffList getDiffs() {
return diffs;
}
/**
* Get all the directories that are stored in some snapshot but not in the
* current children list. These directories are equivalent to the directories
 * stored in the deleted lists.
*/
public void getSnapshotDirectory(List<INodeDirectory> snapshotDir) {
for (DirectoryDiff sdiff : diffs) {
sdiff.getChildrenDiff().getDirsInDeleted(snapshotDir);
}
}
/**
* Add an inode into parent's children list. The caller of this method needs
* to make sure that parent is in the given snapshot "latest".
*/
public boolean addChild(INodeDirectory parent, INode inode,
boolean setModTime, int latestSnapshotId) throws QuotaExceededException {
ChildrenDiff diff = diffs.checkAndAddLatestSnapshotDiff(latestSnapshotId,
parent).diff;
int undoInfo = diff.create(inode);
final boolean added = parent.addChild(inode, setModTime,
Snapshot.CURRENT_STATE_ID);
if (!added) {
diff.undoCreate(inode, undoInfo);
}
return added;
}
/**
* Remove an inode from parent's children list. The caller of this method
* needs to make sure that parent is in the given snapshot "latest".
*/
public boolean removeChild(INodeDirectory parent, INode child,
int latestSnapshotId) {
// For a directory that is not a renamed node, if isInLatestSnapshot returns
// false, the directory is not in the latest snapshot, thus we do not need
// to record the removed child in any snapshot.
// For a directory that was moved/renamed, note that if the directory is in
// any of the previous snapshots, we will create a reference node for the
// directory while rename, and isInLatestSnapshot will return true in that
// scenario (if all previous snapshots have been deleted, isInLatestSnapshot
// still returns false). Thus if isInLatestSnapshot returns false, the
// directory node cannot be in any snapshot (not in current tree, nor in
// previous src tree). Thus we do not need to record the removed child in
// any snapshot.
ChildrenDiff diff = diffs.checkAndAddLatestSnapshotDiff(latestSnapshotId,
parent).diff;
UndoInfo<INode> undoInfo = diff.delete(child);
final boolean removed = parent.removeChild(child);
if (!removed && undoInfo != null) {
// remove failed, undo
diff.undoDelete(child, undoInfo);
}
return removed;
}
/**
* @return If there is no corresponding directory diff for the given
* snapshot, this means that the current children list should be
* returned for the snapshot. Otherwise we calculate the children list
* for the snapshot and return it.
*/
public ReadOnlyList<INode> getChildrenList(INodeDirectory currentINode,
final int snapshotId) {
final DirectoryDiff diff = diffs.getDiffById(snapshotId);
return diff != null ? diff.getChildrenList(currentINode) : currentINode
.getChildrenList(Snapshot.CURRENT_STATE_ID);
}
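  /*
   * Worked example (editor's sketch, hypothetical snapshot ids): if diffs were
   * recorded only for snapshots 3 and 7, then getChildrenList(dir, 5) is served
   * from the diff recorded for snapshot 7 (nothing changed between taking
   * snapshots 5 and 7, so the two views coincide), while getChildrenList(dir, 9)
   * falls back to the current children list because getDiffById(9) is null.
   */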
public INode getChild(INodeDirectory currentINode, byte[] name,
int snapshotId) {
final DirectoryDiff diff = diffs.getDiffById(snapshotId);
return diff != null ? diff.getChild(name, true, currentINode)
: currentINode.getChild(name, Snapshot.CURRENT_STATE_ID);
}
  /** Used to record the modification of a non-directory child, e.g. a symlink. */
public INode saveChild2Snapshot(INodeDirectory currentINode,
final INode child, final int latestSnapshotId, final INode snapshotCopy) {
Preconditions.checkArgument(!child.isDirectory(),
"child is a directory, child=%s", child);
Preconditions.checkArgument(latestSnapshotId != Snapshot.CURRENT_STATE_ID);
final DirectoryDiff diff = diffs.checkAndAddLatestSnapshotDiff(
latestSnapshotId, currentINode);
if (diff.getChild(child.getLocalNameBytes(), false, currentINode) != null) {
// it was already saved in the latest snapshot earlier.
return child;
}
diff.diff.modify(snapshotCopy, child);
return child;
}
public void clear(
INode.ReclaimContext reclaimContext, INodeDirectory currentINode) {
// destroy its diff list
for (DirectoryDiff diff : diffs) {
diff.destroyDiffAndCollectBlocks(reclaimContext, currentINode);
}
diffs.clear();
}
public QuotaCounts computeQuotaUsage4CurrentDirectory(
BlockStoragePolicySuite bsps, byte storagePolicyId) {
final QuotaCounts counts = new QuotaCounts.Builder().build();
for(DirectoryDiff d : diffs) {
for(INode deleted : d.getChildrenDiff().getList(ListType.DELETED)) {
final byte childPolicyId = deleted.getStoragePolicyIDForQuota(
storagePolicyId);
counts.add(deleted.computeQuotaUsage(bsps, childPolicyId, false,
Snapshot.CURRENT_STATE_ID));
}
}
return counts;
}
public void computeContentSummary4Snapshot(final BlockStoragePolicySuite bsps,
final ContentCounts counts) {
    // Create a new blank summary context for blocking (non-yielding) processing
    // of the deleted subtrees.
ContentSummaryComputationContext summary =
new ContentSummaryComputationContext(bsps);
for(DirectoryDiff d : diffs) {
for(INode deleted : d.getChildrenDiff().getList(ListType.DELETED)) {
deleted.computeContentSummary(summary);
}
}
// Add the counts from deleted trees.
counts.addContents(summary.getCounts());
// Add the deleted directory count.
counts.addContent(Content.DIRECTORY, diffs.asList().size());
}
/**
* Compute the difference between Snapshots.
*
* @param fromSnapshot Start point of the diff computation. Null indicates
* current tree.
* @param toSnapshot End point of the diff computation. Null indicates current
* tree.
* @param diff Used to capture the changes happening to the children. Note
* that the diff still represents (later_snapshot - earlier_snapshot)
* although toSnapshot can be before fromSnapshot.
* @param currentINode The {@link INodeDirectory} this feature belongs to.
   * @return Whether changes happened between the start snapshot and the end snapshot.
*/
boolean computeDiffBetweenSnapshots(Snapshot fromSnapshot,
Snapshot toSnapshot, ChildrenDiff diff, INodeDirectory currentINode) {
int[] diffIndexPair = diffs.changedBetweenSnapshots(fromSnapshot,
toSnapshot);
if (diffIndexPair == null) {
return false;
}
int earlierDiffIndex = diffIndexPair[0];
int laterDiffIndex = diffIndexPair[1];
boolean dirMetadataChanged = false;
INodeDirectoryAttributes dirCopy = null;
List<DirectoryDiff> difflist = diffs.asList();
for (int i = earlierDiffIndex; i < laterDiffIndex; i++) {
DirectoryDiff sdiff = difflist.get(i);
diff.combinePosterior(sdiff.diff, null);
if (!dirMetadataChanged && sdiff.snapshotINode != null) {
if (dirCopy == null) {
dirCopy = sdiff.snapshotINode;
} else if (!dirCopy.metadataEquals(sdiff.snapshotINode)) {
dirMetadataChanged = true;
}
}
}
if (!diff.isEmpty() || dirMetadataChanged) {
return true;
} else if (dirCopy != null) {
for (int i = laterDiffIndex; i < difflist.size(); i++) {
if (!dirCopy.metadataEquals(difflist.get(i).snapshotINode)) {
return true;
}
}
return !dirCopy.metadataEquals(currentINode);
} else {
return false;
}
}
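  /*
   * Worked example (editor's sketch, hypothetical ids): with directory diffs
   * recorded for snapshots s1 < s2 < s3, a diff request between s1 and s3 gets
   * [earlierDiffIndex=0, laterDiffIndex=2] back from changedBetweenSnapshots and
   * therefore combines the ChildrenDiffs stored for s1 and s2 into "diff";
   * metadata changes are detected by comparing the snapshotINode copies seen
   * along the way and, if necessary, against the current directory.
   */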
public void cleanDirectory(INode.ReclaimContext reclaimContext,
final INodeDirectory currentINode, final int snapshot, int prior) {
Map<INode, INode> priorCreated = null;
Map<INode, INode> priorDeleted = null;
QuotaCounts old = reclaimContext.quotaDelta().getCountsCopy();
if (snapshot == Snapshot.CURRENT_STATE_ID) { // delete the current directory
currentINode.recordModification(prior);
// delete everything in created list
DirectoryDiff lastDiff = diffs.getLast();
if (lastDiff != null) {
lastDiff.diff.destroyCreatedList(reclaimContext, currentINode);
}
currentINode.cleanSubtreeRecursively(reclaimContext, snapshot, prior,
null);
} else {
// update prior
prior = getDiffs().updatePrior(snapshot, prior);
// if there is a snapshot diff associated with prior, we need to record
// its original created and deleted list before deleting post
if (prior != NO_SNAPSHOT_ID) {
DirectoryDiff priorDiff = this.getDiffs().getDiffById(prior);
if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
List<INode> cList = priorDiff.diff.getList(ListType.CREATED);
List<INode> dList = priorDiff.diff.getList(ListType.DELETED);
priorCreated = cloneDiffList(cList);
priorDeleted = cloneDiffList(dList);
}
}
getDiffs().deleteSnapshotDiff(reclaimContext, snapshot, prior,
currentINode);
currentINode.cleanSubtreeRecursively(reclaimContext, snapshot, prior,
priorDeleted);
// check priorDiff again since it may be created during the diff deletion
if (prior != NO_SNAPSHOT_ID) {
DirectoryDiff priorDiff = this.getDiffs().getDiffById(prior);
if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
// For files/directories created between "prior" and "snapshot",
// we need to clear snapshot copies for "snapshot". Note that we must
// use null as prior in the cleanSubtree call. Files/directories that
// were created before "prior" will be covered by the later
// cleanSubtreeRecursively call.
if (priorCreated != null) {
// we only check the node originally in prior's created list
for (INode cNode : priorDiff.getChildrenDiff().getList(
ListType.CREATED)) {
if (priorCreated.containsKey(cNode)) {
cNode.cleanSubtree(reclaimContext, snapshot, NO_SNAPSHOT_ID);
}
}
}
// When a directory is moved from the deleted list of the posterior
// diff to the deleted list of this diff, we need to destroy its
// descendants that were 1) created after taking this diff and 2)
// deleted after taking posterior diff.
// For files moved from posterior's deleted list, we also need to
// delete its snapshot copy associated with the posterior snapshot.
for (INode dNode : priorDiff.getChildrenDiff().getList(
ListType.DELETED)) {
if (priorDeleted == null || !priorDeleted.containsKey(dNode)) {
cleanDeletedINode(reclaimContext, dNode, snapshot, prior);
}
}
}
}
}
QuotaCounts current = reclaimContext.quotaDelta().getCountsCopy();
current.subtract(old);
if (currentINode.isQuotaSet()) {
reclaimContext.quotaDelta().addQuotaDirUpdate(currentINode, current);
}
}
}
| 29,381 | 37.158442 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
import javax.management.ObjectName;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshotException;
import org.apache.hadoop.hdfs.protocol.SnapshotInfo;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormat;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
import org.apache.hadoop.metrics2.util.MBeans;
import com.google.common.base.Preconditions;
/**
* Manage snapshottable directories and their snapshots.
*
 * This class includes operations that create, access, and modify snapshots and/or
* snapshot-related data. In general, the locking structure of snapshot
* operations is: <br>
*
* 1. Lock the {@link FSNamesystem} lock in {@link FSNamesystem} before calling
* into {@link SnapshotManager} methods.<br>
* 2. Lock the {@link FSDirectory} lock for the {@link SnapshotManager} methods
* if necessary.
*/
public class SnapshotManager implements SnapshotStatsMXBean {
private boolean allowNestedSnapshots = false;
private final FSDirectory fsdir;
private static final int SNAPSHOT_ID_BIT_WIDTH = 24;
private final AtomicInteger numSnapshots = new AtomicInteger();
private int snapshotCounter = 0;
/** All snapshottable directories in the namesystem. */
private final Map<Long, INodeDirectory> snapshottables =
new HashMap<Long, INodeDirectory>();
public SnapshotManager(final FSDirectory fsdir) {
this.fsdir = fsdir;
}
/** Used in tests only */
void setAllowNestedSnapshots(boolean allowNestedSnapshots) {
this.allowNestedSnapshots = allowNestedSnapshots;
}
private void checkNestedSnapshottable(INodeDirectory dir, String path)
throws SnapshotException {
if (allowNestedSnapshots) {
return;
}
for(INodeDirectory s : snapshottables.values()) {
if (s.isAncestorDirectory(dir)) {
throw new SnapshotException(
"Nested snapshottable directories not allowed: path=" + path
+ ", the subdirectory " + s.getFullPathName()
+ " is already a snapshottable directory.");
}
if (dir.isAncestorDirectory(s)) {
throw new SnapshotException(
"Nested snapshottable directories not allowed: path=" + path
+ ", the ancestor " + s.getFullPathName()
+ " is already a snapshottable directory.");
}
}
}
/**
* Set the given directory as a snapshottable directory.
* If the path is already a snapshottable directory, update the quota.
*/
public void setSnapshottable(final String path, boolean checkNestedSnapshottable)
throws IOException {
final INodesInPath iip = fsdir.getINodesInPath4Write(path);
final INodeDirectory d = INodeDirectory.valueOf(iip.getLastINode(), path);
if (checkNestedSnapshottable) {
checkNestedSnapshottable(d, path);
}
if (d.isSnapshottable()) {
//The directory is already a snapshottable directory.
d.setSnapshotQuota(DirectorySnapshottableFeature.SNAPSHOT_LIMIT);
} else {
d.addSnapshottableFeature();
}
addSnapshottable(d);
}
/** Add the given snapshottable directory to {@link #snapshottables}. */
public void addSnapshottable(INodeDirectory dir) {
Preconditions.checkArgument(dir.isSnapshottable());
snapshottables.put(dir.getId(), dir);
}
/** Remove the given snapshottable directory from {@link #snapshottables}. */
private void removeSnapshottable(INodeDirectory s) {
snapshottables.remove(s.getId());
}
/** Remove snapshottable directories from {@link #snapshottables} */
public void removeSnapshottable(List<INodeDirectory> toRemove) {
if (toRemove != null) {
for (INodeDirectory s : toRemove) {
removeSnapshottable(s);
}
}
}
/**
* Set the given snapshottable directory to non-snapshottable.
*
* @throws SnapshotException if there are snapshots in the directory.
*/
public void resetSnapshottable(final String path) throws IOException {
final INodesInPath iip = fsdir.getINodesInPath4Write(path);
final INodeDirectory d = INodeDirectory.valueOf(iip.getLastINode(), path);
DirectorySnapshottableFeature sf = d.getDirectorySnapshottableFeature();
if (sf == null) {
// the directory is already non-snapshottable
return;
}
if (sf.getNumSnapshots() > 0) {
throw new SnapshotException("The directory " + path + " has snapshot(s). "
+ "Please redo the operation after removing all the snapshots.");
}
if (d == fsdir.getRoot()) {
d.setSnapshotQuota(0);
} else {
d.removeSnapshottableFeature();
}
removeSnapshottable(d);
}
/**
* Find the source root directory where the snapshot will be taken
* for a given path.
*
* @return Snapshottable directory.
* @throws IOException
* Throw IOException when the given path does not lead to an
* existing snapshottable directory.
*/
public INodeDirectory getSnapshottableRoot(final INodesInPath iip)
throws IOException {
final String path = iip.getPath();
final INodeDirectory dir = INodeDirectory.valueOf(iip.getLastINode(), path);
if (!dir.isSnapshottable()) {
throw new SnapshotException(
"Directory is not a snapshottable directory: " + path);
}
return dir;
}
/**
* Create a snapshot of the given path.
* It is assumed that the caller will perform synchronization.
*
* @param iip the INodes resolved from the snapshottable directory's path
* @param snapshotName
* The name of the snapshot.
* @throws IOException
* Throw IOException when 1) the given path does not lead to an
* existing snapshottable directory, and/or 2) there exists a
* snapshot with the given name for the directory, and/or 3)
* snapshot number exceeds quota
*/
public String createSnapshot(final INodesInPath iip, String snapshotRoot,
String snapshotName) throws IOException {
INodeDirectory srcRoot = getSnapshottableRoot(iip);
if (snapshotCounter == getMaxSnapshotID()) {
// We have reached the maximum allowable snapshot ID and since we don't
// handle rollover we will fail all subsequent snapshot creation
// requests.
throw new SnapshotException(
"Failed to create the snapshot. The FileSystem has run out of " +
"snapshot IDs and ID rollover is not supported.");
}
srcRoot.addSnapshot(snapshotCounter, snapshotName);
//create success, update id
snapshotCounter++;
numSnapshots.getAndIncrement();
return Snapshot.getSnapshotPath(snapshotRoot, snapshotName);
}
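  /*
   * Illustrative usage sketch (editor's addition; the path, variable names and
   * calling context are hypothetical, and the caller is assumed to hold the
   * locks described in the class javadoc):
   *
   *   INodesInPath iip = fsdir.getINodesInPath4Write("/data");
   *   String snapPath = snapshotManager.createSnapshot(iip, "/data", "s0");
   *   // snapPath has the form "/data/.snapshot/s0"; the snapshot can later be
   *   // removed with snapshotManager.deleteSnapshot(iip, "s0", reclaimContext).
   */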
/**
* Delete a snapshot for a snapshottable directory
* @param snapshotName Name of the snapshot to be deleted
* @param reclaimContext Used to collect information to reclaim blocks
* and inodes
*/
public void deleteSnapshot(final INodesInPath iip, final String snapshotName,
INode.ReclaimContext reclaimContext) throws IOException {
INodeDirectory srcRoot = getSnapshottableRoot(iip);
srcRoot.removeSnapshot(reclaimContext, snapshotName);
numSnapshots.getAndDecrement();
}
/**
* Rename the given snapshot
* @param oldSnapshotName
* Old name of the snapshot
* @param newSnapshotName
* New name of the snapshot
* @throws IOException
* Throw IOException when 1) the given path does not lead to an
* existing snapshottable directory, and/or 2) the snapshot with the
* old name does not exist for the directory, and/or 3) there exists
* a snapshot with the new name for the directory
*/
public void renameSnapshot(final INodesInPath iip, final String snapshotRoot,
final String oldSnapshotName, final String newSnapshotName)
throws IOException {
final INodeDirectory srcRoot = getSnapshottableRoot(iip);
srcRoot.renameSnapshot(snapshotRoot, oldSnapshotName, newSnapshotName);
}
public int getNumSnapshottableDirs() {
return snapshottables.size();
}
public int getNumSnapshots() {
return numSnapshots.get();
}
void setNumSnapshots(int num) {
numSnapshots.set(num);
}
int getSnapshotCounter() {
return snapshotCounter;
}
void setSnapshotCounter(int counter) {
snapshotCounter = counter;
}
INodeDirectory[] getSnapshottableDirs() {
return snapshottables.values().toArray(
new INodeDirectory[snapshottables.size()]);
}
/**
* Write {@link #snapshotCounter}, {@link #numSnapshots},
* and all snapshots to the DataOutput.
*/
public void write(DataOutput out) throws IOException {
out.writeInt(snapshotCounter);
out.writeInt(numSnapshots.get());
// write all snapshots.
for(INodeDirectory snapshottableDir : snapshottables.values()) {
for (Snapshot s : snapshottableDir.getDirectorySnapshottableFeature()
.getSnapshotList()) {
s.write(out);
}
}
}
/**
* Read values of {@link #snapshotCounter}, {@link #numSnapshots}, and
* all snapshots from the DataInput
*/
public Map<Integer, Snapshot> read(DataInput in, FSImageFormat.Loader loader
) throws IOException {
snapshotCounter = in.readInt();
numSnapshots.set(in.readInt());
// read snapshots
final Map<Integer, Snapshot> snapshotMap = new HashMap<Integer, Snapshot>();
for(int i = 0; i < numSnapshots.get(); i++) {
final Snapshot s = Snapshot.read(in, loader);
snapshotMap.put(s.getId(), s);
}
return snapshotMap;
}
/**
* List all the snapshottable directories that are owned by the current user.
* @param userName Current user name.
* @return Snapshottable directories that are owned by the current user,
* represented as an array of {@link SnapshottableDirectoryStatus}. If
* {@code userName} is null, return all the snapshottable dirs.
*/
public SnapshottableDirectoryStatus[] getSnapshottableDirListing(
String userName) {
if (snapshottables.isEmpty()) {
return null;
}
List<SnapshottableDirectoryStatus> statusList =
new ArrayList<SnapshottableDirectoryStatus>();
for (INodeDirectory dir : snapshottables.values()) {
if (userName == null || userName.equals(dir.getUserName())) {
SnapshottableDirectoryStatus status = new SnapshottableDirectoryStatus(
dir.getModificationTime(), dir.getAccessTime(),
dir.getFsPermission(), dir.getUserName(), dir.getGroupName(),
dir.getLocalNameBytes(), dir.getId(),
dir.getChildrenNum(Snapshot.CURRENT_STATE_ID),
dir.getDirectorySnapshottableFeature().getNumSnapshots(),
dir.getDirectorySnapshottableFeature().getSnapshotQuota(),
dir.getParent() == null ? DFSUtilClient.EMPTY_BYTES :
DFSUtil.string2Bytes(dir.getParent().getFullPathName()));
statusList.add(status);
}
}
Collections.sort(statusList, SnapshottableDirectoryStatus.COMPARATOR);
return statusList.toArray(
new SnapshottableDirectoryStatus[statusList.size()]);
}
/**
* Compute the difference between two snapshots of a directory, or between a
* snapshot of the directory and its current tree.
*/
public SnapshotDiffReport diff(final INodesInPath iip,
final String snapshotRootPath, final String from,
final String to) throws IOException {
// Find the source root directory path where the snapshots were taken.
    // All the checks for the path have been included in the valueOf method.
final INodeDirectory snapshotRoot = getSnapshottableRoot(iip);
if ((from == null || from.isEmpty())
&& (to == null || to.isEmpty())) {
// both fromSnapshot and toSnapshot indicate the current tree
return new SnapshotDiffReport(snapshotRootPath, from, to,
Collections.<DiffReportEntry> emptyList());
}
final SnapshotDiffInfo diffs = snapshotRoot
.getDirectorySnapshottableFeature().computeDiff(snapshotRoot, from, to);
return diffs != null ? diffs.generateReport() : new SnapshotDiffReport(
snapshotRootPath, from, to, Collections.<DiffReportEntry> emptyList());
}
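  /*
   * Illustrative usage sketch (editor's addition; names are hypothetical):
   *
   *   SnapshotDiffReport report = snapshotManager.diff(iip, "/data", "s0", "s1");
   *
   * Passing null or empty strings for both "from" and "to" compares the current
   * tree with itself and yields an empty report, as handled above.
   */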
public void clearSnapshottableDirs() {
snapshottables.clear();
}
/**
* Returns the maximum allowable snapshot ID based on the bit width of the
* snapshot ID.
*
* @return maximum allowable snapshot ID.
*/
public int getMaxSnapshotID() {
return ((1 << SNAPSHOT_ID_BIT_WIDTH) - 1);
}
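  /*
   * Editor's note: with SNAPSHOT_ID_BIT_WIDTH = 24 this evaluates to
   * (1 << 24) - 1 = 16,777,215, i.e. roughly 16.7 million snapshot ids can be
   * handed out before createSnapshot() starts rejecting requests, since ids are
   * never reused and rollover is not supported.
   */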
private ObjectName mxBeanName;
public void registerMXBean() {
mxBeanName = MBeans.register("NameNode", "SnapshotInfo", this);
}
public void shutdown() {
MBeans.unregister(mxBeanName);
mxBeanName = null;
}
@Override // SnapshotStatsMXBean
public SnapshottableDirectoryStatus.Bean[]
getSnapshottableDirectories() {
List<SnapshottableDirectoryStatus.Bean> beans =
new ArrayList<SnapshottableDirectoryStatus.Bean>();
for (INodeDirectory d : getSnapshottableDirs()) {
beans.add(toBean(d));
}
return beans.toArray(new SnapshottableDirectoryStatus.Bean[beans.size()]);
}
@Override // SnapshotStatsMXBean
public SnapshotInfo.Bean[] getSnapshots() {
List<SnapshotInfo.Bean> beans = new ArrayList<SnapshotInfo.Bean>();
for (INodeDirectory d : getSnapshottableDirs()) {
for (Snapshot s : d.getDirectorySnapshottableFeature().getSnapshotList()) {
beans.add(toBean(s));
}
}
return beans.toArray(new SnapshotInfo.Bean[beans.size()]);
}
public static SnapshottableDirectoryStatus.Bean toBean(INodeDirectory d) {
return new SnapshottableDirectoryStatus.Bean(
d.getFullPathName(),
d.getDirectorySnapshottableFeature().getNumSnapshots(),
d.getDirectorySnapshottableFeature().getSnapshotQuota(),
d.getModificationTime(),
Short.valueOf(Integer.toOctalString(
d.getFsPermissionShort())),
d.getUserName(),
d.getGroupName());
}
public static SnapshotInfo.Bean toBean(Snapshot s) {
return new SnapshotInfo.Bean(
s.getRoot().getLocalName(), s.getRoot().getFullPathName(),
s.getRoot().getModificationTime());
}
}
| 15,873 | 35.077273 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormat;
import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeAttributes;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryAttributes;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes;
import org.apache.hadoop.hdfs.server.namenode.INodeReference;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiffList;
import org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff;
import org.apache.hadoop.hdfs.util.Diff.ListType;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
import com.google.common.base.Preconditions;
/**
* A helper class defining static methods for reading/writing snapshot related
* information from/to FSImage.
*/
public class SnapshotFSImageFormat {
/**
* Save snapshots and snapshot quota for a snapshottable directory.
   * @param current The directory that the snapshots belong to.
* @param out The {@link DataOutput} to write.
* @throws IOException
*/
public static void saveSnapshots(INodeDirectory current, DataOutput out)
throws IOException {
DirectorySnapshottableFeature sf = current.getDirectorySnapshottableFeature();
Preconditions.checkArgument(sf != null);
// list of snapshots in snapshotsByNames
ReadOnlyList<Snapshot> snapshots = sf.getSnapshotList();
out.writeInt(snapshots.size());
for (Snapshot s : snapshots) {
// write the snapshot id
out.writeInt(s.getId());
}
// snapshot quota
out.writeInt(sf.getSnapshotQuota());
}
/**
   * Save an {@link AbstractINodeDiff} list to the fsimage.
   * @param diffs The snapshot diff list to save; may be null if the inode has no diffs.
   * @param out The {@link DataOutput} to write.
   * @param referenceMap The {@link ReferenceMap} used when serializing INode references.
*/
private static <N extends INode, A extends INodeAttributes, D extends AbstractINodeDiff<N, A, D>>
void saveINodeDiffs(final AbstractINodeDiffList<N, A, D> diffs,
final DataOutput out, ReferenceMap referenceMap) throws IOException {
// Record the diffs in reversed order, so that we can find the correct
// reference for INodes in the created list when loading the FSImage
if (diffs == null) {
out.writeInt(-1); // no diffs
} else {
final List<D> list = diffs.asList();
final int size = list.size();
out.writeInt(size);
for (int i = size - 1; i >= 0; i--) {
list.get(i).write(out, referenceMap);
}
}
}
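  /*
   * Worked example (editor's sketch, hypothetical ids): if the in-memory diff
   * list is [d(s3), d(s7), d(s12)], the image stores size=3 followed by d(s12),
   * d(s7), d(s3). On load, loadDirectoryDiffList/loadFileDiffList call addFirst
   * for each diff read back, so the original order [d(s3), d(s7), d(s12)] is
   * restored.
   */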
public static void saveDirectoryDiffList(final INodeDirectory dir,
final DataOutput out, final ReferenceMap referenceMap
) throws IOException {
saveINodeDiffs(dir.getDiffs(), out, referenceMap);
}
public static void saveFileDiffList(final INodeFile file,
final DataOutput out) throws IOException {
saveINodeDiffs(file.getDiffs(), out, null);
}
public static FileDiffList loadFileDiffList(DataInput in,
FSImageFormat.Loader loader) throws IOException {
final int size = in.readInt();
if (size == -1) {
return null;
} else {
final FileDiffList diffs = new FileDiffList();
FileDiff posterior = null;
for(int i = 0; i < size; i++) {
final FileDiff d = loadFileDiff(posterior, in, loader);
diffs.addFirst(d);
posterior = d;
}
return diffs;
}
}
private static FileDiff loadFileDiff(FileDiff posterior, DataInput in,
FSImageFormat.Loader loader) throws IOException {
// 1. Read the id of the Snapshot root to identify the Snapshot
final Snapshot snapshot = loader.getSnapshot(in);
// 2. Load file size
final long fileSize = in.readLong();
// 3. Load snapshotINode
final INodeFileAttributes snapshotINode = in.readBoolean()?
loader.loadINodeFileAttributes(in): null;
return new FileDiff(snapshot.getId(), snapshotINode, posterior, fileSize);
}
/**
* Load a node stored in the created list from fsimage.
* @param createdNodeName The name of the created node.
* @param parent The directory that the created list belongs to.
* @return The created node.
*/
public static INode loadCreated(byte[] createdNodeName,
INodeDirectory parent) throws IOException {
// the INode in the created list should be a reference to another INode
// in posterior SnapshotDiffs or one of the current children
for (DirectoryDiff postDiff : parent.getDiffs()) {
final INode d = postDiff.getChildrenDiff().search(ListType.DELETED,
createdNodeName);
if (d != null) {
return d;
} // else go to the next SnapshotDiff
}
// use the current child
INode currentChild = parent.getChild(createdNodeName,
Snapshot.CURRENT_STATE_ID);
if (currentChild == null) {
throw new IOException("Cannot find an INode associated with the INode "
+ DFSUtil.bytes2String(createdNodeName)
+ " in created list while loading FSImage.");
}
return currentChild;
}
/**
* Load the created list from fsimage.
* @param parent The directory that the created list belongs to.
* @param in The {@link DataInput} to read.
* @return The created list.
*/
private static List<INode> loadCreatedList(INodeDirectory parent,
DataInput in) throws IOException {
// read the size of the created list
int createdSize = in.readInt();
List<INode> createdList = new ArrayList<INode>(createdSize);
for (int i = 0; i < createdSize; i++) {
byte[] createdNodeName = FSImageSerialization.readLocalName(in);
INode created = loadCreated(createdNodeName, parent);
createdList.add(created);
}
return createdList;
}
/**
* Load the deleted list from the fsimage.
*
* @param parent The directory that the deleted list belongs to.
* @param createdList The created list associated with the deleted list in
* the same Diff.
* @param in The {@link DataInput} to read.
* @param loader The {@link Loader} instance.
* @return The deleted list.
*/
private static List<INode> loadDeletedList(INodeDirectory parent,
List<INode> createdList, DataInput in, FSImageFormat.Loader loader)
throws IOException {
int deletedSize = in.readInt();
List<INode> deletedList = new ArrayList<INode>(deletedSize);
for (int i = 0; i < deletedSize; i++) {
final INode deleted = loader.loadINodeWithLocalName(true, in, true);
deletedList.add(deleted);
// set parent: the parent field of an INode in the deleted list is not
// useful, but set the parent here to be consistent with the original
// fsdir tree.
deleted.setParent(parent);
if (deleted.isFile()) {
loader.updateBlocksMap(deleted.asFile());
}
}
return deletedList;
}
/**
* Load snapshots and snapshotQuota for a Snapshottable directory.
*
* @param snapshottableParent
* The snapshottable directory for loading.
* @param numSnapshots
* The number of snapshots that the directory has.
* @param loader
* The loader
*/
public static void loadSnapshotList(INodeDirectory snapshottableParent,
int numSnapshots, DataInput in, FSImageFormat.Loader loader)
throws IOException {
DirectorySnapshottableFeature sf = snapshottableParent
.getDirectorySnapshottableFeature();
Preconditions.checkArgument(sf != null);
for (int i = 0; i < numSnapshots; i++) {
// read snapshots
final Snapshot s = loader.getSnapshot(in);
s.getRoot().setParent(snapshottableParent);
sf.addSnapshot(s);
}
int snapshotQuota = in.readInt();
snapshottableParent.setSnapshotQuota(snapshotQuota);
}
/**
* Load the {@link SnapshotDiff} list for the INodeDirectoryWithSnapshot
* directory.
*
* @param dir
* The snapshottable directory for loading.
* @param in
* The {@link DataInput} instance to read.
* @param loader
* The loader
*/
public static void loadDirectoryDiffList(INodeDirectory dir,
DataInput in, FSImageFormat.Loader loader) throws IOException {
final int size = in.readInt();
if (dir.isWithSnapshot()) {
DirectoryDiffList diffs = dir.getDiffs();
for (int i = 0; i < size; i++) {
diffs.addFirst(loadDirectoryDiff(dir, in, loader));
}
}
}
/**
* Load the snapshotINode field of {@link AbstractINodeDiff}.
* @param snapshot The Snapshot associated with the {@link AbstractINodeDiff}.
* @param in The {@link DataInput} to read.
* @param loader The {@link Loader} instance that this loading procedure is
* using.
* @return The snapshotINode.
*/
private static INodeDirectoryAttributes loadSnapshotINodeInDirectoryDiff(
Snapshot snapshot, DataInput in, FSImageFormat.Loader loader)
throws IOException {
// read the boolean indicating whether snapshotINode == Snapshot.Root
boolean useRoot = in.readBoolean();
if (useRoot) {
return snapshot.getRoot();
} else {
// another boolean is used to indicate whether snapshotINode is non-null
return in.readBoolean()? loader.loadINodeDirectoryAttributes(in): null;
}
}
/**
* Load {@link DirectoryDiff} from fsimage.
* @param parent The directory that the SnapshotDiff belongs to.
* @param in The {@link DataInput} instance to read.
* @param loader The {@link Loader} instance that this loading procedure is
* using.
* @return A {@link DirectoryDiff}.
*/
private static DirectoryDiff loadDirectoryDiff(INodeDirectory parent,
DataInput in, FSImageFormat.Loader loader) throws IOException {
// 1. Read the full path of the Snapshot root to identify the Snapshot
final Snapshot snapshot = loader.getSnapshot(in);
// 2. Load DirectoryDiff#childrenSize
int childrenSize = in.readInt();
// 3. Load DirectoryDiff#snapshotINode
INodeDirectoryAttributes snapshotINode = loadSnapshotINodeInDirectoryDiff(
snapshot, in, loader);
// 4. Load the created list in SnapshotDiff#Diff
List<INode> createdList = loadCreatedList(parent, in);
// 5. Load the deleted list in SnapshotDiff#Diff
List<INode> deletedList = loadDeletedList(parent, createdList, in, loader);
// 6. Compose the SnapshotDiff
List<DirectoryDiff> diffs = parent.getDiffs().asList();
DirectoryDiff sdiff = new DirectoryDiff(snapshot.getId(), snapshotINode,
diffs.isEmpty() ? null : diffs.get(0), childrenSize, createdList,
deletedList, snapshotINode == snapshot.getRoot());
return sdiff;
}
/** A reference map for fsimage serialization. */
public static class ReferenceMap {
/**
* Used to indicate whether the reference node itself has been saved
*/
private final Map<Long, INodeReference.WithCount> referenceMap
= new HashMap<Long, INodeReference.WithCount>();
/**
* Used to record whether the subtree of the reference node has been saved
*/
private final Map<Long, Long> dirMap = new HashMap<Long, Long>();
public void writeINodeReferenceWithCount(
INodeReference.WithCount withCount, DataOutput out,
boolean writeUnderConstruction) throws IOException {
final INode referred = withCount.getReferredINode();
final long id = withCount.getId();
final boolean firstReferred = !referenceMap.containsKey(id);
out.writeBoolean(firstReferred);
if (firstReferred) {
FSImageSerialization.saveINode2Image(referred, out,
writeUnderConstruction, this);
referenceMap.put(id, withCount);
} else {
out.writeLong(id);
}
}
public boolean toProcessSubtree(long id) {
if (dirMap.containsKey(id)) {
return false;
} else {
dirMap.put(id, id);
return true;
}
}
public INodeReference.WithCount loadINodeReferenceWithCount(
boolean isSnapshotINode, DataInput in, FSImageFormat.Loader loader
) throws IOException {
final boolean firstReferred = in.readBoolean();
final INodeReference.WithCount withCount;
if (firstReferred) {
final INode referred = loader.loadINodeWithLocalName(isSnapshotINode,
in, true);
withCount = new INodeReference.WithCount(null, referred);
referenceMap.put(withCount.getId(), withCount);
} else {
final long id = in.readLong();
withCount = referenceMap.get(id);
}
return withCount;
}
}
}
| 13,989 | 36.406417 | 102 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeAttributes;
/**
* A list of snapshot diffs for storing snapshot data.
*
 * @param <N> The {@link INode} type.
 * @param <A> The {@link INodeAttributes} type.
 * @param <D> The diff type, which must extend {@link AbstractINodeDiff}.
*/
abstract class AbstractINodeDiffList<N extends INode,
A extends INodeAttributes,
D extends AbstractINodeDiff<N, A, D>>
implements Iterable<D> {
/** Diff list sorted by snapshot IDs, i.e. in chronological order. */
private final List<D> diffs = new ArrayList<D>();
  /** @return this list as an unmodifiable {@link List}. */
public final List<D> asList() {
return Collections.unmodifiableList(diffs);
}
  /** Clear the diff list. */
public void clear() {
diffs.clear();
}
/** @return an {@link AbstractINodeDiff}. */
abstract D createDiff(int snapshotId, N currentINode);
/** @return a snapshot copy of the current inode. */
abstract A createSnapshotCopy(N currentINode);
/**
* Delete a snapshot. The synchronization of the diff list will be done
* outside. If the diff to remove is not the first one in the diff list, we
* need to combine the diff with its previous one.
*
* @param reclaimContext blocks and inodes that need to be reclaimed
* @param snapshot The id of the snapshot to be deleted
* @param prior The id of the snapshot taken before the to-be-deleted snapshot
* @param currentINode the inode where the snapshot diff is deleted
*/
public final void deleteSnapshotDiff(INode.ReclaimContext reclaimContext,
final int snapshot, final int prior, final N currentINode) {
int snapshotIndex = Collections.binarySearch(diffs, snapshot);
D removed;
if (snapshotIndex == 0) {
if (prior != Snapshot.NO_SNAPSHOT_ID) { // there is still snapshot before
// set the snapshot to latestBefore
diffs.get(snapshotIndex).setSnapshotId(prior);
} else { // there is no snapshot before
removed = diffs.remove(0);
removed.destroyDiffAndCollectBlocks(reclaimContext, currentINode);
}
} else if (snapshotIndex > 0) {
final AbstractINodeDiff<N, A, D> previous = diffs.get(snapshotIndex - 1);
if (previous.getSnapshotId() != prior) {
diffs.get(snapshotIndex).setSnapshotId(prior);
} else {
// combine the to-be-removed diff with its previous diff
removed = diffs.remove(snapshotIndex);
if (previous.snapshotINode == null) {
previous.snapshotINode = removed.snapshotINode;
}
previous.combinePosteriorAndCollectBlocks(reclaimContext, currentINode,
removed);
previous.setPosterior(removed.getPosterior());
removed.setPosterior(null);
}
}
}
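  /*
   * Worked example (editor's sketch, hypothetical ids): with diffs
   * [d(s3), d(s7), d(s12)],
   *   deleteSnapshotDiff(ctx, s7, prior=s3, inode) removes d(s7) and combines it
   *   into d(s3), leaving [d(s3), d(s12)];
   *   deleteSnapshotDiff(ctx, s3, prior=NO_SNAPSHOT_ID, inode) simply removes
   *   d(s3) and reclaims whatever only that diff referenced.
   */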
/** Add an {@link AbstractINodeDiff} for the given snapshot. */
final D addDiff(int latestSnapshotId, N currentINode) {
return addLast(createDiff(latestSnapshotId, currentINode));
}
/** Append the diff at the end of the list. */
private D addLast(D diff) {
final D last = getLast();
diffs.add(diff);
if (last != null) {
last.setPosterior(diff);
}
return diff;
}
/** Add the diff to the beginning of the list. */
final void addFirst(D diff) {
final D first = diffs.isEmpty()? null: diffs.get(0);
diffs.add(0, diff);
diff.setPosterior(first);
}
/** @return the last diff. */
public final D getLast() {
final int n = diffs.size();
return n == 0? null: diffs.get(n - 1);
}
/** @return the id of the last snapshot. */
public final int getLastSnapshotId() {
final AbstractINodeDiff<N, A, D> last = getLast();
return last == null ? Snapshot.CURRENT_STATE_ID : last.getSnapshotId();
}
/**
* Find the latest snapshot before a given snapshot.
   * @param anchorId The returned snapshot's id must be no greater than (or,
   *                 when exclusive, strictly less than) this given snapshot id.
* @param exclusive True means the returned snapshot's id must be < the given
* id, otherwise <=.
* @return The id of the latest snapshot before the given snapshot.
*/
public final int getPrior(int anchorId, boolean exclusive) {
if (anchorId == Snapshot.CURRENT_STATE_ID) {
int last = getLastSnapshotId();
      if (exclusive && last == anchorId) {
        return Snapshot.NO_SNAPSHOT_ID;
      }
return last;
}
final int i = Collections.binarySearch(diffs, anchorId);
if (exclusive) { // must be the one before
if (i == -1 || i == 0) {
return Snapshot.NO_SNAPSHOT_ID;
} else {
int priorIndex = i > 0 ? i - 1 : -i - 2;
return diffs.get(priorIndex).getSnapshotId();
}
} else { // the one, or the one before if not existing
if (i >= 0) {
return diffs.get(i).getSnapshotId();
} else if (i < -1) {
return diffs.get(-i - 2).getSnapshotId();
} else { // i == -1
return Snapshot.NO_SNAPSHOT_ID;
}
}
}
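  /*
   * Worked example (editor's sketch, hypothetical ids): with diffs recorded for
   * snapshots [3, 7, 12],
   *   getPrior(7, true)  == 3                        (strictly before 7)
   *   getPrior(7, false) == 7                        (7 itself is recorded)
   *   getPrior(9, false) == 7                        (nearest recorded id <= 9)
   *   getPrior(2, false) == Snapshot.NO_SNAPSHOT_ID  (nothing at or before 2)
   */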
public final int getPrior(int snapshotId) {
return getPrior(snapshotId, false);
}
/**
* Update the prior snapshot.
*/
final int updatePrior(int snapshot, int prior) {
int p = getPrior(snapshot, true);
if (p != Snapshot.CURRENT_STATE_ID
&& Snapshot.ID_INTEGER_COMPARATOR.compare(p, prior) > 0) {
return p;
}
return prior;
}
public final D getDiffById(final int snapshotId) {
if (snapshotId == Snapshot.CURRENT_STATE_ID) {
return null;
}
final int i = Collections.binarySearch(diffs, snapshotId);
if (i >= 0) {
// exact match
return diffs.get(i);
} else {
// Exact match not found means that there were no changes between
// given snapshot and the next state so that the diff for the given
// snapshot was not recorded. Thus, return the next state.
final int j = -i - 1;
return j < diffs.size()? diffs.get(j): null;
}
}
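  /*
   * Worked example (editor's sketch, hypothetical ids): with diffs recorded for
   * snapshots [3, 7, 12], getDiffById(5) returns the diff stored for 7, because
   * nothing changed between taking snapshots 5 and 7 and the two therefore share
   * a view, while getDiffById(20) returns null, meaning the current state applies.
   */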
/**
* Search for the snapshot whose id is 1) no less than the given id,
   * and 2) closest to the given id.
*/
public final int getSnapshotById(final int snapshotId) {
D diff = getDiffById(snapshotId);
return diff == null ? Snapshot.CURRENT_STATE_ID : diff.getSnapshotId();
}
final int[] changedBetweenSnapshots(Snapshot from, Snapshot to) {
Snapshot earlier = from;
Snapshot later = to;
if (Snapshot.ID_COMPARATOR.compare(from, to) > 0) {
earlier = to;
later = from;
}
final int size = diffs.size();
int earlierDiffIndex = Collections.binarySearch(diffs, earlier.getId());
int laterDiffIndex = later == null ? size : Collections
.binarySearch(diffs, later.getId());
if (-earlierDiffIndex - 1 == size) {
// if the earlierSnapshot is after the latest SnapshotDiff stored in
// diffs, no modification happened after the earlierSnapshot
return null;
}
if (laterDiffIndex == -1 || laterDiffIndex == 0) {
// if the laterSnapshot is the earliest SnapshotDiff stored in diffs, or
// before it, no modification happened before the laterSnapshot
return null;
}
earlierDiffIndex = earlierDiffIndex < 0 ? (-earlierDiffIndex - 1)
: earlierDiffIndex;
laterDiffIndex = laterDiffIndex < 0 ? (-laterDiffIndex - 1)
: laterDiffIndex;
return new int[]{earlierDiffIndex, laterDiffIndex};
}
/**
* @return the inode corresponding to the given snapshot.
* Note that the current inode is returned if there is no change
* between the given snapshot and the current state.
*/
public A getSnapshotINode(final int snapshotId, final A currentINode) {
final D diff = getDiffById(snapshotId);
final A inode = diff == null? null: diff.getSnapshotINode();
return inode == null? currentINode: inode;
}
/**
* Check if the latest snapshot diff exists. If not, add it.
* @return the latest snapshot diff, which is never null.
*/
final D checkAndAddLatestSnapshotDiff(int latestSnapshotId, N currentINode) {
final D last = getLast();
return (last != null && Snapshot.ID_INTEGER_COMPARATOR
.compare(last.getSnapshotId(), latestSnapshotId) >= 0) ?
last : addDiff(latestSnapshotId, currentINode);
}
/** Save the snapshot copy to the latest snapshot. */
public D saveSelf2Snapshot(int latestSnapshotId, N currentINode,
A snapshotCopy) {
D diff = null;
if (latestSnapshotId != Snapshot.CURRENT_STATE_ID) {
diff = checkAndAddLatestSnapshotDiff(latestSnapshotId, currentINode);
if (diff.snapshotINode == null) {
if (snapshotCopy == null) {
snapshotCopy = createSnapshotCopy(currentINode);
}
diff.saveSnapshotCopy(snapshotCopy);
}
}
return diff;
}
@Override
public Iterator<D> iterator() {
return diffs.iterator();
}
@Override
public String toString() {
return getClass().getSimpleName() + ": " + diffs;
}
}
| 10,011 | 34.006993 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.ha;
import static org.apache.hadoop.util.Time.monotonicNow;
import java.io.IOException;
import java.net.URI;
import java.net.URL;
import java.security.PrivilegedAction;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ThreadFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ha.ServiceFailedException;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.server.namenode.CheckpointConf;
import org.apache.hadoop.hdfs.server.namenode.FSImage;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.SaveNamespaceCancelledException;
import org.apache.hadoop.hdfs.server.namenode.TransferFsImage;
import org.apache.hadoop.hdfs.util.Canceler;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
/**
* Thread which runs inside the NN when it's in Standby state,
* periodically waking up to take a checkpoint of the namespace.
* When it takes a checkpoint, it saves it to its local
* storage and then uploads it to the remote NameNode.
*/
@InterfaceAudience.Private
public class StandbyCheckpointer {
private static final Log LOG = LogFactory.getLog(StandbyCheckpointer.class);
private static final long PREVENT_AFTER_CANCEL_MS = 2*60*1000L;
private final CheckpointConf checkpointConf;
private final Configuration conf;
private final FSNamesystem namesystem;
private long lastCheckpointTime;
private final CheckpointerThread thread;
private final ThreadFactory uploadThreadFactory;
private URL activeNNAddress;
private URL myNNAddress;
private final Object cancelLock = new Object();
private Canceler canceler;
// Keep track of how many checkpoints were canceled.
// This is for use in tests.
private static int canceledCount = 0;
public StandbyCheckpointer(Configuration conf, FSNamesystem ns)
throws IOException {
this.namesystem = ns;
this.conf = conf;
this.checkpointConf = new CheckpointConf(conf);
this.thread = new CheckpointerThread();
this.uploadThreadFactory = new ThreadFactoryBuilder().setDaemon(true)
.setNameFormat("TransferFsImageUpload-%d").build();
setNameNodeAddresses(conf);
}
/**
* Determine the address of the NN we are checkpointing
* as well as our own HTTP address from the configuration.
* @throws IOException
*/
private void setNameNodeAddresses(Configuration conf) throws IOException {
// Look up our own address.
myNNAddress = getHttpAddress(conf);
// Look up the active node's address
Configuration confForActive = HAUtil.getConfForOtherNode(conf);
activeNNAddress = getHttpAddress(confForActive);
// Sanity-check.
Preconditions.checkArgument(checkAddress(activeNNAddress),
"Bad address for active NN: %s", activeNNAddress);
Preconditions.checkArgument(checkAddress(myNNAddress),
"Bad address for standby NN: %s", myNNAddress);
}
private URL getHttpAddress(Configuration conf) throws IOException {
final String scheme = DFSUtil.getHttpClientScheme(conf);
String defaultHost = NameNode.getServiceAddress(conf, true).getHostName();
URI addr = DFSUtil.getInfoServerWithDefaultHost(defaultHost, conf, scheme);
return addr.toURL();
}
/**
* Ensure that the given address is valid and has a port
* specified.
*/
private static boolean checkAddress(URL addr) {
return addr.getPort() != 0;
}
public void start() {
LOG.info("Starting standby checkpoint thread...\n" +
"Checkpointing active NN at " + activeNNAddress + "\n" +
"Serving checkpoints at " + myNNAddress);
thread.start();
}
public void stop() throws IOException {
cancelAndPreventCheckpoints("Stopping checkpointer");
thread.setShouldRun(false);
thread.interrupt();
try {
thread.join();
} catch (InterruptedException e) {
LOG.warn("Edit log tailer thread exited with an exception");
throw new IOException(e);
}
}
public void triggerRollbackCheckpoint() {
thread.interrupt();
}
private void doCheckpoint() throws InterruptedException, IOException {
assert canceler != null;
final long txid;
final NameNodeFile imageType;
// Acquire cpLock to make sure no one is modifying the name system.
// It does not need the full namesystem write lock, since the only thing
// that modifies namesystem on standby node is edit log replaying.
namesystem.cpLockInterruptibly();
try {
assert namesystem.getEditLog().isOpenForRead() :
"Standby Checkpointer should only attempt a checkpoint when " +
"NN is in standby mode, but the edit logs are in an unexpected state";
FSImage img = namesystem.getFSImage();
long prevCheckpointTxId = img.getStorage().getMostRecentCheckpointTxId();
long thisCheckpointTxId = img.getLastAppliedOrWrittenTxId();
assert thisCheckpointTxId >= prevCheckpointTxId;
if (thisCheckpointTxId == prevCheckpointTxId) {
LOG.info("A checkpoint was triggered but the Standby Node has not " +
"received any transactions since the last checkpoint at txid " +
thisCheckpointTxId + ". Skipping...");
return;
}
if (namesystem.isRollingUpgrade()
&& !namesystem.getFSImage().hasRollbackFSImage()) {
// if we will do rolling upgrade but have not created the rollback image
// yet, name this checkpoint as fsimage_rollback
imageType = NameNodeFile.IMAGE_ROLLBACK;
} else {
imageType = NameNodeFile.IMAGE;
}
img.saveNamespace(namesystem, imageType, canceler);
txid = img.getStorage().getMostRecentCheckpointTxId();
assert txid == thisCheckpointTxId : "expected to save checkpoint at txid=" +
thisCheckpointTxId + " but instead saved at txid=" + txid;
// Save the legacy OIV image, if the output dir is defined.
String outputDir = checkpointConf.getLegacyOivImageDir();
if (outputDir != null && !outputDir.isEmpty()) {
img.saveLegacyOIVImage(namesystem, outputDir, canceler);
}
} finally {
namesystem.cpUnlock();
}
// Upload the saved checkpoint back to the active
// Do this in a separate thread to avoid blocking transition to active
// See HDFS-4816
ExecutorService executor =
Executors.newSingleThreadExecutor(uploadThreadFactory);
Future<Void> upload = executor.submit(new Callable<Void>() {
@Override
public Void call() throws IOException {
TransferFsImage.uploadImageFromStorage(activeNNAddress, conf,
namesystem.getFSImage().getStorage(), imageType, txid, canceler);
return null;
}
});
executor.shutdown();
try {
upload.get();
} catch (InterruptedException e) {
// The background thread may be blocked waiting in the throttler, so
// interrupt it.
upload.cancel(true);
throw e;
} catch (ExecutionException e) {
throw new IOException("Exception during image upload: " + e.getMessage(),
e.getCause());
}
}
/**
* Cancel any checkpoint that's currently being made,
* and prevent any new checkpoints from starting for the next
* minute or so.
*/
public void cancelAndPreventCheckpoints(String msg) throws ServiceFailedException {
synchronized (cancelLock) {
// The checkpointer thread takes this lock and checks if checkpointing is
// postponed.
thread.preventCheckpointsFor(PREVENT_AFTER_CANCEL_MS);
// Before beginning a checkpoint, the checkpointer thread
// takes this lock, and creates a canceler object.
// If the canceler is non-null, then a checkpoint is in
// progress and we need to cancel it. If it's null, then
// the operation has not started, meaning that the above
// time-based prevention will take effect.
if (canceler != null) {
canceler.cancel(msg);
}
}
}
@VisibleForTesting
static int getCanceledCount() {
return canceledCount;
}
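/**
 * @return the number of transactions applied or written on this node since
 * the most recent checkpoint.
 */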
private long countUncheckpointedTxns() {
FSImage img = namesystem.getFSImage();
return img.getLastAppliedOrWrittenTxId() -
img.getStorage().getMostRecentCheckpointTxId();
}
private class CheckpointerThread extends Thread {
private volatile boolean shouldRun = true;
private volatile long preventCheckpointsUntil = 0;
private CheckpointerThread() {
super("Standby State Checkpointer");
}
private void setShouldRun(boolean shouldRun) {
this.shouldRun = shouldRun;
}
@Override
public void run() {
// We have to make sure we're logged in as far as JAAS
// is concerned, in order to use kerberized SSL properly.
SecurityUtil.doAsLoginUserOrFatal(
new PrivilegedAction<Object>() {
@Override
public Object run() {
doWork();
return null;
}
});
}
/**
* Prevent checkpoints from occurring for some time period
* in the future. This is used when preparing to enter active
* mode. We need to not only cancel any concurrent checkpoint,
* but also prevent any checkpoints from racing to start just
* after the cancel call.
*
* @param delayMs the number of MS for which checkpoints will be
* prevented
*/
private void preventCheckpointsFor(long delayMs) {
preventCheckpointsUntil = monotonicNow() + delayMs;
}
private void doWork() {
final long checkPeriod = 1000 * checkpointConf.getCheckPeriod();
// Reset checkpoint time so that we don't always checkpoint
// on startup.
lastCheckpointTime = monotonicNow();
while (shouldRun) {
boolean needRollbackCheckpoint = namesystem.isNeedRollbackFsImage();
if (!needRollbackCheckpoint) {
try {
Thread.sleep(checkPeriod);
} catch (InterruptedException ie) {
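// Ignored: an interrupt is used to wake this thread early (for example
// when a rollback checkpoint is triggered); shouldRun is re-checked below.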
}
if (!shouldRun) {
break;
}
}
try {
// We may have lost our ticket since last checkpoint, log in again, just in case
if (UserGroupInformation.isSecurityEnabled()) {
UserGroupInformation.getCurrentUser().checkTGTAndReloginFromKeytab();
}
final long now = monotonicNow();
final long uncheckpointed = countUncheckpointedTxns();
final long secsSinceLast = (now - lastCheckpointTime) / 1000;
boolean needCheckpoint = needRollbackCheckpoint;
if (needCheckpoint) {
LOG.info("Triggering a rollback fsimage for rolling upgrade.");
} else if (uncheckpointed >= checkpointConf.getTxnCount()) {
LOG.info("Triggering checkpoint because there have been " +
uncheckpointed + " txns since the last checkpoint, which " +
"exceeds the configured threshold " +
checkpointConf.getTxnCount());
needCheckpoint = true;
} else if (secsSinceLast >= checkpointConf.getPeriod()) {
LOG.info("Triggering checkpoint because it has been " +
secsSinceLast + " seconds since the last checkpoint, which " +
"exceeds the configured interval " + checkpointConf.getPeriod());
needCheckpoint = true;
}
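// Regardless of the triggers above, skip checkpointing entirely if a
// failover is imminent (preventCheckpointsFor was called recently).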
synchronized (cancelLock) {
if (now < preventCheckpointsUntil) {
LOG.info("But skipping this checkpoint since we are about to failover!");
canceledCount++;
continue;
}
assert canceler == null;
canceler = new Canceler();
}
if (needCheckpoint) {
doCheckpoint();
// reset needRollbackCheckpoint to false only when we finish a ckpt
// for rollback image
if (needRollbackCheckpoint
&& namesystem.getFSImage().hasRollbackFSImage()) {
namesystem.setCreatedRollbackImages(true);
namesystem.setNeedRollbackFsImage(false);
}
lastCheckpointTime = now;
}
} catch (SaveNamespaceCancelledException ce) {
LOG.info("Checkpoint was cancelled: " + ce.getMessage());
canceledCount++;
} catch (InterruptedException ie) {
LOG.info("Interrupted during checkpointing", ie);
// Probably requested shutdown.
continue;
} catch (Throwable t) {
LOG.error("Exception in doCheckpoint", t);
} finally {
synchronized (cancelLock) {
canceler = null;
}
}
}
}
}
@VisibleForTesting
URL getActiveNNAddress() {
return activeNNAddress;
}
}
| 14,359 | 36.202073 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.ha;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.security.PrivilegedAction;
import java.security.PrivilegedExceptionAction;
import java.util.Collection;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.namenode.EditLogInputException;
import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
import org.apache.hadoop.hdfs.server.namenode.FSEditLog;
import org.apache.hadoop.hdfs.server.namenode.FSImage;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.StandbyException;
import org.apache.hadoop.security.SecurityUtil;
import static org.apache.hadoop.util.Time.monotonicNow;
import static org.apache.hadoop.util.ExitUtil.terminate;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
/**
* EditLogTailer represents a thread which periodically reads from edits
* journals and applies the transactions contained within to a given
* FSNamesystem.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class EditLogTailer {
public static final Log LOG = LogFactory.getLog(EditLogTailer.class);
private final EditLogTailerThread tailerThread;
private final Configuration conf;
private final FSNamesystem namesystem;
private FSEditLog editLog;
private InetSocketAddress activeAddr;
private NamenodeProtocol cachedActiveProxy = null;
/**
* The last transaction ID at which an edit log roll was initiated.
*/
private long lastRollTriggerTxId = HdfsServerConstants.INVALID_TXID;
/**
* The highest transaction ID loaded by the Standby.
*/
private long lastLoadedTxnId = HdfsServerConstants.INVALID_TXID;
/**
* The last time we successfully loaded a non-zero number of edits from the
* shared directory.
*/
private long lastLoadTimeMs;
/**
* How often the Standby should roll edit logs. Since the Standby only reads
* from finalized log segments, the Standby will only be as up-to-date as how
* often the logs are rolled.
*/
private final long logRollPeriodMs;
/**
* How often the Standby should check if there are new finalized segment(s)
* available to be read from.
*/
private final long sleepTimeMs;
public EditLogTailer(FSNamesystem namesystem, Configuration conf) {
this.tailerThread = new EditLogTailerThread();
this.conf = conf;
this.namesystem = namesystem;
this.editLog = namesystem.getEditLog();
lastLoadTimeMs = monotonicNow();
logRollPeriodMs = conf.getInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY,
DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_DEFAULT) * 1000;
if (logRollPeriodMs >= 0) {
this.activeAddr = getActiveNodeAddress();
Preconditions.checkArgument(activeAddr.getPort() > 0,
"Active NameNode must have an IPC port configured. " +
"Got address '%s'", activeAddr);
LOG.info("Will roll logs on active node at " + activeAddr + " every " +
(logRollPeriodMs / 1000) + " seconds.");
} else {
LOG.info("Not going to trigger log rolls on active node because " +
DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY + " is negative.");
}
sleepTimeMs = conf.getInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,
DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_DEFAULT) * 1000;
LOG.debug("logRollPeriodMs=" + logRollPeriodMs +
" sleepTime=" + sleepTimeMs);
}
private InetSocketAddress getActiveNodeAddress() {
Configuration activeConf = HAUtil.getConfForOtherNode(conf);
return NameNode.getServiceAddress(activeConf, true);
}
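/**
 * Lazily create (and cache) an RPC proxy to the active NameNode's
 * NamenodeProtocol, using the configured log-roll RPC timeout.
 */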
private NamenodeProtocol getActiveNodeProxy() throws IOException {
if (cachedActiveProxy == null) {
int rpcTimeout = conf.getInt(
DFSConfigKeys.DFS_HA_LOGROLL_RPC_TIMEOUT_KEY,
DFSConfigKeys.DFS_HA_LOGROLL_RPC_TIMEOUT_DEFAULT);
NamenodeProtocolPB proxy = RPC.waitForProxy(NamenodeProtocolPB.class,
RPC.getProtocolVersion(NamenodeProtocolPB.class), activeAddr, conf,
rpcTimeout, Long.MAX_VALUE);
cachedActiveProxy = new NamenodeProtocolTranslatorPB(proxy);
}
assert cachedActiveProxy != null;
return cachedActiveProxy;
}
public void start() {
tailerThread.start();
}
public void stop() throws IOException {
tailerThread.setShouldRun(false);
tailerThread.interrupt();
try {
tailerThread.join();
} catch (InterruptedException e) {
LOG.warn("Edit log tailer thread exited with an exception");
throw new IOException(e);
}
}
@VisibleForTesting
FSEditLog getEditLog() {
return editLog;
}
@VisibleForTesting
public void setEditLog(FSEditLog editLog) {
this.editLog = editLog;
}
public void catchupDuringFailover() throws IOException {
Preconditions.checkState(tailerThread == null ||
!tailerThread.isAlive(),
"Tailer thread should not be running once failover starts");
// Important to do tailing as the login user, in case the shared
// edits storage is implemented by a JournalManager that depends
// on security credentials to access the logs (eg QuorumJournalManager).
SecurityUtil.doAsLoginUser(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
try {
// It is already under the full name system lock and the checkpointer
// thread is already stopped. No need to acquire any other lock.
doTailEdits();
} catch (InterruptedException e) {
throw new IOException(e);
}
return null;
}
});
}
@VisibleForTesting
void doTailEdits() throws IOException, InterruptedException {
// Write lock needs to be interruptible here because the
// transitionToActive RPC takes the write lock before calling
// tailer.stop() -- so if we're not interruptible, it will
// deadlock.
namesystem.writeLockInterruptibly();
try {
FSImage image = namesystem.getFSImage();
long lastTxnId = image.getLastAppliedTxId();
if (LOG.isDebugEnabled()) {
LOG.debug("lastTxnId: " + lastTxnId);
}
Collection<EditLogInputStream> streams;
try {
streams = editLog.selectInputStreams(lastTxnId + 1, 0, null, false);
} catch (IOException ioe) {
// This is acceptable. If we try to tail edits in the middle of an edits
// log roll, i.e. the last one has been finalized but the new inprogress
// edits file hasn't been started yet.
LOG.warn("Edits tailer failed to find any streams. Will try again " +
"later.", ioe);
return;
}
if (LOG.isDebugEnabled()) {
LOG.debug("edit streams to load from: " + streams.size());
}
// Once we have streams to load, errors encountered are legitimate cause
// for concern, so we don't catch them here. Simple errors reading from
// disk are ignored.
long editsLoaded = 0;
try {
editsLoaded = image.loadEdits(streams, namesystem);
} catch (EditLogInputException elie) {
editsLoaded = elie.getNumEditsLoaded();
throw elie;
} finally {
if (editsLoaded > 0 || LOG.isDebugEnabled()) {
LOG.info(String.format("Loaded %d edits starting from txid %d ",
editsLoaded, lastTxnId));
}
}
if (editsLoaded > 0) {
lastLoadTimeMs = monotonicNow();
}
lastLoadedTxnId = image.getLastAppliedTxId();
} finally {
namesystem.writeUnlock();
}
}
/**
* @return time in msec of when we last loaded a non-zero number of edits.
*/
public long getLastLoadTimeMs() {
return lastLoadTimeMs;
}
/**
* @return true if the configured log roll period has elapsed.
*/
private boolean tooLongSinceLastLoad() {
return logRollPeriodMs >= 0 &&
(monotonicNow() - lastLoadTimeMs) > logRollPeriodMs;
}
/**
* Trigger the active node to roll its logs.
*/
private void triggerActiveLogRoll() {
LOG.info("Triggering log roll on remote NameNode " + activeAddr);
try {
getActiveNodeProxy().rollEditLog();
lastRollTriggerTxId = lastLoadedTxnId;
} catch (IOException ioe) {
if (ioe instanceof RemoteException) {
ioe = ((RemoteException)ioe).unwrapRemoteException();
if (ioe instanceof StandbyException) {
LOG.info("Skipping log roll. Remote node is not in Active state: " +
ioe.getMessage().split("\n")[0]);
return;
}
}
LOG.warn("Unable to trigger a roll of the active NN", ioe);
}
}
/**
* The thread which does the actual work of tailing edits journals and
* applying the transactions to the FSNS.
*/
private class EditLogTailerThread extends Thread {
private volatile boolean shouldRun = true;
private EditLogTailerThread() {
super("Edit log tailer");
}
private void setShouldRun(boolean shouldRun) {
this.shouldRun = shouldRun;
}
@Override
public void run() {
SecurityUtil.doAsLoginUserOrFatal(
new PrivilegedAction<Object>() {
@Override
public Object run() {
doWork();
return null;
}
});
}
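/**
 * Main tailer loop: trigger an edit log roll on the active NN when the
 * standby has gone too long without loading new edits, then tail any newly
 * finalized segments under the checkpointer lock, and sleep for the
 * configured period.
 */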
private void doWork() {
while (shouldRun) {
try {
// There's no point in triggering a log roll if the Standby hasn't
// read any more transactions since the last time a roll was
// triggered.
if (tooLongSinceLastLoad() &&
lastRollTriggerTxId < lastLoadedTxnId) {
triggerActiveLogRoll();
}
/**
* Check again in case someone calls {@link EditLogTailer#stop} while
* we're triggering an edit log roll, since ipc.Client catches and
* ignores {@link InterruptedException} in a few places. This fixes
* the bug described in HDFS-2823.
*/
if (!shouldRun) {
break;
}
// Prevent reading of name system while being modified. The full
// name system lock will be acquired to further block even the block
// state updates.
namesystem.cpLockInterruptibly();
try {
doTailEdits();
} finally {
namesystem.cpUnlock();
}
} catch (EditLogInputException elie) {
LOG.warn("Error while reading edits from disk. Will try again.", elie);
} catch (InterruptedException ie) {
// interrupter should have already set shouldRun to false
continue;
} catch (Throwable t) {
LOG.fatal("Unknown error encountered while tailing edits. " +
"Shutting down standby NN.", t);
terminate(1, t);
}
try {
Thread.sleep(sleepTimeMs);
} catch (InterruptedException e) {
LOG.warn("Edit log tailer interrupted", e);
}
}
}
}
}
| 12,596 | 33.324251 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/HAState.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.ha;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.ha.ServiceFailedException;
import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
import org.apache.hadoop.ipc.StandbyException;
import org.apache.hadoop.util.Time;
/**
* Namenode base state to implement state machine pattern.
*/
@InterfaceAudience.Private
abstract public class HAState {
protected final HAServiceState state;
private long lastHATransitionTime;
/**
* Constructor
* @param name Name of the state.
*/
public HAState(HAServiceState state) {
this.state = state;
}
/**
* @return the generic service state
*/
public HAServiceState getServiceState() {
return state;
}
/**
* Internal method to transition the state of a given namenode to a new state.
* @param nn Namenode
* @param s new state
* @throws ServiceFailedException on failure to transition to new state.
*/
protected final void setStateInternal(final HAContext context, final HAState s)
throws ServiceFailedException {
prepareToExitState(context);
s.prepareToEnterState(context);
context.writeLock();
try {
exitState(context);
context.setState(s);
s.enterState(context);
s.updateLastHATransitionTime();
} finally {
context.writeUnlock();
}
}
/**
* Gets the most recent HA transition time in milliseconds from the epoch.
*
* @return the most recent HA transition time in milliseconds from the epoch.
*/
public long getLastHATransitionTime() {
return lastHATransitionTime;
}
private void updateLastHATransitionTime() {
lastHATransitionTime = Time.now();
}
/**
* Method to be overridden by subclasses to prepare to enter a state.
* This method is called <em>without</em> the context being locked,
* and after {@link #prepareToExitState(HAContext)} has been called
* for the previous state, but before {@link #exitState(HAContext)}
* has been called for the previous state.
* @param context HA context
* @throws ServiceFailedException on precondition failure
*/
public void prepareToEnterState(final HAContext context)
throws ServiceFailedException {}
/**
* Method to be overridden by subclasses to perform steps necessary for
* entering a state.
* @param context HA context
* @throws ServiceFailedException on failure to enter the state.
*/
public abstract void enterState(final HAContext context)
throws ServiceFailedException;
/**
* Method to be overridden by subclasses to prepare to exit a state.
* This method is called <em>without</em> the context being locked.
* This is used by the standby state to cancel any checkpoints
* that are going on. It can also be used to check any preconditions
* for the state transition.
*
* This method should not make any destructive changes to the state
* (e.g. stopping threads) since {@link #prepareToEnterState(HAContext)}
* may subsequently cancel the state transition.
* @param context HA context
* @throws ServiceFailedException on precondition failure
*/
public void prepareToExitState(final HAContext context)
throws ServiceFailedException {}
/**
* Method to be overridden by subclasses to perform steps necessary for
* exiting a state.
* @param context HA context
* @throws ServiceFailedException on failure to enter the state.
*/
public abstract void exitState(final HAContext context)
throws ServiceFailedException;
/**
* Move from the existing state to a new state
* @param context HA context
* @param s new state
* @throws ServiceFailedException on failure to transition to new state.
*/
public void setState(HAContext context, HAState s) throws ServiceFailedException {
if (this == s) { // Already in the new state
return;
}
throw new ServiceFailedException("Transtion from state " + this + " to "
+ s + " is not allowed.");
}
/**
* Check if an operation is supported in a given state.
* @param context HA context
* @param op Type of the operation.
* @throws StandbyException if a given type of operation is not
* supported in standby state
*/
public abstract void checkOperation(final HAContext context, final OperationCategory op)
throws StandbyException;
public abstract boolean shouldPopulateReplQueues();
/**
* @return String representation of the service state.
*/
@Override
public String toString() {
return state.toString();
}
}
| 5,484 | 32.445122 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/AbstractNNFailoverProxyProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.ha;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.retry.FailoverProxyProvider;
public abstract class AbstractNNFailoverProxyProvider<T> implements
FailoverProxyProvider <T> {
protected AtomicBoolean fallbackToSimpleAuth;
/**
* Inquire whether logical HA URI is used for the implementation. If it is
* used, a special token handling may be needed to make sure a token acquired
* from a node in the HA pair can be used against the other node.
*
* @return true if logical HA URI is used. false, if not used.
*/
public abstract boolean useLogicalURI();
/**
* Set for tracking if a secure client falls back to simple auth. This method
* is synchronized only to stifle a Findbugs warning.
*
* @param fallbackToSimpleAuth - set to true or false during this method to
* indicate if a secure client falls back to simple auth
*/
public synchronized void setFallbackToSimpleAuth(
AtomicBoolean fallbackToSimpleAuth) {
this.fallbackToSimpleAuth = fallbackToSimpleAuth;
}
}
| 1,966 | 36.826923 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyState.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.ha;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.ha.ServiceFailedException;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
import org.apache.hadoop.ipc.StandbyException;
/**
* Namenode standby state. In this state the namenode acts as a warm standby and
* keeps the following updated:
* <ul>
* <li>Namespace by getting the edits.</li>
* <li>Block location information by receiving block reports and blocks
* received from the datanodes.</li>
* </ul>
*
* It does not handle read/write/checkpoint operations.
*/
@InterfaceAudience.Private
public class StandbyState extends HAState {
public StandbyState() {
super(HAServiceState.STANDBY);
}
@Override
public void setState(HAContext context, HAState s) throws ServiceFailedException {
if (s == NameNode.ACTIVE_STATE) {
setStateInternal(context, s);
return;
}
super.setState(context, s);
}
@Override
public void enterState(HAContext context) throws ServiceFailedException {
try {
context.startStandbyServices();
} catch (IOException e) {
throw new ServiceFailedException("Failed to start standby services", e);
}
}
@Override
public void prepareToExitState(HAContext context) throws ServiceFailedException {
context.prepareToStopStandbyServices();
}
@Override
public void exitState(HAContext context) throws ServiceFailedException {
try {
context.stopStandbyServices();
} catch (IOException e) {
throw new ServiceFailedException("Failed to stop standby services", e);
}
}
@Override
public void checkOperation(HAContext context, OperationCategory op)
throws StandbyException {
if (op == OperationCategory.UNCHECKED ||
(op == OperationCategory.READ && context.allowStaleReads())) {
return;
}
String faq = ". Visit https://s.apache.org/sbnn-error";
String msg = "Operation category " + op + " is not supported in state "
+ context.getState() + faq;
throw new StandbyException(msg);
}
@Override
public boolean shouldPopulateReplQueues() {
return false;
}
}
| 3,154 | 31.525773 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ActiveState.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.ha;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.ha.ServiceFailedException;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
/**
* Active state of the namenode. In this state, namenode provides the namenode
* service and handles operations of type {@link OperationCategory#WRITE} and
* {@link OperationCategory#READ}.
*/
@InterfaceAudience.Private
public class ActiveState extends HAState {
public ActiveState() {
super(HAServiceState.ACTIVE);
}
@Override
public void checkOperation(HAContext context, OperationCategory op) {
return; // All operations are allowed in active state
}
@Override
public boolean shouldPopulateReplQueues() {
return true;
}
@Override
public void setState(HAContext context, HAState s) throws ServiceFailedException {
if (s == NameNode.STANDBY_STATE) {
setStateInternal(context, s);
return;
}
super.setState(context, s);
}
@Override
public void enterState(HAContext context) throws ServiceFailedException {
try {
context.startActiveServices();
} catch (IOException e) {
throw new ServiceFailedException("Failed to start active services", e);
}
}
@Override
public void exitState(HAContext context) throws ServiceFailedException {
try {
context.stopActiveServices();
} catch (IOException e) {
throw new ServiceFailedException("Failed to stop active services", e);
}
}
}
| 2,493 | 31.38961 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/WrappedFailoverProxyProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.ha;
import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.io.retry.FailoverProxyProvider;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.security.UserGroupInformation;
import com.google.common.base.Preconditions;
/**
* A NNFailoverProxyProvider implementation which wraps old implementations
* directly implementing the {@link FailoverProxyProvider} interface.
*
* It is assumed that the old implementation is using a logical URI.
*/
public class WrappedFailoverProxyProvider<T> extends
AbstractNNFailoverProxyProvider<T> {
private final FailoverProxyProvider<T> proxyProvider;
/**
* Wrap the given instance of an old FailoverProxyProvider.
*/
public WrappedFailoverProxyProvider(FailoverProxyProvider<T> provider) {
proxyProvider = provider;
}
@Override
public Class<T> getInterface() {
return proxyProvider.getInterface();
}
@Override
public synchronized ProxyInfo<T> getProxy() {
return proxyProvider.getProxy();
}
@Override
public void performFailover(T currentProxy) {
proxyProvider.performFailover(currentProxy);
}
/**
* Close the proxy.
*/
@Override
public synchronized void close() throws IOException {
proxyProvider.close();
}
/**
* Assume logical URI is used for old proxy provider implementations.
*/
@Override
public boolean useLogicalURI() {
return true;
}
}
| 2,437 | 29.098765 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.ha;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URL;
import java.security.PrivilegedAction;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.Storage.StorageState;
import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
import org.apache.hadoop.hdfs.server.namenode.FSImage;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NNStorage;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
import org.apache.hadoop.hdfs.server.namenode.NNUpgradeUtil;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.TransferFsImage;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.tools.DFSHAAdmin;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
/**
* Tool which allows the standby node's storage directories to be bootstrapped
* by copying the latest namespace snapshot from the active namenode. This is
* used when first configuring an HA cluster.
*/
@InterfaceAudience.Private
public class BootstrapStandby implements Tool, Configurable {
private static final Log LOG = LogFactory.getLog(BootstrapStandby.class);
private String nsId;
private String nnId;
private String otherNNId;
private URL otherHttpAddr;
private InetSocketAddress otherIpcAddr;
private Collection<URI> dirsToFormat;
private List<URI> editUrisToFormat;
private List<URI> sharedEditsUris;
private Configuration conf;
private boolean force = false;
private boolean interactive = true;
private boolean skipSharedEditsCheck = false;
// Exit/return codes.
static final int ERR_CODE_FAILED_CONNECT = 2;
static final int ERR_CODE_INVALID_VERSION = 3;
// Skip 4 - was used in previous versions, but no longer returned.
static final int ERR_CODE_ALREADY_FORMATTED = 5;
static final int ERR_CODE_LOGS_UNAVAILABLE = 6;
@Override
public int run(String[] args) throws Exception {
parseArgs(args);
parseConfAndFindOtherNN();
NameNode.checkAllowFormat(conf);
InetSocketAddress myAddr = NameNode.getAddress(conf);
SecurityUtil.login(conf, DFS_NAMENODE_KEYTAB_FILE_KEY,
DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, myAddr.getHostName());
return SecurityUtil.doAsLoginUserOrFatal(new PrivilegedAction<Integer>() {
@Override
public Integer run() {
try {
return doRun();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
});
}
private void parseArgs(String[] args) {
for (String arg : args) {
if ("-force".equals(arg)) {
force = true;
} else if ("-nonInteractive".equals(arg)) {
interactive = false;
} else if ("-skipSharedEditsCheck".equals(arg)) {
skipSharedEditsCheck = true;
} else {
printUsage();
throw new HadoopIllegalArgumentException(
"Illegal argument: " + arg);
}
}
}
private void printUsage() {
System.err.println("Usage: " + this.getClass().getSimpleName() +
" [-force] [-nonInteractive] [-skipSharedEditsCheck]");
}
private NamenodeProtocol createNNProtocolProxy()
throws IOException {
return NameNodeProxies.createNonHAProxy(getConf(),
otherIpcAddr, NamenodeProtocol.class,
UserGroupInformation.getLoginUser(), true)
.getProxy();
}
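/**
 * Core bootstrap sequence: fetch the namespace and upgrade status from the
 * active NN, verify the layout version, format the local storage (or prepare
 * it for upgrade), download the most recent checkpoint image, and finish the
 * upgrade rename if the active NN is mid-upgrade.
 */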
private int doRun() throws IOException {
NamenodeProtocol proxy = createNNProtocolProxy();
NamespaceInfo nsInfo;
boolean isUpgradeFinalized;
try {
nsInfo = proxy.versionRequest();
isUpgradeFinalized = proxy.isUpgradeFinalized();
} catch (IOException ioe) {
LOG.fatal("Unable to fetch namespace information from active NN at " +
otherIpcAddr + ": " + ioe.getMessage());
if (LOG.isDebugEnabled()) {
LOG.debug("Full exception trace", ioe);
}
return ERR_CODE_FAILED_CONNECT;
}
if (!checkLayoutVersion(nsInfo)) {
LOG.fatal("Layout version on remote node (" + nsInfo.getLayoutVersion()
+ ") does not match " + "this node's layout version ("
+ HdfsServerConstants.NAMENODE_LAYOUT_VERSION + ")");
return ERR_CODE_INVALID_VERSION;
}
System.out.println(
"=====================================================\n" +
"About to bootstrap Standby ID " + nnId + " from:\n" +
" Nameservice ID: " + nsId + "\n" +
" Other Namenode ID: " + otherNNId + "\n" +
" Other NN's HTTP address: " + otherHttpAddr + "\n" +
" Other NN's IPC address: " + otherIpcAddr + "\n" +
" Namespace ID: " + nsInfo.getNamespaceID() + "\n" +
" Block pool ID: " + nsInfo.getBlockPoolID() + "\n" +
" Cluster ID: " + nsInfo.getClusterID() + "\n" +
" Layout version: " + nsInfo.getLayoutVersion() + "\n" +
" isUpgradeFinalized: " + isUpgradeFinalized + "\n" +
"=====================================================");
NNStorage storage = new NNStorage(conf, dirsToFormat, editUrisToFormat);
if (!isUpgradeFinalized) {
// the remote NameNode is in upgrade state, this NameNode should also
// create the previous directory. First prepare the upgrade and rename
// the current dir to previous.tmp.
LOG.info("The active NameNode is in Upgrade. " +
"Prepare the upgrade for the standby NameNode as well.");
if (!doPreUpgrade(storage, nsInfo)) {
return ERR_CODE_ALREADY_FORMATTED;
}
} else if (!format(storage, nsInfo)) { // prompt the user to format storage
return ERR_CODE_ALREADY_FORMATTED;
}
// download the fsimage from active namenode
int download = downloadImage(storage, proxy);
if (download != 0) {
return download;
}
// finish the upgrade: rename previous.tmp to previous
if (!isUpgradeFinalized) {
doUpgrade(storage);
}
return 0;
}
/**
* Iterate over all the storage directories, checking if it should be
* formatted. Format the storage if necessary and allowed by the user.
* @return True if formatting is processed
*/
private boolean format(NNStorage storage, NamespaceInfo nsInfo)
throws IOException {
// Check with the user before blowing away data.
if (!Storage.confirmFormat(storage.dirIterable(null), force, interactive)) {
storage.close();
return false;
} else {
// Format the storage (writes VERSION file)
storage.format(nsInfo);
return true;
}
}
/**
* This is called when using bootstrapStandby for an HA upgrade. The SBN should
* also create the previous directory so that, when it later starts, it
* understands that the cluster is in the upgrade state. This function renames
* the old current directory to previous.tmp.
*/
private boolean doPreUpgrade(NNStorage storage, NamespaceInfo nsInfo)
throws IOException {
boolean isFormatted = false;
Map<StorageDirectory, StorageState> dataDirStates =
new HashMap<>();
try {
isFormatted = FSImage.recoverStorageDirs(StartupOption.UPGRADE, storage,
dataDirStates);
if (dataDirStates.values().contains(StorageState.NOT_FORMATTED)) {
// recoverStorageDirs returns true if there is a formatted directory
isFormatted = false;
System.err.println("The original storage directory is not formatted.");
}
} catch (InconsistentFSStateException e) {
// if the storage is in a bad state,
LOG.warn("The storage directory is in an inconsistent state", e);
} finally {
storage.unlockAll();
}
// If there was an InconsistentFSStateException or the storage is not
// formatted, format the storage. Although this format is done through the
// new software, since in an HA setup the SBN is rolled back through
// "-bootstrapStandby", we should still be fine.
if (!isFormatted && !format(storage, nsInfo)) {
return false;
}
// make sure there is no previous directory
FSImage.checkUpgrade(storage);
// Do preUpgrade for each directory
for (Iterator<StorageDirectory> it = storage.dirIterator(false);
it.hasNext();) {
StorageDirectory sd = it.next();
try {
NNUpgradeUtil.renameCurToTmp(sd);
} catch (IOException e) {
LOG.error("Failed to move aside pre-upgrade storage " +
"in image directory " + sd.getRoot(), e);
throw e;
}
}
storage.setStorageInfo(nsInfo);
storage.setBlockPoolID(nsInfo.getBlockPoolID());
return true;
}
private void doUpgrade(NNStorage storage) throws IOException {
for (Iterator<StorageDirectory> it = storage.dirIterator(false);
it.hasNext();) {
StorageDirectory sd = it.next();
NNUpgradeUtil.doUpgrade(sd, storage);
}
}
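/**
 * Download the most recent checkpoint image from the active NN into the
 * local storage directories, after confirming that the shared edits storage
 * can supply all transactions needed to catch up from that checkpoint.
 */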
private int downloadImage(NNStorage storage, NamenodeProtocol proxy)
throws IOException {
// Load the newly formatted image, using all of the directories
// (including shared edits)
final long imageTxId = proxy.getMostRecentCheckpointTxId();
final long curTxId = proxy.getTransactionID();
FSImage image = new FSImage(conf);
try {
image.getStorage().setStorageInfo(storage);
image.initEditLog(StartupOption.REGULAR);
assert image.getEditLog().isOpenForRead() :
"Expected edit log to be open for read";
// Ensure that we have enough edits already in the shared directory to
// start up from the last checkpoint on the active.
if (!skipSharedEditsCheck &&
!checkLogsAvailableForRead(image, imageTxId, curTxId)) {
return ERR_CODE_LOGS_UNAVAILABLE;
}
image.getStorage().writeTransactionIdFileToStorage(curTxId);
// Download that checkpoint into our storage directories.
MD5Hash hash = TransferFsImage.downloadImageToStorage(
otherHttpAddr, imageTxId, storage, true);
image.saveDigestAndRenameCheckpointImage(NameNodeFile.IMAGE, imageTxId,
hash);
} catch (IOException ioe) {
throw ioe;
} finally {
image.close();
}
return 0;
}
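/**
 * Check that the shared edits storage contains every transaction between the
 * last checkpoint on the active NN and its current transaction id.
 */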
private boolean checkLogsAvailableForRead(FSImage image, long imageTxId,
long curTxIdOnOtherNode) {
if (imageTxId == curTxIdOnOtherNode) {
// The other node hasn't written any logs since the last checkpoint.
// This can be the case if the NN was freshly formatted as HA, and
// then started in standby mode, so it has no edit logs at all.
return true;
}
long firstTxIdInLogs = imageTxId + 1;
assert curTxIdOnOtherNode >= firstTxIdInLogs :
"first=" + firstTxIdInLogs + " onOtherNode=" + curTxIdOnOtherNode;
try {
Collection<EditLogInputStream> streams =
image.getEditLog().selectInputStreams(
firstTxIdInLogs, curTxIdOnOtherNode, null, true);
for (EditLogInputStream stream : streams) {
IOUtils.closeStream(stream);
}
return true;
} catch (IOException e) {
String msg = "Unable to read transaction ids " +
firstTxIdInLogs + "-" + curTxIdOnOtherNode +
" from the configured shared edits storage " +
Joiner.on(",").join(sharedEditsUris) + ". " +
"Please copy these logs into the shared edits storage " +
"or call saveNamespace on the active node.\n" +
"Error: " + e.getLocalizedMessage();
if (LOG.isDebugEnabled()) {
LOG.fatal(msg, e);
} else {
LOG.fatal(msg);
}
return false;
}
}
private boolean checkLayoutVersion(NamespaceInfo nsInfo) throws IOException {
return (nsInfo.getLayoutVersion() == HdfsServerConstants.NAMENODE_LAYOUT_VERSION);
}
private void parseConfAndFindOtherNN() throws IOException {
Configuration conf = getConf();
nsId = DFSUtil.getNamenodeNameServiceId(conf);
if (!HAUtil.isHAEnabled(conf, nsId)) {
throw new HadoopIllegalArgumentException(
"HA is not enabled for this namenode.");
}
nnId = HAUtil.getNameNodeId(conf, nsId);
NameNode.initializeGenericKeys(conf, nsId, nnId);
if (!HAUtil.usesSharedEditsDir(conf)) {
throw new HadoopIllegalArgumentException(
"Shared edits storage is not enabled for this namenode.");
}
Configuration otherNode = HAUtil.getConfForOtherNode(conf);
otherNNId = HAUtil.getNameNodeId(otherNode, nsId);
otherIpcAddr = NameNode.getServiceAddress(otherNode, true);
Preconditions.checkArgument(otherIpcAddr.getPort() != 0 &&
!otherIpcAddr.getAddress().isAnyLocalAddress(),
"Could not determine valid IPC address for other NameNode (%s)" +
", got: %s", otherNNId, otherIpcAddr);
final String scheme = DFSUtil.getHttpClientScheme(conf);
otherHttpAddr = DFSUtil.getInfoServerWithDefaultHost(
otherIpcAddr.getHostName(), otherNode, scheme).toURL();
dirsToFormat = FSNamesystem.getNamespaceDirs(conf);
editUrisToFormat = FSNamesystem.getNamespaceEditsDirs(
conf, false);
sharedEditsUris = FSNamesystem.getSharedEditsDirs(conf);
}
@Override
public void setConf(Configuration conf) {
this.conf = DFSHAAdmin.addSecurityConfiguration(conf);
}
@Override
public Configuration getConf() {
return conf;
}
public static int run(String[] argv, Configuration conf) throws IOException {
BootstrapStandby bs = new BootstrapStandby();
bs.setConf(conf);
try {
return ToolRunner.run(bs, argv);
} catch (Exception e) {
if (e instanceof IOException) {
throw (IOException)e;
} else {
throw new IOException(e);
}
}
}
}
| 16,033 | 36.201856 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.ha;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.net.URI;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.io.retry.MultiException;
/**
* A FailoverProxyProvider implementation that technically does not "failover"
* per se. It constructs a wrapper proxy that sends the request to ALL
* underlying proxies simultaneously. It assumes that in an HA setup there will
* be only one Active, and that the active should respond faster than any
* configured standbys. Once it receives a response from any one of the
* configured proxies, outstanding requests to the other proxies are
* immediately cancelled.
*/
public class RequestHedgingProxyProvider<T> extends
ConfiguredFailoverProxyProvider<T> {
private static final Log LOG =
LogFactory.getLog(RequestHedgingProxyProvider.class);
class RequestHedgingInvocationHandler implements InvocationHandler {
final Map<String, ProxyInfo<T>> targetProxies;
public RequestHedgingInvocationHandler(
Map<String, ProxyInfo<T>> targetProxies) {
this.targetProxies = new HashMap<>(targetProxies);
}
/**
* Creates an Executor and invokes all proxies concurrently. This
* implementation assumes that Clients have configured proper socket
* timeouts, else the call can block forever.
*
* @param proxy
* @param method
* @param args
* @return
* @throws Throwable
*/
@Override
public Object
invoke(Object proxy, final Method method, final Object[] args)
throws Throwable {
Map<Future<Object>, ProxyInfo<T>> proxyMap = new HashMap<>();
int numAttempts = 0;
ExecutorService executor = null;
CompletionService<Object> completionService;
try {
// Optimization: if only 2 proxies are configured and one has failed
// over, then we don't need to create a thread pool etc.
targetProxies.remove(toIgnore);
if (targetProxies.size() == 1) {
ProxyInfo<T> proxyInfo = targetProxies.values().iterator().next();
Object retVal = method.invoke(proxyInfo.proxy, args);
successfulProxy = proxyInfo;
return retVal;
}
executor = Executors.newFixedThreadPool(proxies.size());
completionService = new ExecutorCompletionService<>(executor);
for (final Map.Entry<String, ProxyInfo<T>> pEntry :
targetProxies.entrySet()) {
Callable<Object> c = new Callable<Object>() {
@Override
public Object call() throws Exception {
return method.invoke(pEntry.getValue().proxy, args);
}
};
proxyMap.put(completionService.submit(c), pEntry.getValue());
numAttempts++;
}
Map<String, Exception> badResults = new HashMap<>();
while (numAttempts > 0) {
Future<Object> callResultFuture = completionService.take();
Object retVal;
try {
retVal = callResultFuture.get();
successfulProxy = proxyMap.get(callResultFuture);
if (LOG.isDebugEnabled()) {
LOG.debug("Invocation successful on ["
+ successfulProxy.proxyInfo + "]");
}
return retVal;
} catch (Exception ex) {
ProxyInfo<T> tProxyInfo = proxyMap.get(callResultFuture);
LOG.warn("Invocation returned exception on "
+ "[" + tProxyInfo.proxyInfo + "]");
badResults.put(tProxyInfo.proxyInfo, ex);
numAttempts--;
}
}
// At this point we should have all bad results (Exceptions)
// Or should have returned with successful result.
if (badResults.size() == 1) {
throw badResults.values().iterator().next();
} else {
throw new MultiException(badResults);
}
} finally {
if (executor != null) {
executor.shutdownNow();
}
}
}
}
private volatile ProxyInfo<T> successfulProxy = null;
private volatile String toIgnore = null;
public RequestHedgingProxyProvider(
Configuration conf, URI uri, Class<T> xface) {
this(conf, uri, xface, new DefaultProxyFactory<T>());
}
@VisibleForTesting
RequestHedgingProxyProvider(Configuration conf, URI uri,
Class<T> xface, ProxyFactory<T> factory) {
super(conf, uri, xface, factory);
}
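/**
 * Returns a dynamic proxy whose invocation handler fans each call out to all
 * configured NameNode proxies and returns the first successful response.
 * Once a proxy has succeeded it is returned directly until the next failover
 * clears it.
 */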
@SuppressWarnings("unchecked")
@Override
public synchronized ProxyInfo<T> getProxy() {
if (successfulProxy != null) {
return successfulProxy;
}
Map<String, ProxyInfo<T>> targetProxyInfos = new HashMap<>();
// Use a String here: a char argument would select the int-capacity
// constructor and the '[' would never actually be appended.
StringBuilder combinedInfo = new StringBuilder("[");
for (int i = 0; i < proxies.size(); i++) {
ProxyInfo<T> pInfo = super.getProxy();
incrementProxyIndex();
targetProxyInfos.put(pInfo.proxyInfo, pInfo);
combinedInfo.append(pInfo.proxyInfo).append(',');
}
combinedInfo.append(']');
T wrappedProxy = (T) Proxy.newProxyInstance(
RequestHedgingInvocationHandler.class.getClassLoader(),
new Class<?>[]{xface},
new RequestHedgingInvocationHandler(targetProxyInfos));
return new ProxyInfo<T>(wrappedProxy, combinedInfo.toString());
}
@Override
public synchronized void performFailover(T currentProxy) {
toIgnore = successfulProxy.proxyInfo;
successfulProxy = null;
}
}
| 6,827 | 35.513369 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.ha;
import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.security.UserGroupInformation;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
/**
* A FailoverProxyProvider implementation which allows one to configure two URIs
* to connect to during fail-over. The first configured address is tried first,
* and on a fail-over event the other address is tried.
*/
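// Illustrative only (not part of this class): in a typical HDFS HA client
// configuration this provider is selected per nameservice, roughly:
//
//   dfs.nameservices                             = mycluster
//   dfs.ha.namenodes.mycluster                   = nn1,nn2
//   dfs.namenode.rpc-address.mycluster.nn1       = host1:8020
//   dfs.namenode.rpc-address.mycluster.nn2       = host2:8020
//   dfs.client.failover.proxy.provider.mycluster =
//       org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
//
// Key names follow the standard HDFS HA documentation; "mycluster", the host
// names and ports are placeholders.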
public class ConfiguredFailoverProxyProvider<T> extends
AbstractNNFailoverProxyProvider<T> {
private static final Log LOG =
LogFactory.getLog(ConfiguredFailoverProxyProvider.class);
interface ProxyFactory<T> {
T createProxy(Configuration conf, InetSocketAddress nnAddr, Class<T> xface,
UserGroupInformation ugi, boolean withRetries,
AtomicBoolean fallbackToSimpleAuth) throws IOException;
}
static class DefaultProxyFactory<T> implements ProxyFactory<T> {
@Override
public T createProxy(Configuration conf, InetSocketAddress nnAddr,
Class<T> xface, UserGroupInformation ugi, boolean withRetries,
AtomicBoolean fallbackToSimpleAuth) throws IOException {
return NameNodeProxies.createNonHAProxy(conf,
nnAddr, xface, ugi, false, fallbackToSimpleAuth).getProxy();
}
}
protected final Configuration conf;
protected final List<AddressRpcProxyPair<T>> proxies =
new ArrayList<AddressRpcProxyPair<T>>();
private final UserGroupInformation ugi;
protected final Class<T> xface;
private int currentProxyIndex = 0;
private final ProxyFactory<T> factory;
public ConfiguredFailoverProxyProvider(Configuration conf, URI uri,
Class<T> xface) {
this(conf, uri, xface, new DefaultProxyFactory<T>());
}
@VisibleForTesting
ConfiguredFailoverProxyProvider(Configuration conf, URI uri,
Class<T> xface, ProxyFactory<T> factory) {
Preconditions.checkArgument(
xface.isAssignableFrom(NamenodeProtocols.class),
"Interface class %s is not a valid NameNode protocol!");
this.xface = xface;
this.conf = new Configuration(conf);
int maxRetries = this.conf.getInt(
HdfsClientConfigKeys.Failover.CONNECTION_RETRIES_KEY,
HdfsClientConfigKeys.Failover.CONNECTION_RETRIES_DEFAULT);
this.conf.setInt(
CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
maxRetries);
int maxRetriesOnSocketTimeouts = this.conf.getInt(
HdfsClientConfigKeys.Failover.CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
HdfsClientConfigKeys.Failover.CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_DEFAULT);
this.conf.setInt(
CommonConfigurationKeysPublic
.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
maxRetriesOnSocketTimeouts);
try {
ugi = UserGroupInformation.getCurrentUser();
Map<String, Map<String, InetSocketAddress>> map = DFSUtil.getHaNnRpcAddresses(
conf);
Map<String, InetSocketAddress> addressesInNN = map.get(uri.getHost());
if (addressesInNN == null || addressesInNN.size() == 0) {
throw new RuntimeException("Could not find any configured addresses " +
"for URI " + uri);
}
Collection<InetSocketAddress> addressesOfNns = addressesInNN.values();
for (InetSocketAddress address : addressesOfNns) {
proxies.add(new AddressRpcProxyPair<T>(address));
}
// The client may have a delegation token set for the logical
// URI of the cluster. Clone this token to apply to each of the
// underlying IPC addresses so that the IPC code can find it.
HAUtil.cloneDelegationTokenForLogicalUri(ugi, uri, addressesOfNns);
this.factory = factory;
} catch (IOException e) {
throw new RuntimeException(e);
}
}
@Override
public Class<T> getInterface() {
return xface;
}
/**
* Lazily initialize the RPC proxy object.
*/
@Override
public synchronized ProxyInfo<T> getProxy() {
AddressRpcProxyPair<T> current = proxies.get(currentProxyIndex);
if (current.namenode == null) {
try {
current.namenode = factory.createProxy(conf,
current.address, xface, ugi, false, fallbackToSimpleAuth);
} catch (IOException e) {
LOG.error("Failed to create RPC proxy to NameNode", e);
throw new RuntimeException(e);
}
}
return new ProxyInfo<T>(current.namenode, current.address.toString());
}
@Override
public void performFailover(T currentProxy) {
incrementProxyIndex();
}
synchronized void incrementProxyIndex() {
currentProxyIndex = (currentProxyIndex + 1) % proxies.size();
}
/**
* A little pair object to store the address and connected RPC proxy object to
* an NN. Note that {@link AddressRpcProxyPair#namenode} may be null.
*/
private static class AddressRpcProxyPair<T> {
public final InetSocketAddress address;
public T namenode;
public AddressRpcProxyPair(InetSocketAddress address) {
this.address = address;
}
}
/**
* Close all the proxy objects which have been opened over the lifetime of
* this proxy provider.
*/
@Override
public synchronized void close() throws IOException {
for (AddressRpcProxyPair<T> proxy : proxies) {
if (proxy.namenode != null) {
if (proxy.namenode instanceof Closeable) {
((Closeable)proxy.namenode).close();
} else {
RPC.stopProxy(proxy.namenode);
}
}
}
}
/**
* Logical URI is required for this failover proxy provider.
*/
@Override
public boolean useLogicalURI() {
return true;
}
}
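/*
 * Illustrative sketch (not part of the Hadoop source): the client-side
 * configuration that selects this provider for an HA nameservice. The
 * nameservice id "mycluster", the namenode ids "nn1"/"nn2" and the host:port
 * values below are placeholders.
 */
class ConfiguredFailoverProxyProviderUsageSketch {
  static org.apache.hadoop.fs.FileSystem connect() throws IOException {
    Configuration conf = new Configuration();
    conf.set("dfs.nameservices", "mycluster");
    conf.set("dfs.ha.namenodes.mycluster", "nn1,nn2");
    conf.set("dfs.namenode.rpc-address.mycluster.nn1", "nn1.example.com:8020");
    conf.set("dfs.namenode.rpc-address.mycluster.nn2", "nn2.example.com:8020");
    // Resolve the logical URI through this provider; it then tries the
    // configured namenode addresses in order and rotates on failover.
    conf.set("dfs.client.failover.proxy.provider.mycluster",
        ConfiguredFailoverProxyProvider.class.getName());
    return org.apache.hadoop.fs.FileSystem.get(
        URI.create("hdfs://mycluster"), conf);
  }
}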
| 7,229 | 33.759615 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/HAContext.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.ha;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.ha.ServiceFailedException;
import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
import org.apache.hadoop.ipc.StandbyException;
/**
* Context that is to be used by {@link HAState} for getting/setting the
* current state and performing required operations.
*/
@InterfaceAudience.Private
public interface HAContext {
/** Set the state of the context to given {@code state} */
public void setState(HAState state);
/** Get the state from the context */
public HAState getState();
/** Start the services required in active state */
public void startActiveServices() throws IOException;
/** Stop the services when exiting active state */
public void stopActiveServices() throws IOException;
/** Start the services required in standby state */
public void startStandbyServices() throws IOException;
/** Prepare to exit the standby state */
public void prepareToStopStandbyServices() throws ServiceFailedException;
/** Stop the services when exiting standby state */
public void stopStandbyServices() throws IOException;
/**
* Take a write-lock on the underlying namesystem
* so that no concurrent state transitions or edits
* can be made.
*/
void writeLock();
/**
* Unlock the lock taken by {@link #writeLock()}
*/
void writeUnlock();
/**
* Verify that the given operation category is allowed in the current state.
* This is to allow NN implementations (eg BackupNode) to override it with
* node-specific handling.
*
* If the operation which is being checked will be taking the FSNS lock, it's
* advisable to check the operation category both immediately before and after
* taking the lock. This is because clients rely on the StandbyException
* thrown by this method in order to trigger client failover, and if a client
* first tries to contact the Standby NN, it could block for a long time if
* the Standby is holding the lock for a while, e.g. when performing a
* checkpoint. See HDFS-4591 for more details.
*/
void checkOperation(OperationCategory op) throws StandbyException;
/**
* @return true if the node should allow stale reads (ie reads
* while the namespace is not up to date)
*/
boolean allowStaleReads();
}
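/*
 * Illustrative sketch (not part of the NameNode source): how a caller is
 * expected to use checkOperation() around the namesystem lock, per the Javadoc
 * above -- check once before blocking on the lock so a standby fails fast, and
 * check again after acquiring it in case the HA state changed while waiting.
 * The class and method names here are made up.
 */
class HAContextUsageSketch {
  void guardedWrite(HAContext haContext) throws IOException {
    haContext.checkOperation(OperationCategory.WRITE); // fail fast on standby
    haContext.writeLock();
    try {
      // Re-check: the node may have transitioned while we waited for the lock.
      haContext.checkOperation(OperationCategory.WRITE);
      // ... perform the state-changing operation here ...
    } finally {
      haContext.writeUnlock();
    }
  }
}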
| 3,236 | 36.206897 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/IPFailoverProxyProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.ha;
import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.security.UserGroupInformation;
import com.google.common.base.Preconditions;
/**
* A NNFailoverProxyProvider implementation which works on IP failover setup.
* Only one proxy is used to connect to both servers and switching between
* the servers is done by the environment/infrastructure, which guarantees
* clients can consistently reach only one node at a time.
*
* Clients with a live connection will likely get connection reset after an
* IP failover. This case will be handled by the
* FailoverOnNetworkExceptionRetry retry policy. I.e. if the call is
* not idempotent, it won't get retried.
*
* A connection reset while setting up a connection (i.e. before sending a
* request) will be handled in ipc client.
*
* The namenode URI must contain a resolvable host name.
*/
public class IPFailoverProxyProvider<T> extends
AbstractNNFailoverProxyProvider<T> {
private final Configuration conf;
private final Class<T> xface;
private final URI nameNodeUri;
private ProxyInfo<T> nnProxyInfo = null;
public IPFailoverProxyProvider(Configuration conf, URI uri,
Class<T> xface) {
Preconditions.checkArgument(
xface.isAssignableFrom(NamenodeProtocols.class),
"Interface class %s is not a valid NameNode protocol!");
this.xface = xface;
this.nameNodeUri = uri;
this.conf = new Configuration(conf);
int maxRetries = this.conf.getInt(
HdfsClientConfigKeys.Failover.CONNECTION_RETRIES_KEY,
HdfsClientConfigKeys.Failover.CONNECTION_RETRIES_DEFAULT);
this.conf.setInt(
CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
maxRetries);
int maxRetriesOnSocketTimeouts = this.conf.getInt(
HdfsClientConfigKeys.Failover.CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
HdfsClientConfigKeys.Failover.CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_DEFAULT);
this.conf.setInt(
CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
maxRetriesOnSocketTimeouts);
}
@Override
public Class<T> getInterface() {
return xface;
}
@Override
public synchronized ProxyInfo<T> getProxy() {
// Create a non-ha proxy if not already created.
if (nnProxyInfo == null) {
try {
// Create a proxy that is not wrapped in RetryProxy
InetSocketAddress nnAddr = NameNode.getAddress(nameNodeUri);
nnProxyInfo = new ProxyInfo<T>(NameNodeProxies.createNonHAProxy(
conf, nnAddr, xface, UserGroupInformation.getCurrentUser(),
false).getProxy(), nnAddr.toString());
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
}
return nnProxyInfo;
}
/** Nothing to do for IP failover */
@Override
public void performFailover(T currentProxy) {
}
/**
   * Close the proxy.
*/
@Override
public synchronized void close() throws IOException {
if (nnProxyInfo == null) {
return;
}
if (nnProxyInfo.proxy instanceof Closeable) {
((Closeable)nnProxyInfo.proxy).close();
} else {
RPC.stopProxy(nnProxyInfo.proxy);
}
}
/**
* Logical URI is not used for IP failover.
*/
@Override
public boolean useLogicalURI() {
return false;
}
}
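/*
 * Illustrative sketch (not part of the Hadoop source): with IP failover the
 * client URI names the floating (virtual) namenode address directly, and the
 * provider is looked up under "dfs.client.failover.proxy.provider.<host>".
 * The host name "nn-vip.example.com" and port are placeholders.
 */
class IPFailoverProxyProviderUsageSketch {
  static org.apache.hadoop.fs.FileSystem connect() throws IOException {
    Configuration conf = new Configuration();
    // No logical nameservice mapping is needed because useLogicalURI() is
    // false; the infrastructure moves the address between namenodes.
    conf.set("dfs.client.failover.proxy.provider.nn-vip.example.com",
        IPFailoverProxyProvider.class.getName());
    return org.apache.hadoop.fs.FileSystem.get(
        URI.create("hdfs://nn-vip.example.com:8020"), conf);
  }
}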
| 4,613 | 33.691729 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/KeyManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.balancer;
import java.io.Closeable;
import java.io.IOException;
import java.util.EnumSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.StringUtils;
/**
* The class provides utilities for key and token management.
*/
@InterfaceAudience.Private
public class KeyManager implements Closeable, DataEncryptionKeyFactory {
private static final Log LOG = LogFactory.getLog(KeyManager.class);
private final NamenodeProtocol namenode;
private final boolean isBlockTokenEnabled;
private final boolean encryptDataTransfer;
private boolean shouldRun;
private final BlockTokenSecretManager blockTokenSecretManager;
private final BlockKeyUpdater blockKeyUpdater;
private DataEncryptionKey encryptionKey;
public KeyManager(String blockpoolID, NamenodeProtocol namenode,
boolean encryptDataTransfer, Configuration conf) throws IOException {
this.namenode = namenode;
this.encryptDataTransfer = encryptDataTransfer;
final ExportedBlockKeys keys = namenode.getBlockKeys();
this.isBlockTokenEnabled = keys.isBlockTokenEnabled();
if (isBlockTokenEnabled) {
long updateInterval = keys.getKeyUpdateInterval();
long tokenLifetime = keys.getTokenLifetime();
LOG.info("Block token params received from NN: update interval="
+ StringUtils.formatTime(updateInterval)
+ ", token lifetime=" + StringUtils.formatTime(tokenLifetime));
String encryptionAlgorithm = conf.get(
DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY);
this.blockTokenSecretManager = new BlockTokenSecretManager(
updateInterval, tokenLifetime, blockpoolID, encryptionAlgorithm);
this.blockTokenSecretManager.addKeys(keys);
// sync block keys with NN more frequently than NN updates its block keys
this.blockKeyUpdater = new BlockKeyUpdater(updateInterval / 4);
this.shouldRun = true;
} else {
this.blockTokenSecretManager = null;
this.blockKeyUpdater = null;
}
}
public void startBlockKeyUpdater() {
if (blockKeyUpdater != null) {
blockKeyUpdater.daemon.start();
}
}
/** Get an access token for a block. */
public Token<BlockTokenIdentifier> getAccessToken(ExtendedBlock eb
) throws IOException {
if (!isBlockTokenEnabled) {
return BlockTokenSecretManager.DUMMY_TOKEN;
} else {
if (!shouldRun) {
throw new IOException(
"Cannot get access token since BlockKeyUpdater is not running");
}
return blockTokenSecretManager.generateToken(null, eb,
EnumSet.of(BlockTokenIdentifier.AccessMode.REPLACE, BlockTokenIdentifier.AccessMode.COPY));
}
}
@Override
public DataEncryptionKey newDataEncryptionKey() {
if (encryptDataTransfer) {
synchronized (this) {
if (encryptionKey == null) {
encryptionKey = blockTokenSecretManager.generateDataEncryptionKey();
}
return encryptionKey;
}
} else {
return null;
}
}
@Override
public void close() {
shouldRun = false;
try {
if (blockKeyUpdater != null) {
blockKeyUpdater.daemon.interrupt();
}
} catch(Exception e) {
LOG.warn("Exception shutting down access key updater thread", e);
}
}
/**
* Periodically updates access keys.
*/
class BlockKeyUpdater implements Runnable, Closeable {
private final Daemon daemon = new Daemon(this);
private final long sleepInterval;
BlockKeyUpdater(final long sleepInterval) {
this.sleepInterval = sleepInterval;
LOG.info("Update block keys every " + StringUtils.formatTime(sleepInterval));
}
@Override
public void run() {
try {
while (shouldRun) {
try {
blockTokenSecretManager.addKeys(namenode.getBlockKeys());
} catch (IOException e) {
LOG.error("Failed to set keys", e);
}
Thread.sleep(sleepInterval);
}
} catch (InterruptedException e) {
LOG.debug("InterruptedException in block key updater thread", e);
} catch (Throwable e) {
LOG.error("Exception in block key updater thread", e);
shouldRun = false;
}
}
@Override
public void close() throws IOException {
try {
daemon.interrupt();
} catch(Exception e) {
LOG.warn("Exception shutting down key updater thread", e);
}
}
}
}
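/*
 * Illustrative sketch (not part of the Hadoop source): the lifecycle of a
 * KeyManager as a balancer-style client would drive it. Obtaining the
 * NamenodeProtocol proxy and the ExtendedBlock is out of scope and assumed
 * here; the class and variable names are made up.
 */
class KeyManagerUsageSketch {
  static Token<BlockTokenIdentifier> tokenFor(String blockPoolId,
      NamenodeProtocol namenode, ExtendedBlock block, Configuration conf)
      throws IOException {
    KeyManager km = new KeyManager(blockPoolId, namenode,
        false /* encryptDataTransfer */, conf);
    try {
      km.startBlockKeyUpdater();       // keep block keys in sync with the NN
      return km.getAccessToken(block); // DUMMY_TOKEN if block tokens disabled
    } finally {
      km.close();                      // stops the updater daemon
    }
  }
}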
| 6,039 | 33.913295 | 101 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/MovedBlocks.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.balancer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.util.Time;
/**
 * This class keeps track of blocks that have been moved within a fixed time
 * interval (default is 1.5 hours). The old window holds the older blocks and
 * the current window holds the more recent ones. The cleanup method checks
 * whether the old window has outlived the time interval; if so, it purges the
 * old window and then moves the blocks in the current window into the old
 * window.
*
* @param <L> Location type
*/
public class MovedBlocks<L> {
/** A class for keeping track of a block and its locations */
public static class Locations<L> {
private final Block block; // the block
/** The locations of the replicas of the block. */
protected final List<L> locations = new ArrayList<L>(3);
public Locations(Block block) {
this.block = block;
}
/** clean block locations */
public synchronized void clearLocations() {
locations.clear();
}
/** add a location */
public synchronized void addLocation(L loc) {
if (!locations.contains(loc)) {
locations.add(loc);
}
}
/** @return if the block is located on the given location. */
public synchronized boolean isLocatedOn(L loc) {
return locations.contains(loc);
}
/** @return its locations */
public synchronized List<L> getLocations() {
return locations;
}
    /** @return the block */
public Block getBlock() {
return block;
}
    /** @return the length of the block */
public long getNumBytes() {
return block.getNumBytes();
}
}
private static final int CUR_WIN = 0;
private static final int OLD_WIN = 1;
private static final int NUM_WINS = 2;
private final long winTimeInterval;
private long lastCleanupTime = Time.monotonicNow();
private final List<Map<Block, Locations<L>>> movedBlocks
= new ArrayList<Map<Block, Locations<L>>>(NUM_WINS);
/** initialize the moved blocks collection */
public MovedBlocks(long winTimeInterval) {
this.winTimeInterval = winTimeInterval;
movedBlocks.add(newMap());
movedBlocks.add(newMap());
}
private Map<Block, Locations<L>> newMap() {
return new HashMap<Block, Locations<L>>();
}
/** add a block thus marking a block to be moved */
public synchronized void put(Locations<L> block) {
movedBlocks.get(CUR_WIN).put(block.getBlock(), block);
}
/** @return if a block is marked as moved */
public synchronized boolean contains(Block block) {
return movedBlocks.get(CUR_WIN).containsKey(block) ||
movedBlocks.get(OLD_WIN).containsKey(block);
}
/** remove old blocks */
public synchronized void cleanup() {
long curTime = Time.monotonicNow();
    // check if the old window is older than the window time interval
if (lastCleanupTime + winTimeInterval <= curTime) {
// purge the old window
movedBlocks.set(OLD_WIN, movedBlocks.get(CUR_WIN));
movedBlocks.set(CUR_WIN, newMap());
lastCleanupTime = curTime;
}
}
}
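/*
 * Illustrative sketch (not part of the Hadoop source): how the two-window
 * tracking behaves. A block stays "moved" for at least one window interval and
 * at most two; the String location type, the block id and the location label
 * are placeholders.
 */
class MovedBlocksUsageSketch {
  static void example() {
    long windowMs = 5400000L; // 1.5 hours, the default used by the balancer
    MovedBlocks<String> moved = new MovedBlocks<String>(windowMs);
    Block b = new Block(1L);
    MovedBlocks.Locations<String> loc = new MovedBlocks.Locations<String>(b);
    loc.addLocation("datanode-1:DISK");
    moved.put(loc);                           // mark the block as moved
    boolean stillTracked = moved.contains(b); // true while within the windows
    moved.cleanup();                          // rotates the windows only once
                                              // the interval has elapsed
    assert stillTracked;
  }
}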
| 4,023 | 31.451613 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.balancer;
import static org.apache.hadoop.hdfs.protocolPB.PBHelper.vintPrefixed;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.Socket;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.EnumMap;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.balancer.Dispatcher.DDatanode.StorageGroup;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.HostsFileReader;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
/** Dispatching block replica moves between datanodes. */
@InterfaceAudience.Private
public class Dispatcher {
static final Log LOG = LogFactory.getLog(Dispatcher.class);
private static final long GB = 1L << 30; // 1GB
private static final long MAX_BLOCKS_SIZE_TO_FETCH = 2 * GB;
private static final int MAX_NO_PENDING_MOVE_ITERATIONS = 5;
/**
* the period of time to delay the usage of a DataNode after hitting
* errors when using it for migrating data
*/
private static long delayAfterErrors = 10 * 1000;
private final NameNodeConnector nnc;
private final SaslDataTransferClient saslClient;
/** Set of datanodes to be excluded. */
private final Set<String> excludedNodes;
/** Restrict to the following nodes. */
private final Set<String> includedNodes;
private final Collection<Source> sources = new HashSet<Source>();
private final Collection<StorageGroup> targets = new HashSet<StorageGroup>();
private final GlobalBlockMap globalBlocks = new GlobalBlockMap();
private final MovedBlocks<StorageGroup> movedBlocks;
/** Map (datanodeUuid,storageType -> StorageGroup) */
private final StorageGroupMap<StorageGroup> storageGroupMap
= new StorageGroupMap<StorageGroup>();
private NetworkTopology cluster;
private final ExecutorService moveExecutor;
private final ExecutorService dispatchExecutor;
  /** The maximum number of concurrent block moves at a datanode */
private final int maxConcurrentMovesPerNode;
private final int ioFileBufferSize;
private static class GlobalBlockMap {
private final Map<Block, DBlock> map = new HashMap<Block, DBlock>();
/**
* Get the block from the map;
* if the block is not found, create a new block and put it in the map.
*/
private DBlock get(Block b) {
DBlock block = map.get(b);
if (block == null) {
block = new DBlock(b);
map.put(b, block);
}
return block;
}
/** Remove all blocks except for the moved blocks. */
private void removeAllButRetain(MovedBlocks<StorageGroup> movedBlocks) {
for (Iterator<Block> i = map.keySet().iterator(); i.hasNext();) {
if (!movedBlocks.contains(i.next())) {
i.remove();
}
}
}
}
public static class StorageGroupMap<G extends StorageGroup> {
private static String toKey(String datanodeUuid, StorageType storageType) {
return datanodeUuid + ":" + storageType;
}
private final Map<String, G> map = new HashMap<String, G>();
public G get(String datanodeUuid, StorageType storageType) {
return map.get(toKey(datanodeUuid, storageType));
}
public void put(G g) {
final String key = toKey(g.getDatanodeInfo().getDatanodeUuid(), g.storageType);
final StorageGroup existing = map.put(key, g);
Preconditions.checkState(existing == null);
}
int size() {
return map.size();
}
void clear() {
map.clear();
}
public Collection<G> values() {
return map.values();
}
}
/** This class keeps track of a scheduled block move */
public class PendingMove {
private DBlock block;
private Source source;
private DDatanode proxySource;
private StorageGroup target;
private PendingMove(Source source, StorageGroup target) {
this.source = source;
this.target = target;
}
@Override
public String toString() {
final Block b = block != null ? block.getBlock() : null;
String bStr = b != null ? (b + " with size=" + b.getNumBytes() + " ")
: " ";
return bStr + "from " + source.getDisplayName() + " to " + target
.getDisplayName() + " through " + (proxySource != null ? proxySource
.datanode : "");
}
/**
* Choose a block & a proxy source for this pendingMove whose source &
* target have already been chosen.
*
* @return true if a block and its proxy are chosen; false otherwise
*/
private boolean chooseBlockAndProxy() {
// source and target must have the same storage type
final StorageType t = source.getStorageType();
// iterate all source's blocks until find a good one
for (Iterator<DBlock> i = source.getBlockIterator(); i.hasNext();) {
if (markMovedIfGoodBlock(i.next(), t)) {
i.remove();
return true;
}
}
return false;
}
/**
* @return true if the given block is good for the tentative move.
*/
private boolean markMovedIfGoodBlock(DBlock block, StorageType targetStorageType) {
synchronized (block) {
synchronized (movedBlocks) {
if (isGoodBlockCandidate(source, target, targetStorageType, block)) {
this.block = block;
if (chooseProxySource()) {
movedBlocks.put(block);
if (LOG.isDebugEnabled()) {
LOG.debug("Decided to move " + this);
}
return true;
}
}
}
}
return false;
}
/**
* Choose a proxy source.
*
* @return true if a proxy is found; otherwise false
*/
private boolean chooseProxySource() {
final DatanodeInfo targetDN = target.getDatanodeInfo();
// if source and target are same nodes then no need of proxy
if (source.getDatanodeInfo().equals(targetDN) && addTo(source)) {
return true;
}
// if node group is supported, first try add nodes in the same node group
if (cluster.isNodeGroupAware()) {
for (StorageGroup loc : block.getLocations()) {
if (cluster.isOnSameNodeGroup(loc.getDatanodeInfo(), targetDN)
&& addTo(loc)) {
return true;
}
}
}
// check if there is replica which is on the same rack with the target
for (StorageGroup loc : block.getLocations()) {
if (cluster.isOnSameRack(loc.getDatanodeInfo(), targetDN) && addTo(loc)) {
return true;
}
}
// find out a non-busy replica
for (StorageGroup loc : block.getLocations()) {
if (addTo(loc)) {
return true;
}
}
return false;
}
/** add to a proxy source for specific block movement */
private boolean addTo(StorageGroup g) {
final DDatanode dn = g.getDDatanode();
if (dn.addPendingBlock(this)) {
proxySource = dn;
return true;
}
return false;
}
/** Dispatch the move to the proxy source & wait for the response. */
private void dispatch() {
if (LOG.isDebugEnabled()) {
LOG.debug("Start moving " + this);
}
Socket sock = new Socket();
DataOutputStream out = null;
DataInputStream in = null;
try {
sock.connect(
NetUtils.createSocketAddr(target.getDatanodeInfo().getXferAddr()),
HdfsServerConstants.READ_TIMEOUT);
sock.setKeepAlive(true);
OutputStream unbufOut = sock.getOutputStream();
InputStream unbufIn = sock.getInputStream();
ExtendedBlock eb = new ExtendedBlock(nnc.getBlockpoolID(),
block.getBlock());
final KeyManager km = nnc.getKeyManager();
Token<BlockTokenIdentifier> accessToken = km.getAccessToken(eb);
IOStreamPair saslStreams = saslClient.socketSend(sock, unbufOut,
unbufIn, km, accessToken, target.getDatanodeInfo());
unbufOut = saslStreams.out;
unbufIn = saslStreams.in;
out = new DataOutputStream(new BufferedOutputStream(unbufOut,
ioFileBufferSize));
in = new DataInputStream(new BufferedInputStream(unbufIn,
ioFileBufferSize));
sendRequest(out, eb, accessToken);
receiveResponse(in);
nnc.getBytesMoved().addAndGet(block.getNumBytes());
target.getDDatanode().setHasSuccess();
LOG.info("Successfully moved " + this);
} catch (IOException e) {
LOG.warn("Failed to move " + this + ": " + e.getMessage());
target.getDDatanode().setHasFailure();
// Proxy or target may have some issues, delay before using these nodes
// further in order to avoid a potential storm of "threads quota
// exceeded" warnings when the dispatcher gets out of sync with work
// going on in datanodes.
proxySource.activateDelay(delayAfterErrors);
target.getDDatanode().activateDelay(delayAfterErrors);
} finally {
IOUtils.closeStream(out);
IOUtils.closeStream(in);
IOUtils.closeSocket(sock);
proxySource.removePendingBlock(this);
target.getDDatanode().removePendingBlock(this);
synchronized (this) {
reset();
}
synchronized (Dispatcher.this) {
Dispatcher.this.notifyAll();
}
}
}
/** Send a block replace request to the output stream */
private void sendRequest(DataOutputStream out, ExtendedBlock eb,
Token<BlockTokenIdentifier> accessToken) throws IOException {
new Sender(out).replaceBlock(eb, target.storageType, accessToken,
source.getDatanodeInfo().getDatanodeUuid(), proxySource.datanode);
}
/** Receive a block copy response from the input stream */
private void receiveResponse(DataInputStream in) throws IOException {
BlockOpResponseProto response =
BlockOpResponseProto.parseFrom(vintPrefixed(in));
while (response.getStatus() == Status.IN_PROGRESS) {
// read intermediate responses
response = BlockOpResponseProto.parseFrom(vintPrefixed(in));
}
      String logInfo = "block move failed";
DataTransferProtoUtil.checkBlockOpStatus(response, logInfo);
}
/** reset the object */
private void reset() {
block = null;
source = null;
proxySource = null;
target = null;
}
}
/** A class for keeping track of block locations in the dispatcher. */
public static class DBlock extends MovedBlocks.Locations<StorageGroup> {
public DBlock(Block block) {
super(block);
}
}
/** The class represents a desired move. */
static class Task {
private final StorageGroup target;
private long size; // bytes scheduled to move
Task(StorageGroup target, long size) {
this.target = target;
this.size = size;
}
long getSize() {
return size;
}
}
/** A class that keeps track of a datanode. */
public static class DDatanode {
/** A group of storages in a datanode with the same storage type. */
public class StorageGroup {
final StorageType storageType;
final long maxSize2Move;
private long scheduledSize = 0L;
private StorageGroup(StorageType storageType, long maxSize2Move) {
this.storageType = storageType;
this.maxSize2Move = maxSize2Move;
}
public StorageType getStorageType() {
return storageType;
}
private DDatanode getDDatanode() {
return DDatanode.this;
}
public DatanodeInfo getDatanodeInfo() {
return DDatanode.this.datanode;
}
/** Decide if still need to move more bytes */
boolean hasSpaceForScheduling() {
return hasSpaceForScheduling(0L);
}
synchronized boolean hasSpaceForScheduling(long size) {
return availableSizeToMove() > size;
}
      /** @return the remaining number of bytes that can still be scheduled to move */
synchronized long availableSizeToMove() {
return maxSize2Move - scheduledSize;
}
/** increment scheduled size */
public synchronized void incScheduledSize(long size) {
scheduledSize += size;
}
/** @return scheduled size */
synchronized long getScheduledSize() {
return scheduledSize;
}
/** Reset scheduled size to zero. */
synchronized void resetScheduledSize() {
scheduledSize = 0L;
}
private PendingMove addPendingMove(DBlock block, final PendingMove pm) {
if (getDDatanode().addPendingBlock(pm)) {
if (pm.markMovedIfGoodBlock(block, getStorageType())) {
incScheduledSize(pm.block.getNumBytes());
return pm;
} else {
getDDatanode().removePendingBlock(pm);
}
}
return null;
}
/** @return the name for display */
String getDisplayName() {
return datanode + ":" + storageType;
}
@Override
public String toString() {
return getDisplayName();
}
@Override
public int hashCode() {
return getStorageType().hashCode() ^ getDatanodeInfo().hashCode();
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
} else if (obj == null || !(obj instanceof StorageGroup)) {
return false;
} else {
final StorageGroup that = (StorageGroup) obj;
return this.getStorageType() == that.getStorageType()
&& this.getDatanodeInfo().equals(that.getDatanodeInfo());
}
}
}
final DatanodeInfo datanode;
private final EnumMap<StorageType, Source> sourceMap
= new EnumMap<StorageType, Source>(StorageType.class);
private final EnumMap<StorageType, StorageGroup> targetMap
= new EnumMap<StorageType, StorageGroup>(StorageType.class);
protected long delayUntil = 0L;
/** blocks being moved but not confirmed yet */
private final List<PendingMove> pendings;
private volatile boolean hasFailure = false;
private volatile boolean hasSuccess = false;
private final int maxConcurrentMoves;
@Override
public String toString() {
return getClass().getSimpleName() + ":" + datanode;
}
private DDatanode(DatanodeInfo datanode, int maxConcurrentMoves) {
this.datanode = datanode;
this.maxConcurrentMoves = maxConcurrentMoves;
this.pendings = new ArrayList<PendingMove>(maxConcurrentMoves);
}
public DatanodeInfo getDatanodeInfo() {
return datanode;
}
private static <G extends StorageGroup> void put(StorageType storageType,
G g, EnumMap<StorageType, G> map) {
final StorageGroup existing = map.put(storageType, g);
Preconditions.checkState(existing == null);
}
public StorageGroup addTarget(StorageType storageType, long maxSize2Move) {
final StorageGroup g = new StorageGroup(storageType, maxSize2Move);
put(storageType, g, targetMap);
return g;
}
public Source addSource(StorageType storageType, long maxSize2Move, Dispatcher d) {
final Source s = d.new Source(storageType, maxSize2Move, this);
put(storageType, s, sourceMap);
return s;
}
synchronized private void activateDelay(long delta) {
delayUntil = Time.monotonicNow() + delta;
}
synchronized private boolean isDelayActive() {
if (delayUntil == 0 || Time.monotonicNow() > delayUntil) {
delayUntil = 0;
return false;
}
return true;
}
/** Check if the node can schedule more blocks to move */
synchronized boolean isPendingQNotFull() {
return pendings.size() < maxConcurrentMoves;
}
/** Check if all the dispatched moves are done */
synchronized boolean isPendingQEmpty() {
return pendings.isEmpty();
}
/** Add a scheduled block move to the node */
synchronized boolean addPendingBlock(PendingMove pendingBlock) {
if (!isDelayActive() && isPendingQNotFull()) {
return pendings.add(pendingBlock);
}
return false;
}
/** Remove a scheduled block move from the node */
synchronized boolean removePendingBlock(PendingMove pendingBlock) {
return pendings.remove(pendingBlock);
}
void setHasFailure() {
this.hasFailure = true;
}
void setHasSuccess() {
this.hasSuccess = true;
}
}
/** A node that can be the sources of a block move */
public class Source extends DDatanode.StorageGroup {
private final List<Task> tasks = new ArrayList<Task>(2);
private long blocksToReceive = 0L;
/**
* Source blocks point to the objects in {@link Dispatcher#globalBlocks}
* because we want to keep one copy of a block and be aware that the
* locations are changing over time.
*/
private final List<DBlock> srcBlocks = new ArrayList<DBlock>();
private Source(StorageType storageType, long maxSize2Move, DDatanode dn) {
dn.super(storageType, maxSize2Move);
}
/** Add a task */
void addTask(Task task) {
Preconditions.checkState(task.target != this,
"Source and target are the same storage group " + getDisplayName());
incScheduledSize(task.size);
tasks.add(task);
}
/** @return an iterator to this source's blocks */
Iterator<DBlock> getBlockIterator() {
return srcBlocks.iterator();
}
/**
* Fetch new blocks of this source from namenode and update this source's
* block list & {@link Dispatcher#globalBlocks}.
*
* @return the total size of the received blocks in the number of bytes.
*/
private long getBlockList() throws IOException {
final long size = Math.min(MAX_BLOCKS_SIZE_TO_FETCH, blocksToReceive);
final BlocksWithLocations newBlocks = nnc.getBlocks(getDatanodeInfo(), size);
long bytesReceived = 0;
for (BlockWithLocations blk : newBlocks.getBlocks()) {
bytesReceived += blk.getBlock().getNumBytes();
synchronized (globalBlocks) {
final DBlock block = globalBlocks.get(blk.getBlock());
synchronized (block) {
block.clearLocations();
// update locations
final String[] datanodeUuids = blk.getDatanodeUuids();
final StorageType[] storageTypes = blk.getStorageTypes();
for (int i = 0; i < datanodeUuids.length; i++) {
final StorageGroup g = storageGroupMap.get(
datanodeUuids[i], storageTypes[i]);
if (g != null) { // not unknown
block.addLocation(g);
}
}
}
if (!srcBlocks.contains(block) && isGoodBlockCandidate(block)) {
// filter bad candidates
srcBlocks.add(block);
}
}
}
return bytesReceived;
}
/** Decide if the given block is a good candidate to move or not */
private boolean isGoodBlockCandidate(DBlock block) {
// source and target must have the same storage type
final StorageType sourceStorageType = getStorageType();
for (Task t : tasks) {
if (Dispatcher.this.isGoodBlockCandidate(this, t.target,
sourceStorageType, block)) {
return true;
}
}
return false;
}
/**
* Choose a move for the source. The block's source, target, and proxy
     * are determined as well. When choosing the proxy and the target, source
     * and target throttling is taken into account: they are chosen only when
     * they have the capacity to support this block move. The block should be
     * dispatched immediately after this method returns.
*
* @return a move that's good for the source to dispatch immediately.
*/
private PendingMove chooseNextMove() {
for (Iterator<Task> i = tasks.iterator(); i.hasNext();) {
final Task task = i.next();
final DDatanode target = task.target.getDDatanode();
final PendingMove pendingBlock = new PendingMove(this, task.target);
if (target.addPendingBlock(pendingBlock)) {
// target is not busy, so do a tentative block allocation
if (pendingBlock.chooseBlockAndProxy()) {
long blockSize = pendingBlock.block.getNumBytes();
incScheduledSize(-blockSize);
task.size -= blockSize;
if (task.size == 0) {
i.remove();
}
return pendingBlock;
} else {
// cancel the tentative move
target.removePendingBlock(pendingBlock);
}
}
}
return null;
}
/** Add a pending move */
public PendingMove addPendingMove(DBlock block, StorageGroup target) {
return target.addPendingMove(block, new PendingMove(this, target));
}
/** Iterate all source's blocks to remove moved ones */
private void removeMovedBlocks() {
for (Iterator<DBlock> i = getBlockIterator(); i.hasNext();) {
if (movedBlocks.contains(i.next().getBlock())) {
i.remove();
}
}
}
private static final int SOURCE_BLOCKS_MIN_SIZE = 5;
/** @return if should fetch more blocks from namenode */
private boolean shouldFetchMoreBlocks() {
return srcBlocks.size() < SOURCE_BLOCKS_MIN_SIZE && blocksToReceive > 0;
}
private static final long MAX_ITERATION_TIME = 20 * 60 * 1000L; // 20 mins
/**
* This method iteratively does the following: it first selects a block to
     * move, then sends a request to the proxy source to start the block move.
     * When the source's block list falls below a threshold, it asks the
     * namenode for more blocks. It terminates when it has dispatched enough
     * block move tasks, or it has received enough blocks from the namenode, or
     * the elapsed time of the iteration has exceeded the max time limit.
*/
private void dispatchBlocks() {
final long startTime = Time.monotonicNow();
this.blocksToReceive = 2 * getScheduledSize();
boolean isTimeUp = false;
int noPendingMoveIteration = 0;
while (!isTimeUp && getScheduledSize() > 0
&& (!srcBlocks.isEmpty() || blocksToReceive > 0)) {
final PendingMove p = chooseNextMove();
if (p != null) {
// Reset no pending move counter
noPendingMoveIteration=0;
executePendingMove(p);
continue;
}
// Since we cannot schedule any block to move,
// remove any moved blocks from the source block list and
removeMovedBlocks(); // filter already moved blocks
// check if we should fetch more blocks from the namenode
if (shouldFetchMoreBlocks()) {
// fetch new blocks
try {
blocksToReceive -= getBlockList();
continue;
} catch (IOException e) {
LOG.warn("Exception while getting block list", e);
return;
}
} else {
// source node cannot find a pending block to move, iteration +1
noPendingMoveIteration++;
// in case no blocks can be moved for source node's task,
// jump out of while-loop after 5 iterations.
if (noPendingMoveIteration >= MAX_NO_PENDING_MOVE_ITERATIONS) {
resetScheduledSize();
}
}
// check if time is up or not
if (Time.monotonicNow() - startTime > MAX_ITERATION_TIME) {
isTimeUp = true;
continue;
}
// Now we can not schedule any block to move and there are
// no new blocks added to the source block list, so we wait.
try {
synchronized (Dispatcher.this) {
Dispatcher.this.wait(1000); // wait for targets/sources to be idle
}
} catch (InterruptedException ignored) {
}
}
}
@Override
public int hashCode() {
return super.hashCode();
}
@Override
public boolean equals(Object obj) {
return super.equals(obj);
}
}
public Dispatcher(NameNodeConnector nnc, Set<String> includedNodes,
Set<String> excludedNodes, long movedWinWidth, int moverThreads,
int dispatcherThreads, int maxConcurrentMovesPerNode, Configuration conf) {
this.nnc = nnc;
this.excludedNodes = excludedNodes;
this.includedNodes = includedNodes;
this.movedBlocks = new MovedBlocks<StorageGroup>(movedWinWidth);
this.cluster = NetworkTopology.getInstance(conf);
this.moveExecutor = Executors.newFixedThreadPool(moverThreads);
this.dispatchExecutor = dispatcherThreads == 0? null
: Executors.newFixedThreadPool(dispatcherThreads);
this.maxConcurrentMovesPerNode = maxConcurrentMovesPerNode;
this.saslClient = new SaslDataTransferClient(conf,
DataTransferSaslUtil.getSaslPropertiesResolver(conf),
TrustedChannelResolver.getInstance(conf), nnc.fallbackToSimpleAuth);
this.ioFileBufferSize = DFSUtil.getIoFileBufferSize(conf);
}
public DistributedFileSystem getDistributedFileSystem() {
return nnc.getDistributedFileSystem();
}
public StorageGroupMap<StorageGroup> getStorageGroupMap() {
return storageGroupMap;
}
public NetworkTopology getCluster() {
return cluster;
}
long getBytesMoved() {
return nnc.getBytesMoved().get();
}
long bytesToMove() {
Preconditions.checkState(
storageGroupMap.size() >= sources.size() + targets.size(),
"Mismatched number of storage groups (" + storageGroupMap.size()
+ " < " + sources.size() + " sources + " + targets.size()
+ " targets)");
long b = 0L;
for (Source src : sources) {
b += src.getScheduledSize();
}
return b;
}
void add(Source source, StorageGroup target) {
sources.add(source);
targets.add(target);
}
private boolean shouldIgnore(DatanodeInfo dn) {
// ignore decommissioned nodes
final boolean decommissioned = dn.isDecommissioned();
// ignore decommissioning nodes
final boolean decommissioning = dn.isDecommissionInProgress();
// ignore nodes in exclude list
final boolean excluded = Util.isExcluded(excludedNodes, dn);
// ignore nodes not in the include list (if include list is not empty)
final boolean notIncluded = !Util.isIncluded(includedNodes, dn);
if (decommissioned || decommissioning || excluded || notIncluded) {
if (LOG.isTraceEnabled()) {
LOG.trace("Excluding datanode " + dn + ": " + decommissioned + ", "
+ decommissioning + ", " + excluded + ", " + notIncluded);
}
return true;
}
return false;
}
/** Get live datanode storage reports and then build the network topology. */
public List<DatanodeStorageReport> init() throws IOException {
final DatanodeStorageReport[] reports = nnc.getLiveDatanodeStorageReport();
final List<DatanodeStorageReport> trimmed = new ArrayList<DatanodeStorageReport>();
// create network topology and classify utilization collections:
// over-utilized, above-average, below-average and under-utilized.
for (DatanodeStorageReport r : DFSUtil.shuffle(reports)) {
final DatanodeInfo datanode = r.getDatanodeInfo();
if (shouldIgnore(datanode)) {
continue;
}
trimmed.add(r);
cluster.add(datanode);
}
return trimmed;
}
public DDatanode newDatanode(DatanodeInfo datanode) {
return new DDatanode(datanode, maxConcurrentMovesPerNode);
}
public void executePendingMove(final PendingMove p) {
// move the block
moveExecutor.execute(new Runnable() {
@Override
public void run() {
p.dispatch();
}
});
}
public boolean dispatchAndCheckContinue() throws InterruptedException {
return nnc.shouldContinue(dispatchBlockMoves());
}
/**
   * Dispatch block moves for each source. The thread selects blocks to move
   * and sends a request to the proxy source to initiate the block move. The
   * process is flow controlled: block selection is blocked if there are too
   * many unconfirmed block moves.
*
* @return the total number of bytes successfully moved in this iteration.
*/
private long dispatchBlockMoves() throws InterruptedException {
final long bytesLastMoved = getBytesMoved();
final Future<?>[] futures = new Future<?>[sources.size()];
final Iterator<Source> i = sources.iterator();
for (int j = 0; j < futures.length; j++) {
final Source s = i.next();
futures[j] = dispatchExecutor.submit(new Runnable() {
@Override
public void run() {
s.dispatchBlocks();
}
});
}
// wait for all dispatcher threads to finish
for (Future<?> future : futures) {
try {
future.get();
} catch (ExecutionException e) {
LOG.warn("Dispatcher thread failed", e.getCause());
}
}
// wait for all block moving to be done
waitForMoveCompletion(targets);
return getBytesMoved() - bytesLastMoved;
}
/** The sleeping period before checking if block move is completed again */
static private long blockMoveWaitTime = 30000L;
/**
* Wait for all block move confirmations.
* @return true if there is failed move execution
*/
public static boolean waitForMoveCompletion(
Iterable<? extends StorageGroup> targets) {
boolean hasFailure = false;
for(;;) {
boolean empty = true;
for (StorageGroup t : targets) {
if (!t.getDDatanode().isPendingQEmpty()) {
empty = false;
break;
} else {
hasFailure |= t.getDDatanode().hasFailure;
}
}
if (empty) {
return hasFailure; // all pending queues are empty
}
try {
Thread.sleep(blockMoveWaitTime);
} catch (InterruptedException ignored) {
}
}
}
/**
* @return true if some moves are success.
*/
public static boolean checkForSuccess(
Iterable<? extends StorageGroup> targets) {
boolean hasSuccess = false;
for (StorageGroup t : targets) {
hasSuccess |= t.getDDatanode().hasSuccess;
}
return hasSuccess;
}
/**
* Decide if the block is a good candidate to be moved from source to target.
* A block is a good candidate if
* 1. the block is not in the process of being moved/has not been moved;
* 2. the block does not have a replica on the target;
* 3. doing the move does not reduce the number of racks that the block has
*/
private boolean isGoodBlockCandidate(StorageGroup source, StorageGroup target,
StorageType targetStorageType, DBlock block) {
if (source.equals(target)) {
return false;
}
if (target.storageType != targetStorageType) {
return false;
}
// check if the block is moved or not
if (movedBlocks.contains(block.getBlock())) {
return false;
}
final DatanodeInfo targetDatanode = target.getDatanodeInfo();
if (source.getDatanodeInfo().equals(targetDatanode)) {
// the block is moved inside same DN
return true;
}
// check if block has replica in target node
for (StorageGroup blockLocation : block.getLocations()) {
if (blockLocation.getDatanodeInfo().equals(targetDatanode)) {
return false;
}
}
if (cluster.isNodeGroupAware()
&& isOnSameNodeGroupWithReplicas(source, target, block)) {
return false;
}
if (reduceNumOfRacks(source, target, block)) {
return false;
}
return true;
}
/**
* Determine whether moving the given block replica from source to target
* would reduce the number of racks of the block replicas.
*/
private boolean reduceNumOfRacks(StorageGroup source, StorageGroup target,
DBlock block) {
final DatanodeInfo sourceDn = source.getDatanodeInfo();
if (cluster.isOnSameRack(sourceDn, target.getDatanodeInfo())) {
// source and target are on the same rack
return false;
}
boolean notOnSameRack = true;
synchronized (block) {
for (StorageGroup loc : block.getLocations()) {
if (cluster.isOnSameRack(loc.getDatanodeInfo(), target.getDatanodeInfo())) {
notOnSameRack = false;
break;
}
}
}
if (notOnSameRack) {
// target is not on the same rack as any replica
return false;
}
for (StorageGroup g : block.getLocations()) {
if (g != source && cluster.isOnSameRack(g.getDatanodeInfo(), sourceDn)) {
// source is on the same rack of another replica
return false;
}
}
return true;
}
/**
* Check if there are any replica (other than source) on the same node group
* with target. If true, then target is not a good candidate for placing
* specific replica as we don't want 2 replicas under the same nodegroup.
*
* @return true if there are any replica (other than source) on the same node
* group with target
*/
private boolean isOnSameNodeGroupWithReplicas(StorageGroup source,
StorageGroup target, DBlock block) {
final DatanodeInfo targetDn = target.getDatanodeInfo();
for (StorageGroup g : block.getLocations()) {
if (g != source && cluster.isOnSameNodeGroup(g.getDatanodeInfo(), targetDn)) {
return true;
}
}
return false;
}
/** Reset all fields in order to prepare for the next iteration */
void reset(Configuration conf) {
cluster = NetworkTopology.getInstance(conf);
storageGroupMap.clear();
sources.clear();
targets.clear();
globalBlocks.removeAllButRetain(movedBlocks);
movedBlocks.cleanup();
}
/** set the sleeping period for block move completion check */
@VisibleForTesting
public static void setBlockMoveWaitTime(long time) {
blockMoveWaitTime = time;
}
@VisibleForTesting
public static void setDelayAfterErrors(long time) {
delayAfterErrors = time;
}
/** shutdown thread pools */
public void shutdownNow() {
if (dispatchExecutor != null) {
dispatchExecutor.shutdownNow();
}
moveExecutor.shutdownNow();
}
static class Util {
/** @return true if data node is part of the excludedNodes. */
static boolean isExcluded(Set<String> excludedNodes, DatanodeInfo dn) {
return isIn(excludedNodes, dn);
}
/**
* @return true if includedNodes is empty or data node is part of the
* includedNodes.
*/
static boolean isIncluded(Set<String> includedNodes, DatanodeInfo dn) {
return (includedNodes.isEmpty() || isIn(includedNodes, dn));
}
/**
     * The match is checked using the host name and the IP address, each with
     * and without the port number.
*
* @return true if the datanode's transfer address matches the set of nodes.
*/
private static boolean isIn(Set<String> datanodes, DatanodeInfo dn) {
return isIn(datanodes, dn.getPeerHostName(), dn.getXferPort())
|| isIn(datanodes, dn.getIpAddr(), dn.getXferPort())
|| isIn(datanodes, dn.getHostName(), dn.getXferPort());
}
/** @return true if nodes contains host or host:port */
private static boolean isIn(Set<String> nodes, String host, int port) {
if (host == null) {
return false;
}
return (nodes.contains(host) || nodes.contains(host + ":" + port));
}
/**
* Parse a comma separated string to obtain set of host names
*
* @return set of host names
*/
static Set<String> parseHostList(String string) {
String[] addrs = StringUtils.getTrimmedStrings(string);
return new HashSet<String>(Arrays.asList(addrs));
}
/**
* Read set of host names from a file
*
* @return set of host names
*/
static Set<String> getHostListFromFile(String fileName, String type) {
Set<String> nodes = new HashSet<String>();
try {
HostsFileReader.readFileToSet(type, fileName, nodes);
return StringUtils.getTrimmedStrings(nodes);
} catch (IOException e) {
throw new IllegalArgumentException(
"Failed to read host list from file: " + fileName);
}
}
}
}
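/*
 * Illustrative sketch (not part of the Hadoop source): the per-iteration shape
 * of a tool driving the Dispatcher, such as the Balancer or the Mover.
 * Constructing the NameNodeConnector, classifying storages and registering
 * source/target pairs are tool-specific and only hinted at in comments; the
 * thread counts and window size below are made-up example values.
 */
class DispatcherDrivingSketch {
  static void runIterations(NameNodeConnector nnc, Configuration conf)
      throws IOException, InterruptedException {
    Set<String> included = new HashSet<String>();   // empty => include all
    Set<String> excluded = new HashSet<String>();   // empty => exclude none
    Dispatcher dispatcher = new Dispatcher(nnc, included, excluded,
        5400000L /* moved-blocks window, ms */, 50 /* mover threads */,
        200 /* dispatcher threads */, 5 /* max concurrent moves per DN */, conf);
    try {
      boolean shouldContinue = true;
      while (shouldContinue) {
        // 1. Refresh the live datanode storage reports and the topology.
        List<DatanodeStorageReport> reports = dispatcher.init();
        // 2. (Tool-specific) use the reports to build Source and StorageGroup
        //    objects and register the planned moves via dispatcher.add(...).
        // 3. Dispatch the scheduled moves and ask the connector whether
        //    another iteration is worthwhile.
        shouldContinue = dispatcher.dispatchAndCheckContinue();
        // 4. Drop per-iteration state before the next pass.
        dispatcher.reset(conf);
      }
    } finally {
      dispatcher.shutdownNow();
    }
  }
}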
| 39,267 | 32.533732 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.balancer;
import static com.google.common.base.Preconditions.checkArgument;
import java.io.IOException;
import java.io.PrintStream;
import java.net.URI;
import java.text.DateFormat;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.balancer.Dispatcher.DDatanode;
import org.apache.hadoop.hdfs.server.balancer.Dispatcher.DDatanode.StorageGroup;
import org.apache.hadoop.hdfs.server.balancer.Dispatcher.Source;
import org.apache.hadoop.hdfs.server.balancer.Dispatcher.Task;
import org.apache.hadoop.hdfs.server.balancer.Dispatcher.Util;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import com.google.common.base.Preconditions;
/** <p>The balancer is a tool that balances disk space usage on an HDFS cluster
* when some datanodes become full or when new empty nodes join the cluster.
* The tool is deployed as an application program that can be run by the
 * cluster administrator on a live HDFS cluster while applications are
 * adding and deleting files.
*
* <p>SYNOPSIS
* <pre>
* To start:
* bin/start-balancer.sh [-threshold <threshold>]
 * Example: bin/start-balancer.sh
* start the balancer with a default threshold of 10%
 * bin/start-balancer.sh -threshold 5
* start the balancer with a threshold of 5%
 * bin/start-balancer.sh -idleiterations 20
* start the balancer with maximum 20 consecutive idle iterations
 * bin/start-balancer.sh -idleiterations -1
 * run the balancer with the default threshold indefinitely
* To stop:
 * bin/stop-balancer.sh
* </pre>
*
* <p>DESCRIPTION
 * <p>The threshold parameter is a percentage in the range of [1%, 100%] with a
* default value of 10%. The threshold sets a target for whether the cluster
* is balanced. A cluster is balanced if for each datanode, the utilization
* of the node (ratio of used space at the node to total capacity of the node)
 * differs from the utilization of the cluster (ratio of used space in the cluster
* to total capacity of the cluster) by no more than the threshold value.
* The smaller the threshold, the more balanced a cluster will become.
* It takes more time to run the balancer for small threshold values.
* Also for a very small threshold the cluster may not be able to reach the
* balanced state when applications write and delete files concurrently.
*
* <p>The tool moves blocks from highly utilized datanodes to poorly
* utilized datanodes iteratively. In each iteration a datanode moves or
* receives no more than the lesser of 10G bytes or the threshold fraction
* of its capacity. Each iteration runs no more than 20 minutes.
* At the end of each iteration, the balancer obtains updated datanodes
* information from the namenode.
*
 * <p>A configuration property that limits the balancer's use of bandwidth is
* defined in the default configuration file:
* <pre>
* <property>
* <name>dfs.balance.bandwidthPerSec</name>
* <value>1048576</value>
* <description> Specifies the maximum bandwidth that each datanode
* can utilize for the balancing purpose in term of the number of bytes
* per second. </description>
* </property>
* </pre>
*
* <p>This property determines the maximum speed at which a block will be
* moved from one datanode to another. The default value is 1MB/s. The higher
* the bandwidth, the faster a cluster can reach the balanced state,
* but with greater competition with application processes. If an
* administrator changes the value of this property in the configuration
* file, the change is observed when HDFS is next restarted.
*
 * <p>MONITORING BALANCER PROGRESS
* <p>After the balancer is started, an output file name where the balancer
* progress will be recorded is printed on the screen. The administrator
* can monitor the running of the balancer by reading the output file.
* The output shows the balancer's status iteration by iteration. In each
* iteration it prints the starting time, the iteration number, the total
* number of bytes that have been moved in the previous iterations,
* the total number of bytes that are left to move in order for the cluster
* to be balanced, and the number of bytes that are being moved in this
* iteration. Normally "Bytes Already Moved" is increasing while "Bytes Left
* To Move" is decreasing.
*
* <p>Running multiple instances of the balancer in an HDFS cluster is
* prohibited by the tool.
*
* <p>The balancer automatically exits when any of the following five
* conditions is satisfied:
* <ol>
* <li>The cluster is balanced;
* <li>No block can be moved;
* <li>No block has been moved for specified consecutive iterations (5 by default);
* <li>An IOException occurs while communicating with the namenode;
* <li>Another balancer is running.
* </ol>
*
* <p>Upon exit, a balancer returns an exit code and prints one of the
 * following messages to the output file corresponding to the above exit
* reasons:
* <ol>
* <li>The cluster is balanced. Exiting
* <li>No block can be moved. Exiting...
* <li>No block has been moved for specified iterations (5 by default). Exiting...
* <li>Received an IO exception: failure reason. Exiting...
* <li>Another balancer is running. Exiting...
* </ol>
*
* <p>The administrator can interrupt the execution of the balancer at any
* time by running the command "stop-balancer.sh" on the machine where the
* balancer is running.
*/
@InterfaceAudience.Private
public class Balancer {
static final Log LOG = LogFactory.getLog(Balancer.class);
static final Path BALANCER_ID_PATH = new Path("/system/balancer.id");
private static final long GB = 1L << 30; //1GB
private static final long MAX_SIZE_TO_MOVE = 10*GB;
private static final String USAGE = "Usage: hdfs balancer"
+ "\n\t[-policy <policy>]\tthe balancing policy: "
+ BalancingPolicy.Node.INSTANCE.getName() + " or "
+ BalancingPolicy.Pool.INSTANCE.getName()
+ "\n\t[-threshold <threshold>]\tPercentage of disk capacity"
+ "\n\t[-exclude [-f <hosts-file> | <comma-separated list of hosts>]]"
+ "\tExcludes the specified datanodes."
+ "\n\t[-include [-f <hosts-file> | <comma-separated list of hosts>]]"
+ "\tIncludes only the specified datanodes."
+ "\n\t[-idleiterations <idleiterations>]"
+ "\tNumber of consecutive idle iterations (-1 for Infinite) before "
+ "exit."
+ "\n\t[-runDuringUpgrade]"
+ "\tWhether to run the balancer during an ongoing HDFS upgrade."
+ "This is usually not desired since it will not affect used space "
+ "on over-utilized machines.";
private final Dispatcher dispatcher;
private final NameNodeConnector nnc;
private final BalancingPolicy policy;
private final boolean runDuringUpgrade;
private final double threshold;
// all data node lists
private final Collection<Source> overUtilized = new LinkedList<Source>();
private final Collection<Source> aboveAvgUtilized = new LinkedList<Source>();
private final Collection<StorageGroup> belowAvgUtilized
= new LinkedList<StorageGroup>();
private final Collection<StorageGroup> underUtilized
= new LinkedList<StorageGroup>();
/* Check that this Balancer is compatible with the Block Placement Policy
* used by the Namenode.
*/
private static void checkReplicationPolicyCompatibility(Configuration conf
) throws UnsupportedActionException {
if (!(BlockPlacementPolicy.getInstance(conf, null, null, null) instanceof
BlockPlacementPolicyDefault)) {
throw new UnsupportedActionException(
"Balancer without BlockPlacementPolicyDefault");
}
}
/**
   * Construct and initialize a balancer. It sets the value of the threshold
   * and builds the communication proxies to the namenode, as a client and
   * as a secondary namenode, along with retry proxies used when a
   * connection fails.
*/
Balancer(NameNodeConnector theblockpool, Parameters p, Configuration conf) {
final long movedWinWidth = conf.getLong(
DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_KEY,
DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_DEFAULT);
final int moverThreads = conf.getInt(
DFSConfigKeys.DFS_BALANCER_MOVERTHREADS_KEY,
DFSConfigKeys.DFS_BALANCER_MOVERTHREADS_DEFAULT);
final int dispatcherThreads = conf.getInt(
DFSConfigKeys.DFS_BALANCER_DISPATCHERTHREADS_KEY,
DFSConfigKeys.DFS_BALANCER_DISPATCHERTHREADS_DEFAULT);
final int maxConcurrentMovesPerNode = conf.getInt(
DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT);
this.nnc = theblockpool;
this.dispatcher = new Dispatcher(theblockpool, p.nodesToBeIncluded,
p.nodesToBeExcluded, movedWinWidth, moverThreads, dispatcherThreads,
maxConcurrentMovesPerNode, conf);
this.threshold = p.threshold;
this.policy = p.policy;
this.runDuringUpgrade = p.runDuringUpgrade;
}
private static long getCapacity(DatanodeStorageReport report, StorageType t) {
long capacity = 0L;
for(StorageReport r : report.getStorageReports()) {
if (r.getStorage().getStorageType() == t) {
capacity += r.getCapacity();
}
}
return capacity;
}
private static long getRemaining(DatanodeStorageReport report, StorageType t) {
long remaining = 0L;
for(StorageReport r : report.getStorageReports()) {
if (r.getStorage().getStorageType() == t) {
remaining += r.getRemaining();
}
}
return remaining;
}
/**
* Given a datanode storage set, build a network topology and decide
* over-utilized storages, above average utilized storages,
* below average utilized storages, and underutilized storages.
   * The input datanode storage set is shuffled in order to randomize
   * the storage matching later on.
*
* @return the number of bytes needed to move in order to balance the cluster.
*/
private long init(List<DatanodeStorageReport> reports) {
// compute average utilization
for (DatanodeStorageReport r : reports) {
policy.accumulateSpaces(r);
}
policy.initAvgUtilization();
// create network topology and classify utilization collections:
// over-utilized, above-average, below-average and under-utilized.
long overLoadedBytes = 0L, underLoadedBytes = 0L;
for(DatanodeStorageReport r : reports) {
final DDatanode dn = dispatcher.newDatanode(r.getDatanodeInfo());
for(StorageType t : StorageType.getMovableTypes()) {
final Double utilization = policy.getUtilization(r, t);
if (utilization == null) { // datanode does not have such storage type
continue;
}
final long capacity = getCapacity(r, t);
final double utilizationDiff = utilization - policy.getAvgUtilization(t);
final double thresholdDiff = Math.abs(utilizationDiff) - threshold;
final long maxSize2Move = computeMaxSize2Move(capacity,
getRemaining(r, t), utilizationDiff, threshold);
final StorageGroup g;
if (utilizationDiff > 0) {
final Source s = dn.addSource(t, maxSize2Move, dispatcher);
if (thresholdDiff <= 0) { // within threshold
aboveAvgUtilized.add(s);
} else {
overLoadedBytes += percentage2bytes(thresholdDiff, capacity);
overUtilized.add(s);
}
g = s;
} else {
g = dn.addTarget(t, maxSize2Move);
if (thresholdDiff <= 0) { // within threshold
belowAvgUtilized.add(g);
} else {
underLoadedBytes += percentage2bytes(thresholdDiff, capacity);
underUtilized.add(g);
}
}
dispatcher.getStorageGroupMap().put(g);
}
}
logUtilizationCollections();
Preconditions.checkState(dispatcher.getStorageGroupMap().size()
== overUtilized.size() + underUtilized.size() + aboveAvgUtilized.size()
+ belowAvgUtilized.size(),
"Mismatched number of storage groups");
// return number of bytes to be moved in order to make the cluster balanced
return Math.max(overLoadedBytes, underLoadedBytes);
}
private static long computeMaxSize2Move(final long capacity, final long remaining,
final double utilizationDiff, final double threshold) {
final double diff = Math.min(threshold, Math.abs(utilizationDiff));
long maxSizeToMove = percentage2bytes(diff, capacity);
if (utilizationDiff < 0) {
maxSizeToMove = Math.min(remaining, maxSizeToMove);
}
return Math.min(MAX_SIZE_TO_MOVE, maxSizeToMove);
}
private static long percentage2bytes(double percentage, long capacity) {
Preconditions.checkArgument(percentage >= 0, "percentage = %s < 0",
percentage);
return (long)(percentage * capacity / 100.0);
}
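  // Illustrative worked example (not part of the original sources): for a
  // hypothetical 4 TB storage whose utilization is 2.5% above the average and
  // a threshold of 10%, computeMaxSize2Move() takes diff = min(10, 2.5) = 2.5
  // and percentage2bytes(2.5, 4 TB) ~= 100 GB, which is then capped at
  // MAX_SIZE_TO_MOVE, so at most 10 GB is scheduled in one iteration.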
/* log the over utilized & under utilized nodes */
private void logUtilizationCollections() {
logUtilizationCollection("over-utilized", overUtilized);
if (LOG.isTraceEnabled()) {
logUtilizationCollection("above-average", aboveAvgUtilized);
logUtilizationCollection("below-average", belowAvgUtilized);
}
logUtilizationCollection("underutilized", underUtilized);
}
private static <T extends StorageGroup>
void logUtilizationCollection(String name, Collection<T> items) {
LOG.info(items.size() + " " + name + ": " + items);
}
/**
* Decide all <source, target> pairs and
   * the number of bytes to move from a source to a target.
   * Maximum bytes to be moved per storage group is
   * min(the threshold fraction of its capacity, MAX_SIZE_TO_MOVE).
* @return total number of bytes to move in this iteration
*/
private long chooseStorageGroups() {
// First, match nodes on the same node group if cluster is node group aware
if (dispatcher.getCluster().isNodeGroupAware()) {
chooseStorageGroups(Matcher.SAME_NODE_GROUP);
}
// Then, match nodes on the same rack
chooseStorageGroups(Matcher.SAME_RACK);
// At last, match all remaining nodes
chooseStorageGroups(Matcher.ANY_OTHER);
return dispatcher.bytesToMove();
}
/** Decide all <source, target> pairs according to the matcher. */
private void chooseStorageGroups(final Matcher matcher) {
/* first step: match each overUtilized datanode (source) to
* one or more underUtilized datanodes (targets).
*/
chooseStorageGroups(overUtilized, underUtilized, matcher);
/* match each remaining overutilized datanode (source) to
* below average utilized datanodes (targets).
* Note only overutilized datanodes that haven't had that max bytes to move
* satisfied in step 1 are selected
*/
chooseStorageGroups(overUtilized, belowAvgUtilized, matcher);
/* match each remaining underutilized datanode (target) to
* above average utilized datanodes (source).
* Note only underutilized datanodes that have not had that max bytes to
* move satisfied in step 1 are selected.
*/
chooseStorageGroups(underUtilized, aboveAvgUtilized, matcher);
}
/**
* For each datanode, choose matching nodes from the candidates. Either the
* datanodes or the candidates are source nodes with (utilization > Avg), and
* the others are target nodes with (utilization < Avg).
*/
private <G extends StorageGroup, C extends StorageGroup>
void chooseStorageGroups(Collection<G> groups, Collection<C> candidates,
Matcher matcher) {
for(final Iterator<G> i = groups.iterator(); i.hasNext();) {
final G g = i.next();
for(; choose4One(g, candidates, matcher); );
if (!g.hasSpaceForScheduling()) {
i.remove();
}
}
}
/**
* For the given datanode, choose a candidate and then schedule it.
   * @return true if a candidate is chosen; false if no candidate is chosen.
*/
private <C extends StorageGroup> boolean choose4One(StorageGroup g,
Collection<C> candidates, Matcher matcher) {
final Iterator<C> i = candidates.iterator();
final C chosen = chooseCandidate(g, i, matcher);
if (chosen == null) {
return false;
}
if (g instanceof Source) {
matchSourceWithTargetToMove((Source)g, chosen);
} else {
matchSourceWithTargetToMove((Source)chosen, g);
}
if (!chosen.hasSpaceForScheduling()) {
i.remove();
}
return true;
}
private void matchSourceWithTargetToMove(Source source, StorageGroup target) {
long size = Math.min(source.availableSizeToMove(), target.availableSizeToMove());
final Task task = new Task(target, size);
source.addTask(task);
target.incScheduledSize(task.getSize());
dispatcher.add(source, target);
LOG.info("Decided to move "+StringUtils.byteDesc(size)+" bytes from "
+ source.getDisplayName() + " to " + target.getDisplayName());
}
/** Choose a candidate for the given datanode. */
private <G extends StorageGroup, C extends StorageGroup>
C chooseCandidate(G g, Iterator<C> candidates, Matcher matcher) {
if (g.hasSpaceForScheduling()) {
for(; candidates.hasNext(); ) {
final C c = candidates.next();
if (!c.hasSpaceForScheduling()) {
candidates.remove();
} else if (matcher.match(dispatcher.getCluster(),
g.getDatanodeInfo(), c.getDatanodeInfo())) {
return c;
}
}
}
return null;
}
/* reset all fields in a balancer preparing for the next iteration */
void resetData(Configuration conf) {
this.overUtilized.clear();
this.aboveAvgUtilized.clear();
this.belowAvgUtilized.clear();
this.underUtilized.clear();
this.policy.reset();
    dispatcher.reset(conf);
}
static class Result {
final ExitStatus exitStatus;
final long bytesLeftToMove;
final long bytesBeingMoved;
final long bytesAlreadyMoved;
Result(ExitStatus exitStatus, long bytesLeftToMove, long bytesBeingMoved,
long bytesAlreadyMoved) {
this.exitStatus = exitStatus;
this.bytesLeftToMove = bytesLeftToMove;
this.bytesBeingMoved = bytesBeingMoved;
this.bytesAlreadyMoved = bytesAlreadyMoved;
}
void print(int iteration, PrintStream out) {
out.printf("%-24s %10d %19s %18s %17s%n",
DateFormat.getDateTimeInstance().format(new Date()), iteration,
StringUtils.byteDesc(bytesAlreadyMoved),
StringUtils.byteDesc(bytesLeftToMove),
StringUtils.byteDesc(bytesBeingMoved));
}
}
Result newResult(ExitStatus exitStatus, long bytesLeftToMove, long bytesBeingMoved) {
return new Result(exitStatus, bytesLeftToMove, bytesBeingMoved,
dispatcher.getBytesMoved());
}
Result newResult(ExitStatus exitStatus) {
return new Result(exitStatus, -1, -1, dispatcher.getBytesMoved());
}
/** Run an iteration for all datanodes. */
Result runOneIteration() {
try {
final List<DatanodeStorageReport> reports = dispatcher.init();
final long bytesLeftToMove = init(reports);
if (bytesLeftToMove == 0) {
System.out.println("The cluster is balanced. Exiting...");
return newResult(ExitStatus.SUCCESS, bytesLeftToMove, -1);
} else {
LOG.info( "Need to move "+ StringUtils.byteDesc(bytesLeftToMove)
+ " to make the cluster balanced." );
}
// Should not run the balancer during an unfinalized upgrade, since moved
// blocks are not deleted on the source datanode.
if (!runDuringUpgrade && nnc.isUpgrading()) {
return newResult(ExitStatus.UNFINALIZED_UPGRADE, bytesLeftToMove, -1);
}
/* Decide all the nodes that will participate in the block move and
* the number of bytes that need to be moved from one node to another
       * in this iteration. Maximum bytes to be moved per node is
       * min(the threshold fraction of its capacity, MAX_SIZE_TO_MOVE).
*/
final long bytesBeingMoved = chooseStorageGroups();
if (bytesBeingMoved == 0) {
System.out.println("No block can be moved. Exiting...");
return newResult(ExitStatus.NO_MOVE_BLOCK, bytesLeftToMove, bytesBeingMoved);
} else {
LOG.info( "Will move " + StringUtils.byteDesc(bytesBeingMoved) +
" in this iteration");
}
      /* For each pair of <source, target>, start a thread that repeatedly
       * decides a block to be moved and its proxy source,
       * then initiates the move until all bytes are moved or no more blocks
       * are available to move.
       * Exit if no byte has been moved for 5 consecutive iterations.
*/
if (!dispatcher.dispatchAndCheckContinue()) {
return newResult(ExitStatus.NO_MOVE_PROGRESS, bytesLeftToMove, bytesBeingMoved);
}
return newResult(ExitStatus.IN_PROGRESS, bytesLeftToMove, bytesBeingMoved);
} catch (IllegalArgumentException e) {
System.out.println(e + ". Exiting ...");
return newResult(ExitStatus.ILLEGAL_ARGUMENTS);
} catch (IOException e) {
System.out.println(e + ". Exiting ...");
return newResult(ExitStatus.IO_EXCEPTION);
} catch (InterruptedException e) {
System.out.println(e + ". Exiting ...");
return newResult(ExitStatus.INTERRUPTED);
} finally {
dispatcher.shutdownNow();
}
}
/**
* Balance all namenodes.
* For each iteration,
* for each namenode,
* execute a {@link Balancer} to work through all datanodes once.
*/
static int run(Collection<URI> namenodes, final Parameters p,
Configuration conf) throws IOException, InterruptedException {
final long sleeptime =
conf.getLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT) * 2000 +
conf.getLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY,
DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT) * 1000;
LOG.info("namenodes = " + namenodes);
LOG.info("parameters = " + p);
System.out.println("Time Stamp Iteration# Bytes Already Moved Bytes Left To Move Bytes Being Moved");
List<NameNodeConnector> connectors = Collections.emptyList();
try {
connectors = NameNodeConnector.newNameNodeConnectors(namenodes,
Balancer.class.getSimpleName(), BALANCER_ID_PATH, conf, p.maxIdleIteration);
boolean done = false;
for(int iteration = 0; !done; iteration++) {
done = true;
Collections.shuffle(connectors);
for(NameNodeConnector nnc : connectors) {
final Balancer b = new Balancer(nnc, p, conf);
final Result r = b.runOneIteration();
r.print(iteration, System.out);
// clean all lists
b.resetData(conf);
if (r.exitStatus == ExitStatus.IN_PROGRESS) {
done = false;
} else if (r.exitStatus != ExitStatus.SUCCESS) {
          //must be an error status, return.
return r.exitStatus.getExitCode();
}
}
if (!done) {
Thread.sleep(sleeptime);
}
}
} finally {
for(NameNodeConnector nnc : connectors) {
IOUtils.cleanup(LOG, nnc);
}
}
return ExitStatus.SUCCESS.getExitCode();
}
  /* Given elapsedTime in ms, return a printable string */
private static String time2Str(long elapsedTime) {
String unit;
double time = elapsedTime;
if (elapsedTime < 1000) {
unit = "milliseconds";
} else if (elapsedTime < 60*1000) {
unit = "seconds";
time = time/1000;
} else if (elapsedTime < 3600*1000) {
unit = "minutes";
time = time/(60*1000);
} else {
unit = "hours";
time = time/(3600*1000);
}
return time+" "+unit;
}
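  // Illustrative examples (not part of the original sources): time2Str(500)
  // returns "500.0 milliseconds", time2Str(90000) returns "1.5 minutes", and
  // time2Str(7200000) returns "2.0 hours".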
static class Parameters {
static final Parameters DEFAULT = new Parameters(
BalancingPolicy.Node.INSTANCE, 10.0,
NameNodeConnector.DEFAULT_MAX_IDLE_ITERATIONS,
Collections.<String> emptySet(), Collections.<String> emptySet(),
false);
final BalancingPolicy policy;
final double threshold;
final int maxIdleIteration;
// exclude the nodes in this set from balancing operations
Set<String> nodesToBeExcluded;
//include only these nodes in balancing operations
Set<String> nodesToBeIncluded;
/**
* Whether to run the balancer during upgrade.
*/
final boolean runDuringUpgrade;
Parameters(BalancingPolicy policy, double threshold, int maxIdleIteration,
Set<String> nodesToBeExcluded, Set<String> nodesToBeIncluded,
boolean runDuringUpgrade) {
this.policy = policy;
this.threshold = threshold;
this.maxIdleIteration = maxIdleIteration;
this.nodesToBeExcluded = nodesToBeExcluded;
this.nodesToBeIncluded = nodesToBeIncluded;
this.runDuringUpgrade = runDuringUpgrade;
}
@Override
public String toString() {
return String.format("%s.%s [%s,"
+ " threshold = %s,"
+ " max idle iteration = %s, "
+ "number of nodes to be excluded = %s,"
+ " number of nodes to be included = %s,"
+ " run during upgrade = %s]",
Balancer.class.getSimpleName(), getClass().getSimpleName(),
policy, threshold, maxIdleIteration,
nodesToBeExcluded.size(), nodesToBeIncluded.size(),
runDuringUpgrade);
}
}
static class Cli extends Configured implements Tool {
/**
* Parse arguments and then run Balancer.
*
* @param args command specific arguments.
* @return exit code. 0 indicates success, non-zero indicates failure.
*/
@Override
public int run(String[] args) {
final long startTime = Time.monotonicNow();
final Configuration conf = getConf();
try {
checkReplicationPolicyCompatibility(conf);
final Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
return Balancer.run(namenodes, parse(args), conf);
} catch (IOException e) {
System.out.println(e + ". Exiting ...");
return ExitStatus.IO_EXCEPTION.getExitCode();
} catch (InterruptedException e) {
System.out.println(e + ". Exiting ...");
return ExitStatus.INTERRUPTED.getExitCode();
} finally {
System.out.format("%-24s ",
DateFormat.getDateTimeInstance().format(new Date()));
System.out.println("Balancing took "
+ time2Str(Time.monotonicNow() - startTime));
}
}
/** parse command line arguments */
static Parameters parse(String[] args) {
BalancingPolicy policy = Parameters.DEFAULT.policy;
double threshold = Parameters.DEFAULT.threshold;
int maxIdleIteration = Parameters.DEFAULT.maxIdleIteration;
Set<String> nodesTobeExcluded = Parameters.DEFAULT.nodesToBeExcluded;
Set<String> nodesTobeIncluded = Parameters.DEFAULT.nodesToBeIncluded;
boolean runDuringUpgrade = Parameters.DEFAULT.runDuringUpgrade;
if (args != null) {
try {
for(int i = 0; i < args.length; i++) {
if ("-threshold".equalsIgnoreCase(args[i])) {
checkArgument(++i < args.length,
"Threshold value is missing: args = " + Arrays.toString(args));
try {
threshold = Double.parseDouble(args[i]);
if (threshold < 1 || threshold > 100) {
throw new IllegalArgumentException(
"Number out of range: threshold = " + threshold);
}
LOG.info( "Using a threshold of " + threshold );
} catch(IllegalArgumentException e) {
System.err.println(
"Expecting a number in the range of [1.0, 100.0]: "
+ args[i]);
throw e;
}
} else if ("-policy".equalsIgnoreCase(args[i])) {
checkArgument(++i < args.length,
"Policy value is missing: args = " + Arrays.toString(args));
try {
policy = BalancingPolicy.parse(args[i]);
} catch(IllegalArgumentException e) {
System.err.println("Illegal policy name: " + args[i]);
throw e;
}
} else if ("-exclude".equalsIgnoreCase(args[i])) {
checkArgument(++i < args.length,
"List of nodes to exclude | -f <filename> is missing: args = "
+ Arrays.toString(args));
if ("-f".equalsIgnoreCase(args[i])) {
checkArgument(++i < args.length,
"File containing nodes to exclude is not specified: args = "
+ Arrays.toString(args));
nodesTobeExcluded = Util.getHostListFromFile(args[i], "exclude");
} else {
nodesTobeExcluded = Util.parseHostList(args[i]);
}
} else if ("-include".equalsIgnoreCase(args[i])) {
checkArgument(++i < args.length,
"List of nodes to include | -f <filename> is missing: args = "
+ Arrays.toString(args));
if ("-f".equalsIgnoreCase(args[i])) {
checkArgument(++i < args.length,
"File containing nodes to include is not specified: args = "
+ Arrays.toString(args));
nodesTobeIncluded = Util.getHostListFromFile(args[i], "include");
} else {
nodesTobeIncluded = Util.parseHostList(args[i]);
}
} else if ("-idleiterations".equalsIgnoreCase(args[i])) {
checkArgument(++i < args.length,
"idleiterations value is missing: args = " + Arrays
.toString(args));
maxIdleIteration = Integer.parseInt(args[i]);
LOG.info("Using a idleiterations of " + maxIdleIteration);
} else if ("-runDuringUpgrade".equalsIgnoreCase(args[i])) {
runDuringUpgrade = true;
LOG.info("Will run the balancer even during an ongoing HDFS "
+ "upgrade. Most users will not want to run the balancer "
+ "during an upgrade since it will not affect used space "
+ "on over-utilized machines.");
} else {
throw new IllegalArgumentException("args = "
+ Arrays.toString(args));
}
}
checkArgument(nodesTobeExcluded.isEmpty() || nodesTobeIncluded.isEmpty(),
"-exclude and -include options cannot be specified together.");
} catch(RuntimeException e) {
printUsage(System.err);
throw e;
}
}
return new Parameters(policy, threshold, maxIdleIteration,
nodesTobeExcluded, nodesTobeIncluded, runDuringUpgrade);
}
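    // Illustrative example (not part of the original sources): an invocation
    // such as "hdfs balancer -threshold 5 -exclude dn1.example.com" (the host
    // name is hypothetical) is parsed into a Parameters object with
    // threshold = 5.0, the default policy and idle-iteration limit, and a
    // one-element exclude set.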
private static void printUsage(PrintStream out) {
out.println(USAGE + "\n");
}
}
/**
* Run a balancer
* @param args Command line arguments
*/
public static void main(String[] args) {
if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) {
System.exit(0);
}
try {
System.exit(ToolRunner.run(new HdfsConfiguration(), new Cli(), args));
} catch (Throwable e) {
LOG.error("Exiting balancer due an exception", e);
System.exit(-1);
}
}
}
| 33,592 | 39.135006 | 122 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/BalancingPolicy.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.balancer;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.hdfs.util.EnumCounters;
import org.apache.hadoop.hdfs.util.EnumDoubles;
/**
* Balancing policy.
* Since a datanode may contain multiple block pools,
* {@link Pool} implies {@link Node}
 * but NOT the other way around.
*/
@InterfaceAudience.Private
abstract class BalancingPolicy {
final EnumCounters<StorageType> totalCapacities
= new EnumCounters<StorageType>(StorageType.class);
final EnumCounters<StorageType> totalUsedSpaces
= new EnumCounters<StorageType>(StorageType.class);
final EnumDoubles<StorageType> avgUtilizations
= new EnumDoubles<StorageType>(StorageType.class);
void reset() {
totalCapacities.reset();
totalUsedSpaces.reset();
avgUtilizations.reset();
}
/** Get the policy name. */
abstract String getName();
/** Accumulate used space and capacity. */
abstract void accumulateSpaces(DatanodeStorageReport r);
void initAvgUtilization() {
for(StorageType t : StorageType.asList()) {
final long capacity = totalCapacities.get(t);
if (capacity > 0L) {
final double avg = totalUsedSpaces.get(t)*100.0/capacity;
avgUtilizations.set(t, avg);
}
}
}
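  // Illustrative worked example (not part of the original sources): if the
  // DISK storages of a hypothetical cluster report 100 TB of total capacity
  // and 40 TB of used space, initAvgUtilization() records an average DISK
  // utilization of 40 * 100.0 / 100 = 40%, which the Balancer later compares
  // against each datanode's own utilization.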
double getAvgUtilization(StorageType t) {
return avgUtilizations.get(t);
}
/** @return the utilization of a particular storage type of a datanode;
* or return null if the datanode does not have such storage type.
*/
abstract Double getUtilization(DatanodeStorageReport r, StorageType t);
@Override
public String toString() {
return BalancingPolicy.class.getSimpleName()
+ "." + getClass().getSimpleName();
}
  /** Parse the given string and return the matching {@link BalancingPolicy} instance. */
static BalancingPolicy parse(String s) {
final BalancingPolicy [] all = {BalancingPolicy.Node.INSTANCE,
BalancingPolicy.Pool.INSTANCE};
for(BalancingPolicy p : all) {
if (p.getName().equalsIgnoreCase(s))
return p;
}
throw new IllegalArgumentException("Cannot parse string \"" + s + "\"");
}
/**
* Cluster is balanced if each node is balanced.
*/
static class Node extends BalancingPolicy {
static final Node INSTANCE = new Node();
private Node() {}
@Override
String getName() {
return "datanode";
}
@Override
void accumulateSpaces(DatanodeStorageReport r) {
for(StorageReport s : r.getStorageReports()) {
final StorageType t = s.getStorage().getStorageType();
totalCapacities.add(t, s.getCapacity());
totalUsedSpaces.add(t, s.getDfsUsed());
}
}
@Override
Double getUtilization(DatanodeStorageReport r, final StorageType t) {
long capacity = 0L;
long dfsUsed = 0L;
for(StorageReport s : r.getStorageReports()) {
if (s.getStorage().getStorageType() == t) {
capacity += s.getCapacity();
dfsUsed += s.getDfsUsed();
}
}
return capacity == 0L? null: dfsUsed*100.0/capacity;
}
}
/**
* Cluster is balanced if each pool in each node is balanced.
*/
static class Pool extends BalancingPolicy {
static final Pool INSTANCE = new Pool();
private Pool() {}
@Override
String getName() {
return "blockpool";
}
@Override
void accumulateSpaces(DatanodeStorageReport r) {
for(StorageReport s : r.getStorageReports()) {
final StorageType t = s.getStorage().getStorageType();
totalCapacities.add(t, s.getCapacity());
totalUsedSpaces.add(t, s.getBlockPoolUsed());
}
}
@Override
Double getUtilization(DatanodeStorageReport r, final StorageType t) {
long capacity = 0L;
long blockPoolUsed = 0L;
for(StorageReport s : r.getStorageReports()) {
if (s.getStorage().getStorageType() == t) {
capacity += s.getCapacity();
blockPoolUsed += s.getBlockPoolUsed();
}
}
return capacity == 0L? null: blockPoolUsed*100.0/capacity;
}
}
}
| 5,090 | 30.81875 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Matcher.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.balancer;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
/** A matcher interface for matching nodes. */
public interface Matcher {
/** Given the cluster topology, does the left node match the right node? */
public boolean match(NetworkTopology cluster, Node left, Node right);
/** Match datanodes in the same node group. */
public static final Matcher SAME_NODE_GROUP = new Matcher() {
@Override
public boolean match(NetworkTopology cluster, Node left, Node right) {
return cluster.isOnSameNodeGroup(left, right);
}
@Override
public String toString() {
return "SAME_NODE_GROUP";
}
};
/** Match datanodes in the same rack. */
public static final Matcher SAME_RACK = new Matcher() {
@Override
public boolean match(NetworkTopology cluster, Node left, Node right) {
return cluster.isOnSameRack(left, right);
}
@Override
public String toString() {
return "SAME_RACK";
}
};
/** Match any datanode with any other datanode. */
public static final Matcher ANY_OTHER = new Matcher() {
@Override
public boolean match(NetworkTopology cluster, Node left, Node right) {
return left != right;
}
@Override
public String toString() {
return "ANY_OTHER";
}
};
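  // Illustrative note (not part of the original sources): the Balancer
  // consults these matchers in order (SAME_NODE_GROUP when the topology is
  // node-group aware, then SAME_RACK, then ANY_OTHER), so that block moves
  // prefer the closest source/target pairs first.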
}
| 2,161 | 31.757576 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.balancer;
import java.io.Closeable;
import java.io.IOException;
import java.io.OutputStream;
import java.net.InetAddress;
import java.net.URI;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.RemoteException;
import com.google.common.annotations.VisibleForTesting;
/**
* The class provides utilities for accessing a NameNode.
*/
@InterfaceAudience.Private
public class NameNodeConnector implements Closeable {
private static final Log LOG = LogFactory.getLog(NameNodeConnector.class);
public static final int DEFAULT_MAX_IDLE_ITERATIONS = 5;
private static boolean write2IdFile = true;
/** Create {@link NameNodeConnector} for the given namenodes. */
public static List<NameNodeConnector> newNameNodeConnectors(
Collection<URI> namenodes, String name, Path idPath, Configuration conf,
int maxIdleIterations) throws IOException {
final List<NameNodeConnector> connectors = new ArrayList<NameNodeConnector>(
namenodes.size());
for (URI uri : namenodes) {
NameNodeConnector nnc = new NameNodeConnector(name, uri, idPath,
null, conf, maxIdleIterations);
nnc.getKeyManager().startBlockKeyUpdater();
connectors.add(nnc);
}
return connectors;
}
public static List<NameNodeConnector> newNameNodeConnectors(
Map<URI, List<Path>> namenodes, String name, Path idPath,
Configuration conf, int maxIdleIterations) throws IOException {
final List<NameNodeConnector> connectors = new ArrayList<NameNodeConnector>(
namenodes.size());
for (Map.Entry<URI, List<Path>> entry : namenodes.entrySet()) {
NameNodeConnector nnc = new NameNodeConnector(name, entry.getKey(),
idPath, entry.getValue(), conf, maxIdleIterations);
nnc.getKeyManager().startBlockKeyUpdater();
connectors.add(nnc);
}
return connectors;
}
@VisibleForTesting
public static void setWrite2IdFile(boolean write2IdFile) {
NameNodeConnector.write2IdFile = write2IdFile;
}
private final URI nameNodeUri;
private final String blockpoolID;
private final NamenodeProtocol namenode;
private final ClientProtocol client;
private final KeyManager keyManager;
final AtomicBoolean fallbackToSimpleAuth = new AtomicBoolean(false);
private final DistributedFileSystem fs;
private final Path idPath;
private final OutputStream out;
private final List<Path> targetPaths;
private final AtomicLong bytesMoved = new AtomicLong();
private final int maxNotChangedIterations;
private int notChangedIterations = 0;
public NameNodeConnector(String name, URI nameNodeUri, Path idPath,
List<Path> targetPaths, Configuration conf,
int maxNotChangedIterations)
throws IOException {
this.nameNodeUri = nameNodeUri;
this.idPath = idPath;
this.targetPaths = targetPaths == null || targetPaths.isEmpty() ? Arrays
.asList(new Path("/")) : targetPaths;
this.maxNotChangedIterations = maxNotChangedIterations;
this.namenode = NameNodeProxies.createProxy(conf, nameNodeUri,
NamenodeProtocol.class).getProxy();
this.client = NameNodeProxies.createProxy(conf, nameNodeUri,
ClientProtocol.class, fallbackToSimpleAuth).getProxy();
this.fs = (DistributedFileSystem)FileSystem.get(nameNodeUri, conf);
final NamespaceInfo namespaceinfo = namenode.versionRequest();
this.blockpoolID = namespaceinfo.getBlockPoolID();
final FsServerDefaults defaults = fs.getServerDefaults(new Path("/"));
this.keyManager = new KeyManager(blockpoolID, namenode,
defaults.getEncryptDataTransfer(), conf);
    // mark this instance as running; tests may disable writing the hostname
    // to the id file via setWrite2IdFile(false)
out = checkAndMarkRunning();
if (out == null) {
// Exit if there is another one running.
throw new IOException("Another " + name + " is running.");
}
}
public DistributedFileSystem getDistributedFileSystem() {
return fs;
}
/** @return the block pool ID */
public String getBlockpoolID() {
return blockpoolID;
}
AtomicLong getBytesMoved() {
return bytesMoved;
}
/** @return blocks with locations. */
public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size)
throws IOException {
return namenode.getBlocks(datanode, size);
}
/**
* @return true if an upgrade is in progress, false if not.
* @throws IOException
*/
public boolean isUpgrading() throws IOException {
// fsimage upgrade
final boolean isUpgrade = !namenode.isUpgradeFinalized();
// rolling upgrade
RollingUpgradeInfo info = fs.rollingUpgrade(
HdfsConstants.RollingUpgradeAction.QUERY);
final boolean isRollingUpgrade = (info != null && !info.isFinalized());
return (isUpgrade || isRollingUpgrade);
}
/** @return live datanode storage reports. */
public DatanodeStorageReport[] getLiveDatanodeStorageReport()
throws IOException {
return client.getDatanodeStorageReport(DatanodeReportType.LIVE);
}
/** @return the key manager */
public KeyManager getKeyManager() {
return keyManager;
}
/** @return the list of paths to scan/migrate */
public List<Path> getTargetPaths() {
return targetPaths;
}
/** Should the instance continue running? */
public boolean shouldContinue(long dispatchBlockMoveBytes) {
if (dispatchBlockMoveBytes > 0) {
notChangedIterations = 0;
} else {
notChangedIterations++;
if (LOG.isDebugEnabled()) {
LOG.debug("No block has been moved for " +
notChangedIterations + " iterations, " +
"maximum notChangedIterations before exit is: " +
((maxNotChangedIterations >= 0) ? maxNotChangedIterations : "Infinite"));
}
if ((maxNotChangedIterations >= 0) &&
(notChangedIterations >= maxNotChangedIterations)) {
System.out.println("No block has been moved for "
+ notChangedIterations + " iterations. Exiting...");
return false;
}
}
return true;
}
/**
   * The idea for making sure that there is no more than one instance
   * running in an HDFS cluster is to create a file in HDFS, write the
   * hostname of the machine on which the instance is running to the file,
   * and not close the file until the instance exits.
   *
   * This prevents a second instance from running because it cannot
   * create the file while the first one is running.
*
* This method checks if there is any running instance. If no, mark yes.
* Note that this is an atomic operation.
*
* @return null if there is a running instance;
* otherwise, the output stream to the newly created file.
*/
private OutputStream checkAndMarkRunning() throws IOException {
try {
if (fs.exists(idPath)) {
// try appending to it so that it will fail fast if another balancer is
// running.
IOUtils.closeStream(fs.append(idPath));
fs.delete(idPath, true);
}
final FSDataOutputStream fsout = fs.create(idPath, false);
// mark balancer idPath to be deleted during filesystem closure
fs.deleteOnExit(idPath);
if (write2IdFile) {
fsout.writeBytes(InetAddress.getLocalHost().getHostName());
fsout.hflush();
}
return fsout;
} catch(RemoteException e) {
if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){
return null;
} else {
throw e;
}
}
}
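  // Note (added for clarity, not part of the original sources): when another
  // instance already holds the id file open for write, the append or exclusive
  // create above fails with AlreadyBeingCreatedException, which is translated
  // into a null return and ultimately into the "Another ... is running."
  // IOException thrown by the constructor.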
@Override
public void close() {
keyManager.close();
// close the output file
IOUtils.closeStream(out);
if (fs != null) {
try {
fs.delete(idPath, true);
} catch(IOException ioe) {
LOG.warn("Failed to delete " + idPath, ioe);
}
}
}
@Override
public String toString() {
return getClass().getSimpleName() + "[namenodeUri=" + nameNodeUri
+ ", bpid=" + blockpoolID + "]";
}
}
| 10,131 | 34.80212 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/ExitStatus.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.balancer;
/**
* Exit status - The values associated with each exit status is directly mapped
* to the process's exit code in command line.
*/
public enum ExitStatus {
SUCCESS(0),
IN_PROGRESS(1),
ALREADY_RUNNING(-1),
NO_MOVE_BLOCK(-2),
NO_MOVE_PROGRESS(-3),
IO_EXCEPTION(-4),
ILLEGAL_ARGUMENTS(-5),
INTERRUPTED(-6),
UNFINALIZED_UPGRADE(-7);
private final int code;
private ExitStatus(int code) {
this.code = code;
}
/** @return the command line exit code. */
public int getExitCode() {
return code;
}
}
| 1,393 | 29.977778 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Null.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
public class Null {
public Null() { }
}
| 849 | 39.47619 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/HTestCase.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test;
import static org.junit.Assert.fail;
import java.text.MessageFormat;
import org.apache.hadoop.util.Time;
import org.junit.Rule;
import org.junit.rules.MethodRule;
public abstract class HTestCase {
public static final String TEST_WAITFOR_RATIO_PROP = "test.waitfor.ratio";
static {
SysPropsForTestsLoader.init();
}
private static float WAITFOR_RATIO_DEFAULT = Float.parseFloat(System.getProperty(TEST_WAITFOR_RATIO_PROP, "1"));
private float waitForRatio = WAITFOR_RATIO_DEFAULT;
@Rule
public MethodRule testDir = new TestDirHelper();
@Rule
public MethodRule jettyTestHelper = new TestJettyHelper();
@Rule
public MethodRule exceptionHelper = new TestExceptionHelper();
/**
* Sets the 'wait for ratio' used in the {@link #sleep(long)},
* {@link #waitFor(int, Predicate)} and
   * {@link #waitFor(int, boolean, Predicate)} methods for the current
   * test class.
   * <p/>
   * This is useful when running time-sensitive tests on slow machines.
*
* @param ratio the 'wait for ratio' to set.
*/
protected void setWaitForRatio(float ratio) {
waitForRatio = ratio;
}
  /**
* Returns the 'wait for ratio' used in the {@link #sleep(long)},
* {@link #waitFor(int, Predicate)} and
* {@link #waitFor(int, boolean, Predicate)} methods for the current
* test class.
* <p/>
   * This is useful when running time-sensitive tests on slow machines.
* <p/>
* The default value is obtained from the Java System property
   * <code>test.waitfor.ratio</code> which defaults to <code>1</code>.
*
* @return the 'wait for ratio' for the current test class.
*/
protected float getWaitForRatio() {
return waitForRatio;
}
/**
* A predicate 'closure' used by the {@link #waitFor(int, Predicate)} and
* {@link #waitFor(int, boolean, Predicate)} methods.
*/
public static interface Predicate {
/**
* Perform a predicate evaluation.
*
* @return the boolean result of the evaluation.
*
     * @throws Exception thrown if the predicate could not be evaluated.
*/
public boolean evaluate() throws Exception;
}
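  // Illustrative usage (not part of the original sources): a test can wait
  // for an asynchronous condition with an anonymous Predicate, for example
  //   waitFor(5000, new Predicate() {
  //     @Override
  //     public boolean evaluate() throws Exception {
  //       return someCondition();  // hypothetical condition checked by the test
  //     }
  //   });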
/**
* Makes the current thread sleep for the specified number of milliseconds.
* <p/>
* The sleep time is multiplied by the {@link #getWaitForRatio()}.
*
* @param time the number of milliseconds to sleep.
*/
protected void sleep(long time) {
try {
Thread.sleep((long) (getWaitForRatio() * time));
} catch (InterruptedException ex) {
System.err.println(MessageFormat.format("Sleep interrupted, {0}", ex.toString()));
}
}
/**
* Waits up to the specified timeout for the given {@link Predicate} to
   * become <code>true</code>. The test is not failed if the timeout is
   * reached and the Predicate is still <code>false</code>; the method simply
   * returns <code>-1</code>.
* <p/>
* The timeout time is multiplied by the {@link #getWaitForRatio()}.
*
* @param timeout the timeout in milliseconds to wait for the predicate.
   * @param predicate the predicate to evaluate.
*
   * @return the effective wait, in milliseconds, until the predicate becomes
   * <code>true</code>.
*/
protected long waitFor(int timeout, Predicate predicate) {
return waitFor(timeout, false, predicate);
}
/**
* Waits up to the specified timeout for the given {@link Predicate} to
* become <code>true</code>.
* <p/>
* The timeout time is multiplied by the {@link #getWaitForRatio()}.
*
* @param timeout the timeout in milliseconds to wait for the predicate.
* @param failIfTimeout indicates if the test should be failed if the
* predicate times out.
   * @param predicate the predicate to evaluate.
*
   * @return the effective wait, in milliseconds, until the predicate becomes
   * <code>true</code>, or <code>-1</code> if the predicate did not evaluate
   * to <code>true</code>.
*/
protected long waitFor(int timeout, boolean failIfTimeout, Predicate predicate) {
long started = Time.now();
long mustEnd = Time.now() + (long) (getWaitForRatio() * timeout);
long lastEcho = 0;
try {
long waiting = mustEnd - Time.now();
System.out.println(MessageFormat.format("Waiting up to [{0}] msec", waiting));
boolean eval;
while (!(eval = predicate.evaluate()) && Time.now() < mustEnd) {
if ((Time.now() - lastEcho) > 5000) {
waiting = mustEnd - Time.now();
System.out.println(MessageFormat.format("Waiting up to [{0}] msec", waiting));
lastEcho = Time.now();
}
Thread.sleep(100);
}
if (!eval) {
if (failIfTimeout) {
fail(MessageFormat.format("Waiting timed out after [{0}] msec", timeout));
} else {
System.out.println(MessageFormat.format("Waiting timed out after [{0}] msec", timeout));
}
}
return (eval) ? Time.now() - started : -1;
} catch (Exception ex) {
throw new RuntimeException(ex);
}
}
}
| 5,855 | 32.084746 | 114 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/KerberosTestUtils.java
|
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.test;
import javax.security.auth.Subject;
import javax.security.auth.kerberos.KerberosPrincipal;
import javax.security.auth.login.AppConfigurationEntry;
import javax.security.auth.login.Configuration;
import javax.security.auth.login.LoginContext;
import org.apache.hadoop.security.authentication.util.KerberosUtil;
import java.io.File;
import java.security.Principal;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Callable;
/**
* Test helper class for Java Kerberos setup.
*/
public class KerberosTestUtils {
private static final String PREFIX = "httpfs.test.";
public static final String REALM = PREFIX + "kerberos.realm";
public static final String CLIENT_PRINCIPAL =
PREFIX + "kerberos.client.principal";
public static final String SERVER_PRINCIPAL =
PREFIX + "kerberos.server.principal";
public static final String KEYTAB_FILE = PREFIX + "kerberos.keytab.file";
public static String getRealm() {
return System.getProperty(REALM, "LOCALHOST");
}
public static String getClientPrincipal() {
return System.getProperty(CLIENT_PRINCIPAL, "client") + "@" + getRealm();
}
public static String getServerPrincipal() {
return System.getProperty(SERVER_PRINCIPAL,
"HTTP/localhost") + "@" + getRealm();
}
public static String getKeytabFile() {
String keytabFile =
new File(System.getProperty("user.home"),
System.getProperty("user.name") + ".keytab").toString();
return System.getProperty(KEYTAB_FILE, keytabFile);
}
private static class KerberosConfiguration extends Configuration {
private String principal;
public KerberosConfiguration(String principal) {
this.principal = principal;
}
@Override
public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
Map<String, String> options = new HashMap<String, String>();
options.put("keyTab", KerberosTestUtils.getKeytabFile());
options.put("principal", principal);
options.put("useKeyTab", "true");
options.put("storeKey", "true");
options.put("doNotPrompt", "true");
options.put("useTicketCache", "true");
options.put("renewTGT", "true");
options.put("refreshKrb5Config", "true");
options.put("isInitiator", "true");
String ticketCache = System.getenv("KRB5CCNAME");
if (ticketCache != null) {
options.put("ticketCache", ticketCache);
}
options.put("debug", "true");
return new AppConfigurationEntry[]{
new AppConfigurationEntry(KerberosUtil.getKrb5LoginModuleName(),
AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
options),};
}
}
public static <T> T doAs(String principal, final Callable<T> callable)
throws Exception {
LoginContext loginContext = null;
try {
Set<Principal> principals = new HashSet<Principal>();
principals.add(
new KerberosPrincipal(KerberosTestUtils.getClientPrincipal()));
Subject subject = new Subject(false, principals, new HashSet<Object>(),
new HashSet<Object>());
loginContext = new LoginContext("", subject, null,
new KerberosConfiguration(principal));
loginContext.login();
subject = loginContext.getSubject();
return Subject.doAs(subject, new PrivilegedExceptionAction<T>() {
@Override
public T run() throws Exception {
return callable.call();
}
});
} catch (PrivilegedActionException ex) {
throw ex.getException();
} finally {
if (loginContext != null) {
loginContext.logout();
}
}
}
public static <T> T doAsClient(Callable<T> callable) throws Exception {
return doAs(getClientPrincipal(), callable);
}
public static <T> T doAsServer(Callable<T> callable) throws Exception {
return doAs(getServerPrincipal(), callable);
}
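  // Illustrative usage (not part of the original sources): a test can execute
  // an action with the client principal's Kerberos credentials, for example
  //   String result = KerberosTestUtils.doAsClient(new Callable<String>() {
  //     @Override
  //     public String call() throws Exception {
  //       return issueAuthenticatedRequest();  // hypothetical test action
  //     }
  //   });
  // assuming the realm, principals and keytab configured via the
  // "httpfs.test.*" system properties above.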
}
| 4,779 | 33.388489 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHTestCase.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.util.Time;
import org.junit.Test;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.servlet.Context;
public class TestHTestCase extends HTestCase {
@Test(expected = IllegalStateException.class)
public void testDirNoAnnotation() throws Exception {
TestDirHelper.getTestDir();
}
@Test(expected = IllegalStateException.class)
public void testJettyNoAnnotation() throws Exception {
TestJettyHelper.getJettyServer();
}
@Test(expected = IllegalStateException.class)
public void testJettyNoAnnotation2() throws Exception {
TestJettyHelper.getJettyURL();
}
@Test
@TestDir
public void testDirAnnotation() throws Exception {
assertNotNull(TestDirHelper.getTestDir());
}
@Test
public void waitFor() {
long start = Time.now();
long waited = waitFor(1000, new Predicate() {
@Override
public boolean evaluate() throws Exception {
return true;
}
});
long end = Time.now();
assertEquals(waited, 0, 50);
assertEquals(end - start - waited, 0, 50);
}
@Test
public void waitForTimeOutRatio1() {
setWaitForRatio(1);
long start = Time.now();
long waited = waitFor(200, new Predicate() {
@Override
public boolean evaluate() throws Exception {
return false;
}
});
long end = Time.now();
assertEquals(waited, -1);
assertEquals(end - start, 200, 50);
}
@Test
public void waitForTimeOutRatio2() {
setWaitForRatio(2);
long start = Time.now();
long waited = waitFor(200, new Predicate() {
@Override
public boolean evaluate() throws Exception {
return false;
}
});
long end = Time.now();
assertEquals(waited, -1);
assertEquals(end - start, 200 * getWaitForRatio(), 50 * getWaitForRatio());
}
@Test
public void sleepRatio1() {
setWaitForRatio(1);
long start = Time.now();
sleep(100);
long end = Time.now();
assertEquals(end - start, 100, 50);
}
@Test
public void sleepRatio2() {
setWaitForRatio(1);
long start = Time.now();
sleep(100);
long end = Time.now();
assertEquals(end - start, 100 * getWaitForRatio(), 50 * getWaitForRatio());
}
public static class MyServlet extends HttpServlet {
@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
resp.getWriter().write("foo");
}
}
@Test
@TestJetty
public void testJetty() throws Exception {
Context context = new Context();
context.setContextPath("/");
context.addServlet(MyServlet.class, "/bar");
Server server = TestJettyHelper.getJettyServer();
server.addHandler(context);
server.start();
URL url = new URL(TestJettyHelper.getJettyURL(), "/bar");
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
assertEquals(reader.readLine(), "foo");
reader.close();
}
@Test
@TestException(exception = RuntimeException.class)
public void testException0() {
throw new RuntimeException("foo");
}
@Test
@TestException(exception = RuntimeException.class, msgRegExp = ".o.")
public void testException1() {
throw new RuntimeException("foo");
}
}
| 4,658 | 27.759259 | 113 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/HadoopUsersConfTestHelper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
/**
* Helper to configure FileSystemAccess user/group and proxyuser
* configuration for testing using Java System properties.
* <p/>
 * It uses the {@link SysPropsForTestsLoader} to load Java System
* properties for testing.
*/
public class HadoopUsersConfTestHelper {
static {
SysPropsForTestsLoader.init();
}
public static final String HADOOP_PROXYUSER = "test.hadoop.proxyuser";
public static final String HADOOP_PROXYUSER_HOSTS = "test.hadoop.proxyuser.hosts";
public static final String HADOOP_PROXYUSER_GROUPS = "test.hadoop.proxyuser.groups";
public static final String HADOOP_USER_PREFIX = "test.hadoop.user.";
/**
* Returns a valid FileSystemAccess proxyuser for the FileSystemAccess cluster.
* <p/>
* The user is read from the Java System property
* <code>test.hadoop.proxyuser</code> which defaults to the current user
* (java System property <code>user.name</code>).
* <p/>
* This property should be set in the <code>test.properties</code> file.
* <p/>
* When running FileSystemAccess minicluster it is used to configure the FileSystemAccess minicluster.
* <p/>
* When using an external FileSystemAccess cluster, it is expected this property is set to
* a valid proxy user.
*
* @return a valid FileSystemAccess proxyuser for the FileSystemAccess cluster.
*/
public static String getHadoopProxyUser() {
return System.getProperty(HADOOP_PROXYUSER, System.getProperty("user.name"));
}
/**
* Returns the hosts for the FileSystemAccess proxyuser settings.
* <p/>
* The hosts are read from the Java System property
* <code>test.hadoop.proxyuser.hosts</code> which defaults to <code>*</code>.
* <p/>
* This property should be set in the <code>test.properties</code> file.
* <p/>
* This property is ONLY used when running FileSystemAccess minicluster, it is used to
* configure the FileSystemAccess minicluster.
* <p/>
* When using an external FileSystemAccess cluster this property is ignored.
*
* @return the hosts for the FileSystemAccess proxyuser settings.
*/
public static String getHadoopProxyUserHosts() {
return System.getProperty(HADOOP_PROXYUSER_HOSTS, "*");
}
/**
* Returns the groups for the FileSystemAccess proxyuser settings.
* <p/>
 * The groups are read from the Java System property
* <code>test.hadoop.proxyuser.groups</code> which defaults to <code>*</code>.
* <p/>
* This property should be set in the <code>test.properties</code> file.
* <p/>
* This property is ONLY used when running FileSystemAccess minicluster, it is used to
* configure the FileSystemAccess minicluster.
* <p/>
* When using an external FileSystemAccess cluster this property is ignored.
*
* @return the groups for the FileSystemAccess proxyuser settings.
*/
public static String getHadoopProxyUserGroups() {
return System.getProperty(HADOOP_PROXYUSER_GROUPS, "*");
}
private static final String[] DEFAULT_USERS = new String[]{"user1", "user2"};
private static final String[] DEFAULT_USERS_GROUP = new String[]{"group1", "supergroup"};
/**
* Returns the FileSystemAccess users to be used for tests. These users are defined
* in the <code>test.properties</code> file in properties of the form
* <code>test.hadoop.user.#USER#=#GROUP1#,#GROUP2#,...</code>.
* <p/>
* These properties are used to configure the FileSystemAccess minicluster user/group
* information.
* <p/>
* When using an external FileSystemAccess cluster these properties should match the
* user/groups settings in the cluster.
*
* @return the FileSystemAccess users used for testing.
*/
public static String[] getHadoopUsers() {
List<String> users = new ArrayList<String>();
for (String name : System.getProperties().stringPropertyNames()) {
if (name.startsWith(HADOOP_USER_PREFIX)) {
users.add(name.substring(HADOOP_USER_PREFIX.length()));
}
}
return (users.size() != 0) ? users.toArray(new String[users.size()]) : DEFAULT_USERS;
}
/**
* Returns the groups a FileSystemAccess user belongs to during tests. These users/groups
* are defined in the <code>test.properties</code> file in properties of the
* form <code>test.hadoop.user.#USER#=#GROUP1#,#GROUP2#,...</code>.
* <p/>
* These properties are used to configure the FileSystemAccess minicluster user/group
* information.
* <p/>
* When using an external FileSystemAccess cluster these properties should match the
* user/groups settings in the cluster.
*
 * @param user user name to get groups for.
*
* @return the groups of FileSystemAccess users used for testing.
*/
public static String[] getHadoopUserGroups(String user) {
if (getHadoopUsers() == DEFAULT_USERS) {
for (String defaultUser : DEFAULT_USERS) {
if (defaultUser.equals(user)) {
return DEFAULT_USERS_GROUP;
}
}
return new String[0];
} else {
String groups = System.getProperty(HADOOP_USER_PREFIX + user);
return (groups != null) ? groups.split(",") : new String[0];
}
}
public static Configuration getBaseConf() {
Configuration conf = new Configuration();
for (String name : System.getProperties().stringPropertyNames()) {
conf.set(name, System.getProperty(name));
}
return conf;
}
public static void addUserConf(Configuration conf) {
conf.set("hadoop.security.authentication", "simple");
conf.set("hadoop.proxyuser." + HadoopUsersConfTestHelper.getHadoopProxyUser() + ".hosts",
HadoopUsersConfTestHelper.getHadoopProxyUserHosts());
conf.set("hadoop.proxyuser." + HadoopUsersConfTestHelper.getHadoopProxyUser() + ".groups",
HadoopUsersConfTestHelper.getHadoopProxyUserGroups());
for (String user : HadoopUsersConfTestHelper.getHadoopUsers()) {
String[] groups = HadoopUsersConfTestHelper.getHadoopUserGroups(user);
UserGroupInformation.createUserForTesting(user, groups);
}
}
}
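/*
 * Illustrative sketch (not part of the original source): demonstrates the
 * test.hadoop.user.#USER# property convention documented above. The user and group
 * values set here are hypothetical; in a real run they would normally come from the
 * test.properties file loaded by SysPropsForTestsLoader.
 */
class HadoopUsersConfTestHelperUsageSketch {
  public static void main(String[] args) {
    System.setProperty(HadoopUsersConfTestHelper.HADOOP_USER_PREFIX + "alice",
        "group1,group2");
    for (String user : HadoopUsersConfTestHelper.getHadoopUsers()) {
      // Prints each configured user with the groups parsed from its property value.
      System.out.println(user + " -> "
          + java.util.Arrays.toString(HadoopUsersConfTestHelper.getHadoopUserGroups(user)));
    }
  }
}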
| 7,024 | 37.387978 | 104 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test;
import java.lang.annotation.Retention;
import java.lang.annotation.Target;
/**
* Annotation for {@link HTestCase} subclasses to indicate that the test method
* requires a FileSystemAccess cluster.
* <p/>
 * The {@link TestHdfsHelper#getHdfsConf()} method returns a FileSystemAccess
 * <code>Configuration</code> preconfigured to connect to the FileSystemAccess test
 * minicluster or to an external FileSystemAccess cluster.
* <p/>
 * An HDFS test directory for the test will be created. The HDFS test directory
 * location can be retrieved using the {@link TestHdfsHelper#getHdfsTestDir()} method
 * (a minimal usage sketch follows this annotation declaration).
* <p/>
* Refer to the {@link HTestCase} class for details on how to use and configure
* a FileSystemAccess test minicluster or a real FileSystemAccess cluster for the tests.
*/
@Retention(java.lang.annotation.RetentionPolicy.RUNTIME)
@Target(java.lang.annotation.ElementType.METHOD)
public @interface TestHdfs {
}
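/*
 * Illustrative sketch (not part of the original source): a minimal HFSTestCase
 * subclass whose test method is annotated with @TestHdfs, assuming the mini DFS
 * cluster started by TestHdfsHelper. The class name and file name are hypothetical.
 */
class TestHdfsUsageSketch extends HFSTestCase {
  @org.junit.Test
  @TestHdfs
  public void writeIntoHdfsTestDir() throws Exception {
    org.apache.hadoop.conf.Configuration conf = TestHdfsHelper.getHdfsConf();
    org.apache.hadoop.fs.FileSystem fs = org.apache.hadoop.fs.FileSystem.get(conf);
    try {
      // Creates an empty file inside the per-test HDFS directory.
      fs.create(new org.apache.hadoop.fs.Path(
          TestHdfsHelper.getHdfsTestDir(), "example")).close();
    } finally {
      fs.close();
    }
  }
}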
| 1,733 | 41.292683 | 103 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestExceptionHelper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test;
import static org.junit.Assert.fail;
import java.util.regex.Pattern;
import org.junit.Test;
import org.junit.rules.MethodRule;
import org.junit.runners.model.FrameworkMethod;
import org.junit.runners.model.Statement;
public class TestExceptionHelper implements MethodRule {
@Test
public void dummy() {
}
@Override
public Statement apply(final Statement statement, final FrameworkMethod frameworkMethod, final Object o) {
return new Statement() {
@Override
public void evaluate() throws Throwable {
TestException testExceptionAnnotation = frameworkMethod.getAnnotation(TestException.class);
try {
statement.evaluate();
if (testExceptionAnnotation != null) {
Class<? extends Throwable> klass = testExceptionAnnotation.exception();
fail("Expected Exception: " + klass.getSimpleName());
}
} catch (Throwable ex) {
if (testExceptionAnnotation != null) {
Class<? extends Throwable> klass = testExceptionAnnotation.exception();
if (klass.isInstance(ex)) {
String regExp = testExceptionAnnotation.msgRegExp();
Pattern pattern = Pattern.compile(regExp);
if (!pattern.matcher(ex.getMessage()).find()) {
fail("Expected Exception Message pattern: " + regExp + " got message: " + ex.getMessage());
}
} else {
fail("Expected Exception: " + klass.getSimpleName() + " got: " + ex.getClass().getSimpleName());
}
} else {
throw ex;
}
}
}
};
}
}
| 2,473 | 35.382353 | 110 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHFSTestCase.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.Time;
import org.junit.Test;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.servlet.Context;
public class TestHFSTestCase extends HFSTestCase {
@Test(expected = IllegalStateException.class)
public void testDirNoAnnotation() throws Exception {
TestDirHelper.getTestDir();
}
@Test(expected = IllegalStateException.class)
public void testJettyNoAnnotation() throws Exception {
TestJettyHelper.getJettyServer();
}
@Test(expected = IllegalStateException.class)
public void testJettyNoAnnotation2() throws Exception {
TestJettyHelper.getJettyURL();
}
@Test(expected = IllegalStateException.class)
public void testHdfsNoAnnotation() throws Exception {
TestHdfsHelper.getHdfsConf();
}
@Test(expected = IllegalStateException.class)
public void testHdfsNoAnnotation2() throws Exception {
TestHdfsHelper.getHdfsTestDir();
}
@Test
@TestDir
public void testDirAnnotation() throws Exception {
assertNotNull(TestDirHelper.getTestDir());
}
@Test
public void waitFor() {
long start = Time.now();
long waited = waitFor(1000, new Predicate() {
@Override
public boolean evaluate() throws Exception {
return true;
}
});
long end = Time.now();
assertEquals(waited, 0, 50);
assertEquals(end - start - waited, 0, 50);
}
@Test
public void waitForTimeOutRatio1() {
setWaitForRatio(1);
long start = Time.now();
long waited = waitFor(200, new Predicate() {
@Override
public boolean evaluate() throws Exception {
return false;
}
});
long end = Time.now();
assertEquals(waited, -1);
assertEquals(end - start, 200, 50);
}
@Test
public void waitForTimeOutRatio2() {
setWaitForRatio(2);
long start = Time.now();
long waited = waitFor(200, new Predicate() {
@Override
public boolean evaluate() throws Exception {
return false;
}
});
long end = Time.now();
assertEquals(waited, -1);
assertEquals(end - start, 200 * getWaitForRatio(), 50 * getWaitForRatio());
}
@Test
public void sleepRatio1() {
setWaitForRatio(1);
long start = Time.now();
sleep(100);
long end = Time.now();
assertEquals(end - start, 100, 50);
}
@Test
public void sleepRatio2() {
setWaitForRatio(1);
long start = Time.now();
sleep(100);
long end = Time.now();
assertEquals(end - start, 100 * getWaitForRatio(), 50 * getWaitForRatio());
}
@Test
@TestHdfs
public void testHadoopFileSystem() throws Exception {
Configuration conf = TestHdfsHelper.getHdfsConf();
FileSystem fs = FileSystem.get(conf);
try {
OutputStream os = fs.create(new Path(TestHdfsHelper.getHdfsTestDir(), "foo"));
os.write(new byte[]{1});
os.close();
InputStream is = fs.open(new Path(TestHdfsHelper.getHdfsTestDir(), "foo"));
assertEquals(is.read(), 1);
assertEquals(is.read(), -1);
is.close();
} finally {
fs.close();
}
}
public static class MyServlet extends HttpServlet {
@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
resp.getWriter().write("foo");
}
}
@Test
@TestJetty
public void testJetty() throws Exception {
Context context = new Context();
context.setContextPath("/");
context.addServlet(MyServlet.class, "/bar");
Server server = TestJettyHelper.getJettyServer();
server.addHandler(context);
server.start();
URL url = new URL(TestJettyHelper.getJettyURL(), "/bar");
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
assertEquals(reader.readLine(), "foo");
reader.close();
}
@Test
@TestException(exception = RuntimeException.class)
public void testException0() {
throw new RuntimeException("foo");
}
@Test
@TestException(exception = RuntimeException.class, msgRegExp = ".o.")
public void testException1() {
throw new RuntimeException("foo");
}
}
| 5,659 | 28.025641 | 113 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestDir.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test;
import java.lang.annotation.Retention;
import java.lang.annotation.Target;
/**
* Annotation for {@link HTestCase} subclasses to indicate that the test method
* requires a test directory in the local file system.
* <p/>
 * The test directory location can be retrieved using the
 * {@link TestDirHelper#getTestDir()} method (see the usage sketch after this
 * annotation declaration).
*/
@Retention(java.lang.annotation.RetentionPolicy.RUNTIME)
@Target(java.lang.annotation.ElementType.METHOD)
public @interface TestDir {
}
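/*
 * Illustrative sketch (not part of the original source): a minimal HTestCase subclass
 * using @TestDir to obtain a fresh local directory for the test method. The class
 * name and file name are hypothetical.
 */
class TestDirUsageSketch extends HTestCase {
  @org.junit.Test
  @TestDir
  public void writeIntoTestDir() throws Exception {
    // The directory is created (and emptied) by TestDirHelper before the test runs.
    java.io.File dir = TestDirHelper.getTestDir();
    java.io.Writer writer = new java.io.FileWriter(new java.io.File(dir, "example.txt"));
    writer.write("data");
    writer.close();
  }
}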
| 1,307 | 36.371429 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestJetty.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test;
import java.lang.annotation.Retention;
import java.lang.annotation.Target;
/**
 * Annotation for {@link HTestCase} subclasses to indicate that the test method
* requires a Jetty servlet-container.
* <p/>
 * The {@link TestJettyHelper#getJettyServer()} method returns a ready-to-configure Jetty
 * servlet-container. After registering contexts, servlets, and filters, the Jetty
 * server must be started (<code>getJettyServer().start()</code>). The Jetty server
* is automatically stopped at the end of the test method invocation.
* <p/>
* Use the {@link TestJettyHelper#getJettyURL()} to obtain the base URL
* (schema://host:port) of the Jetty server.
* <p/>
* Refer to the {@link HTestCase} class for more details.
*/
@Retention(java.lang.annotation.RetentionPolicy.RUNTIME)
@Target(java.lang.annotation.ElementType.METHOD)
public @interface TestJetty {
}
| 1,702 | 40.536585 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test;
import java.lang.annotation.Retention;
import java.lang.annotation.Target;
@Retention(java.lang.annotation.RetentionPolicy.RUNTIME)
@Target(java.lang.annotation.ElementType.METHOD)
public @interface TestException {
Class<? extends Throwable> exception();
String msgRegExp() default ".*";
}
| 1,137 | 35.709677 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfsHelper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test;
import java.io.File;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.Test;
import org.junit.runners.model.FrameworkMethod;
import org.junit.runners.model.Statement;
public class TestHdfsHelper extends TestDirHelper {
@Override
@Test
public void dummy() {
}
public static final String HADOOP_MINI_HDFS = "test.hadoop.hdfs";
private static final ThreadLocal<Configuration> HDFS_CONF_TL = new InheritableThreadLocal<Configuration>();
private static final ThreadLocal<Path> HDFS_TEST_DIR_TL = new InheritableThreadLocal<Path>();
@Override
public Statement apply(Statement statement, FrameworkMethod frameworkMethod, Object o) {
TestHdfs testHdfsAnnotation = frameworkMethod.getAnnotation(TestHdfs.class);
if (testHdfsAnnotation != null) {
statement = new HdfsStatement(statement, frameworkMethod.getName());
}
return super.apply(statement, frameworkMethod, o);
}
private static class HdfsStatement extends Statement {
private Statement statement;
private String testName;
public HdfsStatement(Statement statement, String testName) {
this.statement = statement;
this.testName = testName;
}
@Override
public void evaluate() throws Throwable {
MiniDFSCluster miniHdfs = null;
Configuration conf = HadoopUsersConfTestHelper.getBaseConf();
if (Boolean.parseBoolean(System.getProperty(HADOOP_MINI_HDFS, "true"))) {
miniHdfs = startMiniHdfs(conf);
conf = miniHdfs.getConfiguration(0);
}
try {
HDFS_CONF_TL.set(conf);
HDFS_TEST_DIR_TL.set(resetHdfsTestDir(conf));
statement.evaluate();
} finally {
HDFS_CONF_TL.remove();
HDFS_TEST_DIR_TL.remove();
}
}
private static AtomicInteger counter = new AtomicInteger();
private Path resetHdfsTestDir(Configuration conf) {
Path testDir = new Path("/tmp/" + testName + "-" +
counter.getAndIncrement());
try {
// currentUser
FileSystem fs = FileSystem.get(conf);
fs.delete(testDir, true);
fs.mkdirs(testDir);
} catch (Exception ex) {
throw new RuntimeException(ex);
}
return testDir;
}
}
/**
* Returns the HDFS test directory for the current test, only available when the
* test method has been annotated with {@link TestHdfs}.
*
 * @return the HDFS test directory for the current test. It is a full/absolute
* <code>Path</code>.
*/
public static Path getHdfsTestDir() {
Path testDir = HDFS_TEST_DIR_TL.get();
if (testDir == null) {
throw new IllegalStateException("This test does not use @TestHdfs");
}
return testDir;
}
/**
 * Returns a FileSystemAccess <code>Configuration</code> preconfigured with the FileSystemAccess cluster
 * settings for testing. This configuration is only available when the test
 * method has been annotated with {@link TestHdfs}. Refer to the {@link HTestCase}
 * header for details.
 *
 * @return the FileSystemAccess <code>Configuration</code> preconfigured with the FileSystemAccess cluster
* settings for testing
*/
public static Configuration getHdfsConf() {
Configuration conf = HDFS_CONF_TL.get();
if (conf == null) {
throw new IllegalStateException("This test does not use @TestHdfs");
}
return new Configuration(conf);
}
private static MiniDFSCluster MINI_DFS = null;
private static synchronized MiniDFSCluster startMiniHdfs(Configuration conf) throws Exception {
if (MINI_DFS == null) {
if (System.getProperty("hadoop.log.dir") == null) {
System.setProperty("hadoop.log.dir", new File(TEST_DIR_ROOT, "hadoop-log").getAbsolutePath());
}
if (System.getProperty("test.build.data") == null) {
System.setProperty("test.build.data", new File(TEST_DIR_ROOT, "hadoop-data").getAbsolutePath());
}
conf = new Configuration(conf);
HadoopUsersConfTestHelper.addUserConf(conf);
conf.set("fs.hdfs.impl.disable.cache", "true");
conf.set("dfs.block.access.token.enable", "false");
conf.set("dfs.permissions", "true");
conf.set("hadoop.security.authentication", "simple");
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
builder.numDataNodes(2);
MiniDFSCluster miniHdfs = builder.build();
FileSystem fileSystem = miniHdfs.getFileSystem();
fileSystem.mkdirs(new Path("/tmp"));
fileSystem.mkdirs(new Path("/user"));
fileSystem.setPermission(new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
fileSystem.setPermission(new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
MINI_DFS = miniHdfs;
}
return MINI_DFS;
}
}
| 5,992 | 35.321212 | 109 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/HFSTestCase.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test;
import org.junit.Rule;
import org.junit.rules.MethodRule;
public abstract class HFSTestCase extends HTestCase {
@Rule
public MethodRule hdfsTestHelper = new TestHdfsHelper();
}
| 1,024 | 34.344828 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/SysPropsForTestsLoader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.text.MessageFormat;
import java.util.Map;
import java.util.Properties;
public class SysPropsForTestsLoader {
public static final String TEST_PROPERTIES_PROP = "test.properties";
static {
try {
String testFileName = System.getProperty(TEST_PROPERTIES_PROP, "test.properties");
File currentDir = new File(testFileName).getAbsoluteFile().getParentFile();
File testFile = new File(currentDir, testFileName);
while (currentDir != null && !testFile.exists()) {
testFile = new File(testFile.getAbsoluteFile().getParentFile().getParentFile(), testFileName);
currentDir = currentDir.getParentFile();
if (currentDir != null) {
testFile = new File(currentDir, testFileName);
}
}
if (testFile.exists()) {
System.out.println();
System.out.println(">>> " + TEST_PROPERTIES_PROP + " : " + testFile.getAbsolutePath());
Properties testProperties = new Properties();
testProperties.load(new FileReader(testFile));
for (Map.Entry entry : testProperties.entrySet()) {
if (!System.getProperties().containsKey(entry.getKey())) {
System.setProperty((String) entry.getKey(), (String) entry.getValue());
}
}
} else if (System.getProperty(TEST_PROPERTIES_PROP) != null) {
System.err.println(MessageFormat.format("Specified 'test.properties' file does not exist [{0}]",
System.getProperty(TEST_PROPERTIES_PROP)));
System.exit(-1);
} else {
System.out.println(">>> " + TEST_PROPERTIES_PROP + " : <NONE>");
}
} catch (IOException ex) {
throw new RuntimeException(ex);
}
}
public static void init() {
}
}
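/*
 * Illustrative sketch (not part of the original source): triggers the static loader
 * above and reports which properties file was selected. The class name is
 * hypothetical, and the typical invocation shown in the comment is an example; only
 * keys not already defined as JVM system properties are copied over by the loader.
 */
class SysPropsForTestsLoaderUsageSketch {
  public static void main(String[] args) {
    // Typically driven by: mvn test -Dtest.properties=/path/to/test.properties
    SysPropsForTestsLoader.init();
    System.out.println("properties file: " + System.getProperty(
        SysPropsForTestsLoader.TEST_PROPERTIES_PROP, "test.properties (default)"));
  }
}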
| 2,686 | 36.84507 | 104 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestJettyHelper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.MalformedURLException;
import java.net.ServerSocket;
import java.net.URL;
import java.net.UnknownHostException;
import org.apache.hadoop.security.ssl.SslSocketConnectorSecure;
import org.junit.Test;
import org.junit.rules.MethodRule;
import org.junit.runners.model.FrameworkMethod;
import org.junit.runners.model.Statement;
import org.mortbay.jetty.Connector;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.security.SslSocketConnector;
public class TestJettyHelper implements MethodRule {
private boolean ssl;
private String keyStoreType;
private String keyStore;
private String keyStorePassword;
private Server server;
public TestJettyHelper() {
this.ssl = false;
}
public TestJettyHelper(String keyStoreType, String keyStore,
String keyStorePassword) {
ssl = true;
this.keyStoreType = keyStoreType;
this.keyStore = keyStore;
this.keyStorePassword = keyStorePassword;
}
private static final ThreadLocal<TestJettyHelper> TEST_JETTY_TL =
new InheritableThreadLocal<TestJettyHelper>();
@Override
public Statement apply(final Statement statement, final FrameworkMethod frameworkMethod, final Object o) {
return new Statement() {
@Override
public void evaluate() throws Throwable {
TestJetty testJetty = frameworkMethod.getAnnotation(TestJetty.class);
if (testJetty != null) {
server = createJettyServer();
}
try {
TEST_JETTY_TL.set(TestJettyHelper.this);
statement.evaluate();
} finally {
TEST_JETTY_TL.remove();
if (server != null && server.isRunning()) {
try {
server.stop();
} catch (Exception ex) {
throw new RuntimeException("Could not stop embedded servlet container, " + ex.getMessage(), ex);
}
}
}
}
};
}
private Server createJettyServer() {
try {
InetAddress localhost = InetAddress.getByName("localhost");
String host = "localhost";
ServerSocket ss = new ServerSocket(0, 50, localhost);
int port = ss.getLocalPort();
ss.close();
Server server = new Server(0);
if (!ssl) {
server.getConnectors()[0].setHost(host);
server.getConnectors()[0].setPort(port);
} else {
SslSocketConnector c = new SslSocketConnectorSecure();
c.setHost(host);
c.setPort(port);
c.setNeedClientAuth(false);
c.setKeystore(keyStore);
c.setKeystoreType(keyStoreType);
c.setKeyPassword(keyStorePassword);
server.setConnectors(new Connector[] {c});
}
return server;
} catch (Exception ex) {
      throw new RuntimeException("Could not start embedded servlet container, " + ex.getMessage(), ex);
}
}
/**
* Returns the authority (hostname & port) used by the JettyServer.
*
* @return an <code>InetSocketAddress</code> with the corresponding authority.
*/
public static InetSocketAddress getAuthority() {
Server server = getJettyServer();
try {
InetAddress add =
InetAddress.getByName(server.getConnectors()[0].getHost());
int port = server.getConnectors()[0].getPort();
return new InetSocketAddress(add, port);
} catch (UnknownHostException ex) {
throw new RuntimeException(ex);
}
}
/**
 * Returns a Jetty server ready to be configured and then started. This server
* is only available when the test method has been annotated with
* {@link TestJetty}. Refer to {@link HTestCase} header for details.
* <p/>
* Once configured, the Jetty server should be started. The server will be
* automatically stopped when the test method ends.
*
 * @return a Jetty server ready to be configured and then started.
*/
public static Server getJettyServer() {
TestJettyHelper helper = TEST_JETTY_TL.get();
if (helper == null || helper.server == null) {
throw new IllegalStateException("This test does not use @TestJetty");
}
return helper.server;
}
/**
* Returns the base URL (SCHEMA://HOST:PORT) of the test Jetty server
* (see {@link #getJettyServer()}) once started.
*
* @return the base URL (SCHEMA://HOST:PORT) of the test Jetty server.
*/
public static URL getJettyURL() {
TestJettyHelper helper = TEST_JETTY_TL.get();
if (helper == null || helper.server == null) {
throw new IllegalStateException("This test does not use @TestJetty");
}
try {
String scheme = (helper.ssl) ? "https" : "http";
return new URL(scheme + "://" +
helper.server.getConnectors()[0].getHost() + ":" +
helper.server.getConnectors()[0].getPort());
} catch (MalformedURLException ex) {
throw new RuntimeException("It should never happen, " + ex.getMessage(), ex);
}
}
}
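/*
 * Illustrative sketch (not part of the original source): inside a test method
 * annotated with @TestJetty, a helper like the one below could turn the server
 * authority into a plain host:port string, e.g. for configuring a client. It assumes
 * the Jetty server has already been created for the current test; the class and
 * method names are hypothetical.
 */
class TestJettyHelperUsageSketch {
  static String jettyHostPort() {
    // getAuthority() resolves the host and port configured on the first connector.
    java.net.InetSocketAddress authority = TestJettyHelper.getAuthority();
    return authority.getHostName() + ":" + authority.getPort();
  }
}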
| 5,763 | 33.309524 | 110 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestDirHelper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test;
import java.io.File;
import java.io.IOException;
import java.text.MessageFormat;
import java.util.concurrent.atomic.AtomicInteger;
import org.junit.Test;
import org.junit.rules.MethodRule;
import org.junit.runners.model.FrameworkMethod;
import org.junit.runners.model.Statement;
public class TestDirHelper implements MethodRule {
@Test
public void dummy() {
}
static {
SysPropsForTestsLoader.init();
}
public static final String TEST_DIR_PROP = "test.dir";
static String TEST_DIR_ROOT;
private static void delete(File file) throws IOException {
if (file.getAbsolutePath().length() < 5) {
throw new IllegalArgumentException(
MessageFormat.format("Path [{0}] is too short, not deleting", file.getAbsolutePath()));
}
if (file.exists()) {
if (file.isDirectory()) {
File[] children = file.listFiles();
if (children != null) {
for (File child : children) {
delete(child);
}
}
}
if (!file.delete()) {
throw new RuntimeException(MessageFormat.format("Could not delete path [{0}]", file.getAbsolutePath()));
}
}
}
static {
try {
TEST_DIR_ROOT = System.getProperty(TEST_DIR_PROP, new File("target").getAbsolutePath());
if (!new File(TEST_DIR_ROOT).isAbsolute()) {
System.err.println(MessageFormat.format("System property [{0}]=[{1}] must be set to an absolute path",
TEST_DIR_PROP, TEST_DIR_ROOT));
System.exit(-1);
} else if (TEST_DIR_ROOT.length() < 4) {
System.err.println(MessageFormat.format("System property [{0}]=[{1}] must be at least 4 chars",
TEST_DIR_PROP, TEST_DIR_ROOT));
System.exit(-1);
}
TEST_DIR_ROOT = new File(TEST_DIR_ROOT, "test-dir").getAbsolutePath();
System.setProperty(TEST_DIR_PROP, TEST_DIR_ROOT);
File dir = new File(TEST_DIR_ROOT);
delete(dir);
if (!dir.mkdirs()) {
System.err.println(MessageFormat.format("Could not create test dir [{0}]", TEST_DIR_ROOT));
System.exit(-1);
}
System.out.println(">>> " + TEST_DIR_PROP + " : " + System.getProperty(TEST_DIR_PROP));
} catch (IOException ex) {
throw new RuntimeException(ex);
}
}
private static final ThreadLocal<File> TEST_DIR_TL = new InheritableThreadLocal<File>();
@Override
public Statement apply(final Statement statement, final FrameworkMethod frameworkMethod, final Object o) {
return new Statement() {
@Override
public void evaluate() throws Throwable {
File testDir = null;
TestDir testDirAnnotation = frameworkMethod.getAnnotation(TestDir.class);
if (testDirAnnotation != null) {
testDir = resetTestCaseDir(frameworkMethod.getName());
}
try {
TEST_DIR_TL.set(testDir);
statement.evaluate();
} finally {
TEST_DIR_TL.remove();
}
}
};
}
/**
* Returns the local test directory for the current test, only available when the
* test method has been annotated with {@link TestDir}.
*
 * @return the test directory for the current test. It is a full/absolute
* <code>File</code>.
*/
public static File getTestDir() {
File testDir = TEST_DIR_TL.get();
if (testDir == null) {
throw new IllegalStateException("This test does not use @TestDir");
}
return testDir;
}
private static AtomicInteger counter = new AtomicInteger();
private static File resetTestCaseDir(String testName) {
File dir = new File(TEST_DIR_ROOT);
dir = new File(dir, testName + "-" + counter.getAndIncrement());
dir = dir.getAbsoluteFile();
try {
delete(dir);
} catch (IOException ex) {
throw new RuntimeException(MessageFormat.format("Could not delete test dir[{0}], {1}",
dir, ex.getMessage()), ex);
}
if (!dir.mkdirs()) {
throw new RuntimeException(MessageFormat.format("Could not create test dir[{0}]", dir));
}
return dir;
}
}
| 5,001 | 32.797297 | 112 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFWithSWebhdfsFileSystem.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.http.client;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
import org.apache.hadoop.test.TestJettyHelper;
import org.junit.AfterClass;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import java.io.File;
import java.net.URI;
import java.net.URL;
import java.util.UUID;
@RunWith(value = Parameterized.class)
public class TestHttpFSFWithSWebhdfsFileSystem
extends TestHttpFSWithHttpFSFileSystem {
private static String classpathDir;
private static final String BASEDIR = System.getProperty("test.build.dir",
"target/test-dir") + "/" + UUID.randomUUID();
private static Configuration sslConf;
{
URL url = Thread.currentThread().getContextClassLoader().
getResource("classutils.txt");
classpathDir = url.toExternalForm();
if (classpathDir.startsWith("file:")) {
classpathDir = classpathDir.substring("file:".length());
classpathDir = classpathDir.substring(0,
classpathDir.length() - "/classutils.txt".length());
} else {
throw new RuntimeException("Cannot find test classes dir");
}
File base = new File(BASEDIR);
FileUtil.fullyDelete(base);
base.mkdirs();
String keyStoreDir = new File(BASEDIR).getAbsolutePath();
try {
sslConf = new Configuration();
KeyStoreTestUtil.setupSSLConfig(keyStoreDir, classpathDir, sslConf, false);
} catch (Exception ex) {
throw new RuntimeException(ex);
}
jettyTestHelper = new TestJettyHelper("jks", keyStoreDir + "/serverKS.jks",
"serverP");
}
@AfterClass
public static void cleanUp() {
new File(classpathDir, "ssl-client.xml").delete();
new File(classpathDir, "ssl-server.xml").delete();
}
public TestHttpFSFWithSWebhdfsFileSystem(Operation operation) {
super(operation);
}
@Override
protected Class getFileSystemClass() {
return SWebHdfsFileSystem.class;
}
@Override
protected String getScheme() {
return "swebhdfs";
}
@Override
protected FileSystem getHttpFSFileSystem() throws Exception {
Configuration conf = new Configuration(sslConf);
conf.set("fs.swebhdfs.impl", getFileSystemClass().getName());
URI uri = new URI("swebhdfs://" +
TestJettyHelper.getJettyURL().toURI().getAuthority());
return FileSystem.get(uri, conf);
}
}
| 3,325 | 32.26 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystemLocalFileSystem.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.http.client;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.test.TestDirHelper;
import org.junit.Assert;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import java.io.File;
import java.net.URI;
import java.net.URISyntaxException;
@RunWith(value = Parameterized.class)
public class TestHttpFSFileSystemLocalFileSystem extends BaseTestHttpFSWith {
private static String PATH_PREFIX;
static {
new TestDirHelper();
String prefix =
System.getProperty("test.build.dir", "target/test-dir") + "/local";
File file = new File(prefix);
file.mkdirs();
PATH_PREFIX = file.getAbsolutePath();
}
public TestHttpFSFileSystemLocalFileSystem(Operation operation) {
super(operation);
}
@Override
protected Path getProxiedFSTestDir() {
return addPrefix(new Path(TestDirHelper.getTestDir().getAbsolutePath()));
}
@Override
protected String getProxiedFSURI() {
return "file:///";
}
@Override
protected Configuration getProxiedFSConf() {
Configuration conf = new Configuration(false);
conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, getProxiedFSURI());
return conf;
}
protected Path addPrefix(Path path) {
return Path.mergePaths(new Path(PATH_PREFIX), path);
}
@Override
protected void testSetPermission() throws Exception {
if (Path.WINDOWS) {
FileSystem fs = FileSystem.get(getProxiedFSConf());
Path path = new Path(getProxiedFSTestDir(), "foodir");
fs.mkdirs(path);
fs = getHttpFSFileSystem();
FsPermission permission1 = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
fs.setPermission(path, permission1);
fs.close();
fs = FileSystem.get(getProxiedFSConf());
FileStatus status1 = fs.getFileStatus(path);
fs.close();
FsPermission permission2 = status1.getPermission();
Assert.assertEquals(permission2, permission1);
// sticky bit not supported on Windows with local file system, so the
// subclass skips that part of the test
} else {
super.testSetPermission();
}
}
}
| 3,248 | 31.168317 | 101 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSWithHttpFSFileSystem.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.http.client;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.test.TestHdfsHelper;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
@RunWith(value = Parameterized.class)
public class TestHttpFSWithHttpFSFileSystem extends BaseTestHttpFSWith {
public TestHttpFSWithHttpFSFileSystem(Operation operation) {
super(operation);
}
@Override
protected Class getFileSystemClass() {
return HttpFSFileSystem.class;
}
@Override
protected Path getProxiedFSTestDir() {
return TestHdfsHelper.getHdfsTestDir();
}
@Override
protected String getProxiedFSURI() {
return TestHdfsHelper.getHdfsConf().get(
CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY);
}
@Override
protected Configuration getProxiedFSConf() {
return TestHdfsHelper.getHdfsConf();
}
}
| 1,772 | 30.105263 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFWithWebhdfsFileSystem.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.http.client;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
@RunWith(value = Parameterized.class)
public class TestHttpFSFWithWebhdfsFileSystem
extends TestHttpFSWithHttpFSFileSystem {
public TestHttpFSFWithWebhdfsFileSystem(Operation operation) {
super(operation);
}
@Override
protected Class getFileSystemClass() {
return WebHdfsFileSystem.class;
}
}
| 1,294 | 32.205128 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.http.client;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.http.server.HttpFSServerWebApp;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.AppendTestUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.HFSTestCase;
import org.apache.hadoop.test.HadoopUsersConfTestHelper;
import org.apache.hadoop.test.TestDir;
import org.apache.hadoop.test.TestDirHelper;
import org.apache.hadoop.test.TestHdfs;
import org.apache.hadoop.test.TestJetty;
import org.apache.hadoop.test.TestJettyHelper;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.webapp.WebAppContext;
import com.google.common.collect.Lists;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.Writer;
import java.net.URI;
import java.net.URL;
import java.security.PrivilegedExceptionAction;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Map;
@RunWith(value = Parameterized.class)
public abstract class BaseTestHttpFSWith extends HFSTestCase {
protected abstract Path getProxiedFSTestDir();
protected abstract String getProxiedFSURI();
protected abstract Configuration getProxiedFSConf();
protected boolean isLocalFS() {
return getProxiedFSURI().startsWith("file://");
}
private void createHttpFSServer() throws Exception {
File homeDir = TestDirHelper.getTestDir();
Assert.assertTrue(new File(homeDir, "conf").mkdir());
Assert.assertTrue(new File(homeDir, "log").mkdir());
Assert.assertTrue(new File(homeDir, "temp").mkdir());
HttpFSServerWebApp.setHomeDirForCurrentThread(homeDir.getAbsolutePath());
File secretFile = new File(new File(homeDir, "conf"), "secret");
Writer w = new FileWriter(secretFile);
w.write("secret");
w.close();
//FileSystem being served by HttpFS
String fsDefaultName = getProxiedFSURI();
Configuration conf = new Configuration(false);
conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, fsDefaultName);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
File hdfsSite = new File(new File(homeDir, "conf"), "hdfs-site.xml");
OutputStream os = new FileOutputStream(hdfsSite);
conf.writeXml(os);
os.close();
//HTTPFS configuration
conf = new Configuration(false);
conf.set("httpfs.proxyuser." + HadoopUsersConfTestHelper.getHadoopProxyUser() + ".groups",
HadoopUsersConfTestHelper.getHadoopProxyUserGroups());
conf.set("httpfs.proxyuser." + HadoopUsersConfTestHelper.getHadoopProxyUser() + ".hosts",
HadoopUsersConfTestHelper.getHadoopProxyUserHosts());
conf.set("httpfs.authentication.signature.secret.file", secretFile.getAbsolutePath());
File httpfsSite = new File(new File(homeDir, "conf"), "httpfs-site.xml");
os = new FileOutputStream(httpfsSite);
conf.writeXml(os);
os.close();
ClassLoader cl = Thread.currentThread().getContextClassLoader();
URL url = cl.getResource("webapp");
WebAppContext context = new WebAppContext(url.getPath(), "/webhdfs");
Server server = TestJettyHelper.getJettyServer();
server.addHandler(context);
server.start();
}
protected Class getFileSystemClass() {
return HttpFSFileSystem.class;
}
protected String getScheme() {
return "webhdfs";
}
protected FileSystem getHttpFSFileSystem() throws Exception {
Configuration conf = new Configuration();
conf.set("fs.webhdfs.impl", getFileSystemClass().getName());
URI uri = new URI(getScheme() + "://" +
TestJettyHelper.getJettyURL().toURI().getAuthority());
return FileSystem.get(uri, conf);
}
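  /**
   * Illustrative sketch only (not exercised by the parameterized tests): the
   * HttpFS client can also be obtained by making the HttpFS endpoint the
   * default FileSystem instead of passing an explicit URI. The method name and
   * the per-scheme cache-disable key are introduced here for the example; the
   * "fs.<scheme>.impl" binding mirrors getHttpFSFileSystem() above.
   */
  protected FileSystem getHttpFSFileSystemAsDefaultFS() throws Exception {
    Configuration conf = new Configuration();
    // Bind the client implementation class for this scheme, as above.
    conf.set("fs." + getScheme() + ".impl", getFileSystemClass().getName());
    // Avoid handing back a cached instance between test methods (assumption).
    conf.set("fs." + getScheme() + ".impl.disable.cache", "true");
    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
        getScheme() + "://" + TestJettyHelper.getJettyURL().toURI().getAuthority());
    return FileSystem.get(conf);
  }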
protected void testGet() throws Exception {
FileSystem fs = getHttpFSFileSystem();
Assert.assertNotNull(fs);
URI uri = new URI(getScheme() + "://" +
TestJettyHelper.getJettyURL().toURI().getAuthority());
Assert.assertEquals(fs.getUri(), uri);
fs.close();
}
private void testOpen() throws Exception {
FileSystem fs = FileSystem.get(getProxiedFSConf());
Path path = new Path(getProxiedFSTestDir(), "foo.txt");
OutputStream os = fs.create(path);
os.write(1);
os.close();
fs.close();
fs = getHttpFSFileSystem();
InputStream is = fs.open(new Path(path.toUri().getPath()));
Assert.assertEquals(is.read(), 1);
is.close();
fs.close();
}
private void testCreate(Path path, boolean override) throws Exception {
FileSystem fs = getHttpFSFileSystem();
FsPermission permission = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
OutputStream os = fs.create(new Path(path.toUri().getPath()), permission, override, 1024,
(short) 2, 100 * 1024 * 1024, null);
os.write(1);
os.close();
fs.close();
fs = FileSystem.get(getProxiedFSConf());
FileStatus status = fs.getFileStatus(path);
if (!isLocalFS()) {
Assert.assertEquals(status.getReplication(), 2);
Assert.assertEquals(status.getBlockSize(), 100 * 1024 * 1024);
}
Assert.assertEquals(status.getPermission(), permission);
InputStream is = fs.open(path);
Assert.assertEquals(is.read(), 1);
is.close();
fs.close();
}
private void testCreate() throws Exception {
Path path = new Path(getProxiedFSTestDir(), "foo.txt");
FileSystem fs = FileSystem.get(getProxiedFSConf());
fs.delete(path, true);
testCreate(path, false);
testCreate(path, true);
try {
testCreate(path, false);
Assert.fail("the create should have failed because the file exists " +
"and override is FALSE");
} catch (IOException ex) {
System.out.println("#");
} catch (Exception ex) {
Assert.fail(ex.toString());
}
}
private void testAppend() throws Exception {
if (!isLocalFS()) {
FileSystem fs = FileSystem.get(getProxiedFSConf());
fs.mkdirs(getProxiedFSTestDir());
Path path = new Path(getProxiedFSTestDir(), "foo.txt");
OutputStream os = fs.create(path);
os.write(1);
os.close();
fs.close();
fs = getHttpFSFileSystem();
os = fs.append(new Path(path.toUri().getPath()));
os.write(2);
os.close();
fs.close();
fs = FileSystem.get(getProxiedFSConf());
InputStream is = fs.open(path);
Assert.assertEquals(is.read(), 1);
Assert.assertEquals(is.read(), 2);
Assert.assertEquals(is.read(), -1);
is.close();
fs.close();
}
}
private void testTruncate() throws Exception {
if (!isLocalFS()) {
final short repl = 3;
final int blockSize = 1024;
final int numOfBlocks = 2;
FileSystem fs = FileSystem.get(getProxiedFSConf());
fs.mkdirs(getProxiedFSTestDir());
Path file = new Path(getProxiedFSTestDir(), "foo.txt");
final byte[] data = FileSystemTestHelper.getFileData(
numOfBlocks, blockSize);
FileSystemTestHelper.createFile(fs, file, data, blockSize, repl);
final int newLength = blockSize;
boolean isReady = fs.truncate(file, newLength);
Assert.assertTrue("Recovery is not expected.", isReady);
FileStatus fileStatus = fs.getFileStatus(file);
Assert.assertEquals(fileStatus.getLen(), newLength);
AppendTestUtil.checkFullFile(fs, file, newLength, data, file.toString());
fs.close();
}
}
private void testConcat() throws Exception {
Configuration config = getProxiedFSConf();
config.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
if (!isLocalFS()) {
FileSystem fs = FileSystem.get(config);
fs.mkdirs(getProxiedFSTestDir());
Path path1 = new Path("/test/foo.txt");
Path path2 = new Path("/test/bar.txt");
Path path3 = new Path("/test/derp.txt");
DFSTestUtil.createFile(fs, path1, 1024, (short) 3, 0);
DFSTestUtil.createFile(fs, path2, 1024, (short) 3, 0);
DFSTestUtil.createFile(fs, path3, 1024, (short) 3, 0);
fs.close();
fs = getHttpFSFileSystem();
fs.concat(path1, new Path[]{path2, path3});
fs.close();
fs = FileSystem.get(config);
Assert.assertTrue(fs.exists(path1));
Assert.assertFalse(fs.exists(path2));
Assert.assertFalse(fs.exists(path3));
fs.close();
}
}
private void testRename() throws Exception {
FileSystem fs = FileSystem.get(getProxiedFSConf());
Path path = new Path(getProxiedFSTestDir(), "foo");
fs.mkdirs(path);
fs.close();
fs = getHttpFSFileSystem();
Path oldPath = new Path(path.toUri().getPath());
Path newPath = new Path(path.getParent(), "bar");
fs.rename(oldPath, newPath);
fs.close();
fs = FileSystem.get(getProxiedFSConf());
Assert.assertFalse(fs.exists(oldPath));
Assert.assertTrue(fs.exists(newPath));
fs.close();
}
private void testDelete() throws Exception {
Path foo = new Path(getProxiedFSTestDir(), "foo");
Path bar = new Path(getProxiedFSTestDir(), "bar");
Path foe = new Path(getProxiedFSTestDir(), "foe");
FileSystem fs = FileSystem.get(getProxiedFSConf());
fs.mkdirs(foo);
fs.mkdirs(new Path(bar, "a"));
fs.mkdirs(foe);
FileSystem hoopFs = getHttpFSFileSystem();
Assert.assertTrue(hoopFs.delete(new Path(foo.toUri().getPath()), false));
Assert.assertFalse(fs.exists(foo));
try {
hoopFs.delete(new Path(bar.toUri().getPath()), false);
Assert.fail();
    } catch (IOException ex) {
      // Expected: non-recursive delete of a non-empty directory must fail
    } catch (Exception ex) {
Assert.fail();
}
Assert.assertTrue(fs.exists(bar));
Assert.assertTrue(hoopFs.delete(new Path(bar.toUri().getPath()), true));
Assert.assertFalse(fs.exists(bar));
Assert.assertTrue(fs.exists(foe));
Assert.assertTrue(hoopFs.delete(foe, true));
Assert.assertFalse(fs.exists(foe));
hoopFs.close();
fs.close();
}
private void testListStatus() throws Exception {
FileSystem fs = FileSystem.get(getProxiedFSConf());
Path path = new Path(getProxiedFSTestDir(), "foo.txt");
OutputStream os = fs.create(path);
os.write(1);
os.close();
FileStatus status1 = fs.getFileStatus(path);
fs.close();
fs = getHttpFSFileSystem();
FileStatus status2 = fs.getFileStatus(new Path(path.toUri().getPath()));
fs.close();
Assert.assertEquals(status2.getPermission(), status1.getPermission());
Assert.assertEquals(status2.getPath().toUri().getPath(), status1.getPath().toUri().getPath());
Assert.assertEquals(status2.getReplication(), status1.getReplication());
Assert.assertEquals(status2.getBlockSize(), status1.getBlockSize());
Assert.assertEquals(status2.getAccessTime(), status1.getAccessTime());
Assert.assertEquals(status2.getModificationTime(), status1.getModificationTime());
Assert.assertEquals(status2.getOwner(), status1.getOwner());
Assert.assertEquals(status2.getGroup(), status1.getGroup());
Assert.assertEquals(status2.getLen(), status1.getLen());
FileStatus[] stati = fs.listStatus(path.getParent());
Assert.assertEquals(stati.length, 1);
Assert.assertEquals(stati[0].getPath().getName(), path.getName());
}
private void testWorkingdirectory() throws Exception {
FileSystem fs = FileSystem.get(getProxiedFSConf());
Path workingDir = fs.getWorkingDirectory();
fs.close();
fs = getHttpFSFileSystem();
if (isLocalFS()) {
fs.setWorkingDirectory(workingDir);
}
Path httpFSWorkingDir = fs.getWorkingDirectory();
fs.close();
Assert.assertEquals(httpFSWorkingDir.toUri().getPath(),
workingDir.toUri().getPath());
fs = getHttpFSFileSystem();
fs.setWorkingDirectory(new Path("/tmp"));
workingDir = fs.getWorkingDirectory();
fs.close();
Assert.assertEquals(workingDir.toUri().getPath(), new Path("/tmp").toUri().getPath());
}
private void testMkdirs() throws Exception {
Path path = new Path(getProxiedFSTestDir(), "foo");
FileSystem fs = getHttpFSFileSystem();
fs.mkdirs(path);
fs.close();
fs = FileSystem.get(getProxiedFSConf());
Assert.assertTrue(fs.exists(path));
fs.close();
}
private void testSetTimes() throws Exception {
if (!isLocalFS()) {
FileSystem fs = FileSystem.get(getProxiedFSConf());
Path path = new Path(getProxiedFSTestDir(), "foo.txt");
OutputStream os = fs.create(path);
os.write(1);
os.close();
FileStatus status1 = fs.getFileStatus(path);
fs.close();
long at = status1.getAccessTime();
long mt = status1.getModificationTime();
fs = getHttpFSFileSystem();
fs.setTimes(path, mt - 10, at - 20);
fs.close();
fs = FileSystem.get(getProxiedFSConf());
status1 = fs.getFileStatus(path);
fs.close();
long atNew = status1.getAccessTime();
long mtNew = status1.getModificationTime();
Assert.assertEquals(mtNew, mt - 10);
Assert.assertEquals(atNew, at - 20);
}
}
protected void testSetPermission() throws Exception {
FileSystem fs = FileSystem.get(getProxiedFSConf());
Path path = new Path(getProxiedFSTestDir(), "foodir");
fs.mkdirs(path);
fs = getHttpFSFileSystem();
FsPermission permission1 = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
fs.setPermission(path, permission1);
fs.close();
fs = FileSystem.get(getProxiedFSConf());
FileStatus status1 = fs.getFileStatus(path);
fs.close();
FsPermission permission2 = status1.getPermission();
Assert.assertEquals(permission2, permission1);
//sticky bit
fs = getHttpFSFileSystem();
permission1 = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE, true);
fs.setPermission(path, permission1);
fs.close();
fs = FileSystem.get(getProxiedFSConf());
status1 = fs.getFileStatus(path);
fs.close();
permission2 = status1.getPermission();
Assert.assertTrue(permission2.getStickyBit());
Assert.assertEquals(permission2, permission1);
}
private void testSetOwner() throws Exception {
if (!isLocalFS()) {
FileSystem fs = FileSystem.get(getProxiedFSConf());
fs.mkdirs(getProxiedFSTestDir());
Path path = new Path(getProxiedFSTestDir(), "foo.txt");
OutputStream os = fs.create(path);
os.write(1);
os.close();
fs.close();
fs = getHttpFSFileSystem();
String user = HadoopUsersConfTestHelper.getHadoopUsers()[1];
String group = HadoopUsersConfTestHelper.getHadoopUserGroups(user)[0];
fs.setOwner(path, user, group);
fs.close();
fs = FileSystem.get(getProxiedFSConf());
FileStatus status1 = fs.getFileStatus(path);
fs.close();
Assert.assertEquals(status1.getOwner(), user);
Assert.assertEquals(status1.getGroup(), group);
}
}
private void testSetReplication() throws Exception {
FileSystem fs = FileSystem.get(getProxiedFSConf());
Path path = new Path(getProxiedFSTestDir(), "foo.txt");
OutputStream os = fs.create(path);
os.write(1);
os.close();
fs.close();
fs.setReplication(path, (short) 2);
fs = getHttpFSFileSystem();
fs.setReplication(path, (short) 1);
fs.close();
fs = FileSystem.get(getProxiedFSConf());
FileStatus status1 = fs.getFileStatus(path);
fs.close();
Assert.assertEquals(status1.getReplication(), (short) 1);
}
private void testChecksum() throws Exception {
if (!isLocalFS()) {
FileSystem fs = FileSystem.get(getProxiedFSConf());
fs.mkdirs(getProxiedFSTestDir());
Path path = new Path(getProxiedFSTestDir(), "foo.txt");
OutputStream os = fs.create(path);
os.write(1);
os.close();
FileChecksum hdfsChecksum = fs.getFileChecksum(path);
fs.close();
fs = getHttpFSFileSystem();
FileChecksum httpChecksum = fs.getFileChecksum(path);
fs.close();
Assert.assertEquals(httpChecksum.getAlgorithmName(), hdfsChecksum.getAlgorithmName());
Assert.assertEquals(httpChecksum.getLength(), hdfsChecksum.getLength());
Assert.assertArrayEquals(httpChecksum.getBytes(), hdfsChecksum.getBytes());
}
}
private void testContentSummary() throws Exception {
FileSystem fs = FileSystem.get(getProxiedFSConf());
Path path = new Path(getProxiedFSTestDir(), "foo.txt");
OutputStream os = fs.create(path);
os.write(1);
os.close();
ContentSummary hdfsContentSummary = fs.getContentSummary(path);
fs.close();
fs = getHttpFSFileSystem();
ContentSummary httpContentSummary = fs.getContentSummary(path);
fs.close();
Assert.assertEquals(httpContentSummary.getDirectoryCount(), hdfsContentSummary.getDirectoryCount());
Assert.assertEquals(httpContentSummary.getFileCount(), hdfsContentSummary.getFileCount());
Assert.assertEquals(httpContentSummary.getLength(), hdfsContentSummary.getLength());
Assert.assertEquals(httpContentSummary.getQuota(), hdfsContentSummary.getQuota());
Assert.assertEquals(httpContentSummary.getSpaceConsumed(), hdfsContentSummary.getSpaceConsumed());
Assert.assertEquals(httpContentSummary.getSpaceQuota(), hdfsContentSummary.getSpaceQuota());
}
/** Set xattr */
private void testSetXAttr() throws Exception {
if (!isLocalFS()) {
FileSystem fs = FileSystem.get(getProxiedFSConf());
fs.mkdirs(getProxiedFSTestDir());
Path path = new Path(getProxiedFSTestDir(), "foo.txt");
OutputStream os = fs.create(path);
os.write(1);
os.close();
fs.close();
final String name1 = "user.a1";
final byte[] value1 = new byte[]{0x31, 0x32, 0x33};
final String name2 = "user.a2";
final byte[] value2 = new byte[]{0x41, 0x42, 0x43};
final String name3 = "user.a3";
final byte[] value3 = null;
final String name4 = "trusted.a1";
final byte[] value4 = new byte[]{0x31, 0x32, 0x33};
final String name5 = "a1";
fs = getHttpFSFileSystem();
fs.setXAttr(path, name1, value1);
fs.setXAttr(path, name2, value2);
fs.setXAttr(path, name3, value3);
fs.setXAttr(path, name4, value4);
try {
fs.setXAttr(path, name5, value1);
Assert.fail("Set xAttr with incorrect name format should fail.");
      } catch (IOException e) {
        // Expected: an xattr name without a namespace prefix is rejected
      } catch (IllegalArgumentException e) {
        // Expected: an xattr name without a namespace prefix is rejected
      }
fs.close();
fs = FileSystem.get(getProxiedFSConf());
Map<String, byte[]> xAttrs = fs.getXAttrs(path);
fs.close();
Assert.assertEquals(4, xAttrs.size());
Assert.assertArrayEquals(value1, xAttrs.get(name1));
Assert.assertArrayEquals(value2, xAttrs.get(name2));
Assert.assertArrayEquals(new byte[0], xAttrs.get(name3));
Assert.assertArrayEquals(value4, xAttrs.get(name4));
}
}
/** Get xattrs */
private void testGetXAttrs() throws Exception {
if (!isLocalFS()) {
FileSystem fs = FileSystem.get(getProxiedFSConf());
fs.mkdirs(getProxiedFSTestDir());
Path path = new Path(getProxiedFSTestDir(), "foo.txt");
OutputStream os = fs.create(path);
os.write(1);
os.close();
fs.close();
final String name1 = "user.a1";
final byte[] value1 = new byte[]{0x31, 0x32, 0x33};
final String name2 = "user.a2";
final byte[] value2 = new byte[]{0x41, 0x42, 0x43};
final String name3 = "user.a3";
final byte[] value3 = null;
final String name4 = "trusted.a1";
final byte[] value4 = new byte[]{0x31, 0x32, 0x33};
fs = FileSystem.get(getProxiedFSConf());
fs.setXAttr(path, name1, value1);
fs.setXAttr(path, name2, value2);
fs.setXAttr(path, name3, value3);
fs.setXAttr(path, name4, value4);
fs.close();
// Get xattrs with names parameter
fs = getHttpFSFileSystem();
List<String> names = Lists.newArrayList();
names.add(name1);
names.add(name2);
names.add(name3);
names.add(name4);
Map<String, byte[]> xAttrs = fs.getXAttrs(path, names);
fs.close();
Assert.assertEquals(4, xAttrs.size());
Assert.assertArrayEquals(value1, xAttrs.get(name1));
Assert.assertArrayEquals(value2, xAttrs.get(name2));
Assert.assertArrayEquals(new byte[0], xAttrs.get(name3));
Assert.assertArrayEquals(value4, xAttrs.get(name4));
// Get specific xattr
fs = getHttpFSFileSystem();
byte[] value = fs.getXAttr(path, name1);
Assert.assertArrayEquals(value1, value);
final String name5 = "a1";
try {
value = fs.getXAttr(path, name5);
Assert.fail("Get xAttr with incorrect name format should fail.");
      } catch (IOException e) {
        // Expected: an xattr name without a namespace prefix is rejected
      } catch (IllegalArgumentException e) {
        // Expected: an xattr name without a namespace prefix is rejected
      }
fs.close();
// Get all xattrs
fs = getHttpFSFileSystem();
xAttrs = fs.getXAttrs(path);
fs.close();
Assert.assertEquals(4, xAttrs.size());
Assert.assertArrayEquals(value1, xAttrs.get(name1));
Assert.assertArrayEquals(value2, xAttrs.get(name2));
Assert.assertArrayEquals(new byte[0], xAttrs.get(name3));
Assert.assertArrayEquals(value4, xAttrs.get(name4));
}
}
/** Remove xattr */
private void testRemoveXAttr() throws Exception {
if (!isLocalFS()) {
FileSystem fs = FileSystem.get(getProxiedFSConf());
fs.mkdirs(getProxiedFSTestDir());
Path path = new Path(getProxiedFSTestDir(), "foo.txt");
OutputStream os = fs.create(path);
os.write(1);
os.close();
fs.close();
final String name1 = "user.a1";
final byte[] value1 = new byte[]{0x31, 0x32, 0x33};
final String name2 = "user.a2";
final byte[] value2 = new byte[]{0x41, 0x42, 0x43};
final String name3 = "user.a3";
final byte[] value3 = null;
final String name4 = "trusted.a1";
final byte[] value4 = new byte[]{0x31, 0x32, 0x33};
final String name5 = "a1";
fs = FileSystem.get(getProxiedFSConf());
fs.setXAttr(path, name1, value1);
fs.setXAttr(path, name2, value2);
fs.setXAttr(path, name3, value3);
fs.setXAttr(path, name4, value4);
fs.close();
fs = getHttpFSFileSystem();
fs.removeXAttr(path, name1);
fs.removeXAttr(path, name3);
fs.removeXAttr(path, name4);
try {
fs.removeXAttr(path, name5);
Assert.fail("Remove xAttr with incorrect name format should fail.");
      } catch (IOException e) {
        // Expected: an xattr name without a namespace prefix is rejected
      } catch (IllegalArgumentException e) {
        // Expected: an xattr name without a namespace prefix is rejected
      }
fs = FileSystem.get(getProxiedFSConf());
Map<String, byte[]> xAttrs = fs.getXAttrs(path);
fs.close();
Assert.assertEquals(1, xAttrs.size());
Assert.assertArrayEquals(value2, xAttrs.get(name2));
}
}
/** List xattrs */
private void testListXAttrs() throws Exception {
if (!isLocalFS()) {
FileSystem fs = FileSystem.get(getProxiedFSConf());
fs.mkdirs(getProxiedFSTestDir());
Path path = new Path(getProxiedFSTestDir(), "foo.txt");
OutputStream os = fs.create(path);
os.write(1);
os.close();
fs.close();
final String name1 = "user.a1";
final byte[] value1 = new byte[]{0x31, 0x32, 0x33};
final String name2 = "user.a2";
final byte[] value2 = new byte[]{0x41, 0x42, 0x43};
final String name3 = "user.a3";
final byte[] value3 = null;
final String name4 = "trusted.a1";
final byte[] value4 = new byte[]{0x31, 0x32, 0x33};
fs = FileSystem.get(getProxiedFSConf());
fs.setXAttr(path, name1, value1);
fs.setXAttr(path, name2, value2);
fs.setXAttr(path, name3, value3);
fs.setXAttr(path, name4, value4);
fs.close();
fs = getHttpFSFileSystem();
List<String> names = fs.listXAttrs(path);
Assert.assertEquals(4, names.size());
Assert.assertTrue(names.contains(name1));
Assert.assertTrue(names.contains(name2));
Assert.assertTrue(names.contains(name3));
Assert.assertTrue(names.contains(name4));
}
}
/**
* Runs assertions testing that two AclStatus objects contain the same info
* @param a First AclStatus
* @param b Second AclStatus
* @throws Exception
*/
private void assertSameAcls(AclStatus a, AclStatus b) throws Exception {
Assert.assertTrue(a.getOwner().equals(b.getOwner()));
Assert.assertTrue(a.getGroup().equals(b.getGroup()));
Assert.assertTrue(a.isStickyBit() == b.isStickyBit());
Assert.assertTrue(a.getEntries().size() == b.getEntries().size());
for (AclEntry e : a.getEntries()) {
Assert.assertTrue(b.getEntries().contains(e));
}
for (AclEntry e : b.getEntries()) {
Assert.assertTrue(a.getEntries().contains(e));
}
}
/**
* Simple ACL tests on a file: Set an acl, add an acl, remove one acl,
* and remove all acls.
* @throws Exception
*/
private void testFileAcls() throws Exception {
if ( isLocalFS() ) {
return;
}
final String aclUser1 = "user:foo:rw-";
final String aclUser2 = "user:bar:r--";
final String aclGroup1 = "group::r--";
final String aclSet = "user::rwx," + aclUser1 + ","
+ aclGroup1 + ",other::---";
FileSystem proxyFs = FileSystem.get(getProxiedFSConf());
FileSystem httpfs = getHttpFSFileSystem();
Path path = new Path(getProxiedFSTestDir(), "testAclStatus.txt");
OutputStream os = proxyFs.create(path);
os.write(1);
os.close();
AclStatus proxyAclStat = proxyFs.getAclStatus(path);
AclStatus httpfsAclStat = httpfs.getAclStatus(path);
assertSameAcls(httpfsAclStat, proxyAclStat);
httpfs.setAcl(path, AclEntry.parseAclSpec(aclSet,true));
proxyAclStat = proxyFs.getAclStatus(path);
httpfsAclStat = httpfs.getAclStatus(path);
assertSameAcls(httpfsAclStat, proxyAclStat);
httpfs.modifyAclEntries(path, AclEntry.parseAclSpec(aclUser2, true));
proxyAclStat = proxyFs.getAclStatus(path);
httpfsAclStat = httpfs.getAclStatus(path);
assertSameAcls(httpfsAclStat, proxyAclStat);
httpfs.removeAclEntries(path, AclEntry.parseAclSpec(aclUser1, true));
proxyAclStat = proxyFs.getAclStatus(path);
httpfsAclStat = httpfs.getAclStatus(path);
assertSameAcls(httpfsAclStat, proxyAclStat);
httpfs.removeAcl(path);
proxyAclStat = proxyFs.getAclStatus(path);
httpfsAclStat = httpfs.getAclStatus(path);
assertSameAcls(httpfsAclStat, proxyAclStat);
}
/**
* Simple acl tests on a directory: set a default acl, remove default acls.
* @throws Exception
*/
private void testDirAcls() throws Exception {
if ( isLocalFS() ) {
return;
}
final String defUser1 = "default:user:glarch:r-x";
FileSystem proxyFs = FileSystem.get(getProxiedFSConf());
FileSystem httpfs = getHttpFSFileSystem();
Path dir = getProxiedFSTestDir();
/* ACL Status on a directory */
AclStatus proxyAclStat = proxyFs.getAclStatus(dir);
AclStatus httpfsAclStat = httpfs.getAclStatus(dir);
assertSameAcls(httpfsAclStat, proxyAclStat);
/* Set a default ACL on the directory */
httpfs.setAcl(dir, (AclEntry.parseAclSpec(defUser1,true)));
proxyAclStat = proxyFs.getAclStatus(dir);
httpfsAclStat = httpfs.getAclStatus(dir);
assertSameAcls(httpfsAclStat, proxyAclStat);
/* Remove the default ACL */
httpfs.removeDefaultAcl(dir);
proxyAclStat = proxyFs.getAclStatus(dir);
httpfsAclStat = httpfs.getAclStatus(dir);
assertSameAcls(httpfsAclStat, proxyAclStat);
}
protected enum Operation {
GET, OPEN, CREATE, APPEND, TRUNCATE, CONCAT, RENAME, DELETE, LIST_STATUS,
WORKING_DIRECTORY, MKDIRS, SET_TIMES, SET_PERMISSION, SET_OWNER,
SET_REPLICATION, CHECKSUM, CONTENT_SUMMARY, FILEACLS, DIRACLS, SET_XATTR,
GET_XATTRS, REMOVE_XATTR, LIST_XATTRS
}
private void operation(Operation op) throws Exception {
switch (op) {
case GET:
testGet();
break;
case OPEN:
testOpen();
break;
case CREATE:
testCreate();
break;
case APPEND:
testAppend();
break;
case TRUNCATE:
testTruncate();
break;
case CONCAT:
testConcat();
break;
case RENAME:
testRename();
break;
case DELETE:
testDelete();
break;
case LIST_STATUS:
testListStatus();
break;
case WORKING_DIRECTORY:
testWorkingdirectory();
break;
case MKDIRS:
testMkdirs();
break;
case SET_TIMES:
testSetTimes();
break;
case SET_PERMISSION:
testSetPermission();
break;
case SET_OWNER:
testSetOwner();
break;
case SET_REPLICATION:
testSetReplication();
break;
case CHECKSUM:
testChecksum();
break;
case CONTENT_SUMMARY:
testContentSummary();
break;
case FILEACLS:
testFileAcls();
break;
case DIRACLS:
testDirAcls();
break;
case SET_XATTR:
testSetXAttr();
break;
case REMOVE_XATTR:
testRemoveXAttr();
break;
case GET_XATTRS:
testGetXAttrs();
break;
case LIST_XATTRS:
testListXAttrs();
break;
}
}
@Parameterized.Parameters
public static Collection operations() {
Object[][] ops = new Object[Operation.values().length][];
for (int i = 0; i < Operation.values().length; i++) {
ops[i] = new Object[]{Operation.values()[i]};
}
    // To test one or a subset of operations, return a reduced list here,
    // e.g. via the operationsSubset sketch below:
    // return Arrays.asList(new Object[][]{ new Object[]{Operation.APPEND}});
return Arrays.asList(ops);
}
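  /**
   * Minimal sketch (introduced here, not referenced by the tests): builds the
   * parameter list for a subset of operations, which can be swapped into
   * operations() while debugging a single case.
   */
  private static Collection<Object[]> operationsSubset(Operation... subset) {
    Object[][] ops = new Object[subset.length][];
    for (int i = 0; i < subset.length; i++) {
      ops[i] = new Object[]{subset[i]};
    }
    return Arrays.asList(ops);
  }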
private Operation operation;
public BaseTestHttpFSWith(Operation operation) {
this.operation = operation;
}
@Test
@TestDir
@TestJetty
@TestHdfs
public void testOperation() throws Exception {
createHttpFSServer();
operation(operation);
}
@Test
@TestDir
@TestJetty
@TestHdfs
public void testOperationDoAs() throws Exception {
createHttpFSServer();
UserGroupInformation ugi = UserGroupInformation.createProxyUser(HadoopUsersConfTestHelper.getHadoopUsers()[0],
UserGroupInformation.getCurrentUser());
ugi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
operation(operation);
return null;
}
});
}
}
| 32,127 | 33.251599 | 114 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSWithKerberos.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.http.server;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.DelegationTokenRenewer;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
import org.apache.hadoop.test.HFSTestCase;
import org.apache.hadoop.test.KerberosTestUtils;
import org.apache.hadoop.test.TestDir;
import org.apache.hadoop.test.TestDirHelper;
import org.apache.hadoop.test.TestHdfs;
import org.apache.hadoop.test.TestHdfsHelper;
import org.apache.hadoop.test.TestJetty;
import org.apache.hadoop.test.TestJettyHelper;
import org.json.simple.JSONObject;
import org.json.simple.parser.JSONParser;
import org.junit.After;
import org.junit.Assert;
import org.junit.Test;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.webapp.WebAppContext;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.Writer;
import java.net.HttpURLConnection;
import java.net.URI;
import java.net.URL;
import java.security.PrivilegedExceptionAction;
import java.util.concurrent.Callable;
public class TestHttpFSWithKerberos extends HFSTestCase {
@After
public void resetUGI() {
Configuration conf = new Configuration();
UserGroupInformation.setConfiguration(conf);
}
private void createHttpFSServer() throws Exception {
File homeDir = TestDirHelper.getTestDir();
Assert.assertTrue(new File(homeDir, "conf").mkdir());
Assert.assertTrue(new File(homeDir, "log").mkdir());
Assert.assertTrue(new File(homeDir, "temp").mkdir());
HttpFSServerWebApp.setHomeDirForCurrentThread(homeDir.getAbsolutePath());
File secretFile = new File(new File(homeDir, "conf"), "secret");
Writer w = new FileWriter(secretFile);
w.write("secret");
w.close();
//HDFS configuration
File hadoopConfDir = new File(new File(homeDir, "conf"), "hadoop-conf");
hadoopConfDir.mkdirs();
String fsDefaultName = TestHdfsHelper.getHdfsConf()
.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY);
Configuration conf = new Configuration(false);
conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, fsDefaultName);
File hdfsSite = new File(hadoopConfDir, "hdfs-site.xml");
OutputStream os = new FileOutputStream(hdfsSite);
conf.writeXml(os);
os.close();
conf = new Configuration(false);
conf.set("httpfs.proxyuser.client.hosts", "*");
conf.set("httpfs.proxyuser.client.groups", "*");
conf.set("httpfs.authentication.type", "kerberos");
conf.set("httpfs.authentication.signature.secret.file",
secretFile.getAbsolutePath());
File httpfsSite = new File(new File(homeDir, "conf"), "httpfs-site.xml");
os = new FileOutputStream(httpfsSite);
conf.writeXml(os);
os.close();
ClassLoader cl = Thread.currentThread().getContextClassLoader();
URL url = cl.getResource("webapp");
WebAppContext context = new WebAppContext(url.getPath(), "/webhdfs");
Server server = TestJettyHelper.getJettyServer();
server.addHandler(context);
server.start();
HttpFSServerWebApp.get().setAuthority(TestJettyHelper.getAuthority());
}
@Test
@TestDir
@TestJetty
@TestHdfs
public void testValidHttpFSAccess() throws Exception {
createHttpFSServer();
KerberosTestUtils.doAsClient(new Callable<Void>() {
@Override
public Void call() throws Exception {
URL url = new URL(TestJettyHelper.getJettyURL(),
"/webhdfs/v1/?op=GETHOMEDIRECTORY");
AuthenticatedURL aUrl = new AuthenticatedURL();
AuthenticatedURL.Token aToken = new AuthenticatedURL.Token();
HttpURLConnection conn = aUrl.openConnection(url, aToken);
Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
return null;
}
});
}
@Test
@TestDir
@TestJetty
@TestHdfs
  public void testInvalidHttpFSAccess() throws Exception {
createHttpFSServer();
URL url = new URL(TestJettyHelper.getJettyURL(),
"/webhdfs/v1/?op=GETHOMEDIRECTORY");
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
Assert.assertEquals(conn.getResponseCode(),
HttpURLConnection.HTTP_UNAUTHORIZED);
}
@Test
@TestDir
@TestJetty
@TestHdfs
public void testDelegationTokenHttpFSAccess() throws Exception {
createHttpFSServer();
KerberosTestUtils.doAsClient(new Callable<Void>() {
@Override
public Void call() throws Exception {
//get delegation token doing SPNEGO authentication
URL url = new URL(TestJettyHelper.getJettyURL(),
"/webhdfs/v1/?op=GETDELEGATIONTOKEN");
AuthenticatedURL aUrl = new AuthenticatedURL();
AuthenticatedURL.Token aToken = new AuthenticatedURL.Token();
HttpURLConnection conn = aUrl.openConnection(url, aToken);
Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
JSONObject json = (JSONObject) new JSONParser()
.parse(new InputStreamReader(conn.getInputStream()));
json =
(JSONObject) json
.get(DelegationTokenAuthenticator.DELEGATION_TOKEN_JSON);
String tokenStr = (String) json
.get(DelegationTokenAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON);
//access httpfs using the delegation token
url = new URL(TestJettyHelper.getJettyURL(),
"/webhdfs/v1/?op=GETHOMEDIRECTORY&delegation=" +
tokenStr);
conn = (HttpURLConnection) url.openConnection();
Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
//try to renew the delegation token without SPNEGO credentials
url = new URL(TestJettyHelper.getJettyURL(),
"/webhdfs/v1/?op=RENEWDELEGATIONTOKEN&token=" + tokenStr);
conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod("PUT");
Assert.assertEquals(conn.getResponseCode(),
HttpURLConnection.HTTP_UNAUTHORIZED);
//renew the delegation token with SPNEGO credentials
url = new URL(TestJettyHelper.getJettyURL(),
"/webhdfs/v1/?op=RENEWDELEGATIONTOKEN&token=" + tokenStr);
conn = aUrl.openConnection(url, aToken);
conn.setRequestMethod("PUT");
Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
//cancel delegation token, no need for SPNEGO credentials
url = new URL(TestJettyHelper.getJettyURL(),
"/webhdfs/v1/?op=CANCELDELEGATIONTOKEN&token=" +
tokenStr);
conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod("PUT");
Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
//try to access httpfs with the canceled delegation token
url = new URL(TestJettyHelper.getJettyURL(),
"/webhdfs/v1/?op=GETHOMEDIRECTORY&delegation=" +
tokenStr);
conn = (HttpURLConnection) url.openConnection();
Assert.assertEquals(conn.getResponseCode(),
HttpURLConnection.HTTP_UNAUTHORIZED);
return null;
}
});
}
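  /*
   * For reference: the GETDELEGATIONTOKEN response parsed above is expected to
   * be shaped roughly like {"Token":{"urlString":"<token string>"}}, assuming
   * the usual values of the DelegationTokenAuthenticator constants used in the
   * test; the literal key names shown here are an illustration, not verified.
   */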
@SuppressWarnings("deprecation")
private void testDelegationTokenWithFS(Class fileSystemClass)
throws Exception {
createHttpFSServer();
Configuration conf = new Configuration();
conf.set("fs.webhdfs.impl", fileSystemClass.getName());
conf.set("fs.hdfs.impl.disable.cache", "true");
URI uri = new URI( "webhdfs://" +
TestJettyHelper.getJettyURL().toURI().getAuthority());
FileSystem fs = FileSystem.get(uri, conf);
Token<?> tokens[] = fs.addDelegationTokens("foo", null);
fs.close();
Assert.assertEquals(1, tokens.length);
fs = FileSystem.get(uri, conf);
((DelegationTokenRenewer.Renewable) fs).setDelegationToken(tokens[0]);
fs.listStatus(new Path("/"));
fs.close();
}
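  /**
   * Illustrative sketch only (not called by any test here): the delegation
   * tokens fetched above could instead be collected into a Credentials object
   * and attached to a UserGroupInformation, which is the more common handoff
   * in client code. The method name is introduced just for this example.
   */
  private void collectDelegationTokensSketch(FileSystem fs) throws Exception {
    org.apache.hadoop.security.Credentials creds =
        new org.apache.hadoop.security.Credentials();
    // addDelegationTokens stores the fetched tokens in the Credentials object
    // in addition to returning them.
    fs.addDelegationTokens("foo", creds);
    UserGroupInformation.getCurrentUser().addCredentials(creds);
  }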
private void testDelegationTokenWithinDoAs(
final Class fileSystemClass, boolean proxyUser) throws Exception {
Configuration conf = new Configuration();
conf.set("hadoop.security.authentication", "kerberos");
UserGroupInformation.setConfiguration(conf);
UserGroupInformation.loginUserFromKeytab("client",
"/Users/tucu/tucu.keytab");
UserGroupInformation ugi = UserGroupInformation.getLoginUser();
if (proxyUser) {
ugi = UserGroupInformation.createProxyUser("foo", ugi);
}
conf = new Configuration();
UserGroupInformation.setConfiguration(conf);
ugi.doAs(
new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
testDelegationTokenWithFS(fileSystemClass);
return null;
}
});
}
@Test
@TestDir
@TestJetty
@TestHdfs
public void testDelegationTokenWithHttpFSFileSystem() throws Exception {
testDelegationTokenWithinDoAs(HttpFSFileSystem.class, false);
}
@Test
@TestDir
@TestJetty
@TestHdfs
public void testDelegationTokenWithWebhdfsFileSystem() throws Exception {
testDelegationTokenWithinDoAs(WebHdfsFileSystem.class, false);
}
@Test
@TestDir
@TestJetty
@TestHdfs
public void testDelegationTokenWithHttpFSFileSystemProxyUser()
throws Exception {
testDelegationTokenWithinDoAs(HttpFSFileSystem.class, true);
}
  // TODO: WebHdfsFileSystem does not yet work with ProxyUser (HDFS-3509)
// @Test
// @TestDir
// @TestJetty
// @TestHdfs
// public void testDelegationTokenWithWebhdfsFileSystemProxyUser()
// throws Exception {
// testDelegationTokenWithinDoAs(WebHdfsFileSystem.class, true);
// }
}
| 11,091 | 36.856655 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServerNoXAttrs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.http.server;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.test.HTestCase;
import org.apache.hadoop.test.HadoopUsersConfTestHelper;
import org.apache.hadoop.test.TestDir;
import org.apache.hadoop.test.TestDirHelper;
import org.apache.hadoop.test.TestHdfs;
import org.apache.hadoop.test.TestJetty;
import org.apache.hadoop.test.TestJettyHelper;
import org.junit.Assert;
import org.junit.Test;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.webapp.WebAppContext;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.Writer;
import java.net.HttpURLConnection;
import java.net.URL;
import java.text.MessageFormat;
/**
* This test class ensures that everything works as expected when XAttr
 * support is turned off in HDFS. This is the default configuration. The other
* tests operate with XAttr support turned on.
*/
public class TestHttpFSServerNoXAttrs extends HTestCase {
private MiniDFSCluster miniDfs;
private Configuration nnConf;
/**
* Fire up our own hand-rolled MiniDFSCluster. We do this here instead
* of relying on TestHdfsHelper because we don't want to turn on XAttr
* support.
*
* @throws Exception
*/
private void startMiniDFS() throws Exception {
File testDirRoot = TestDirHelper.getTestDir();
if (System.getProperty("hadoop.log.dir") == null) {
System.setProperty("hadoop.log.dir",
new File(testDirRoot, "hadoop-log").getAbsolutePath());
}
if (System.getProperty("test.build.data") == null) {
System.setProperty("test.build.data",
new File(testDirRoot, "hadoop-data").getAbsolutePath());
}
Configuration conf = HadoopUsersConfTestHelper.getBaseConf();
HadoopUsersConfTestHelper.addUserConf(conf);
conf.set("fs.hdfs.impl.disable.cache", "true");
conf.set("dfs.block.access.token.enable", "false");
conf.set("dfs.permissions", "true");
conf.set("hadoop.security.authentication", "simple");
// Explicitly turn off XAttr support
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, false);
MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
builder.numDataNodes(2);
miniDfs = builder.build();
nnConf = miniDfs.getConfiguration(0);
}
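  /*
   * Illustrative sketch (kept in a comment because org.junit.After is not
   * imported in this file and the original test defines no teardown): the
   * hand-rolled cluster above could be shut down like this.
   *
   * @After
   * public void shutdownMiniDFS() {
   *   if (miniDfs != null) {
   *     miniDfs.shutdown();
   *     miniDfs = null;
   *   }
   * }
   */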
/**
* Create an HttpFS Server to talk to the MiniDFSCluster we created.
* @throws Exception
*/
private void createHttpFSServer() throws Exception {
File homeDir = TestDirHelper.getTestDir();
Assert.assertTrue(new File(homeDir, "conf").mkdir());
Assert.assertTrue(new File(homeDir, "log").mkdir());
Assert.assertTrue(new File(homeDir, "temp").mkdir());
HttpFSServerWebApp.setHomeDirForCurrentThread(homeDir.getAbsolutePath());
File secretFile = new File(new File(homeDir, "conf"), "secret");
Writer w = new FileWriter(secretFile);
w.write("secret");
w.close();
// HDFS configuration
File hadoopConfDir = new File(new File(homeDir, "conf"), "hadoop-conf");
if ( !hadoopConfDir.mkdirs() ) {
      throw new IOException("Unable to create Hadoop conf dir: " + hadoopConfDir);
}
String fsDefaultName =
nnConf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY);
Configuration conf = new Configuration(false);
conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, fsDefaultName);
// Explicitly turn off XAttr support
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, false);
File hdfsSite = new File(hadoopConfDir, "hdfs-site.xml");
OutputStream os = new FileOutputStream(hdfsSite);
conf.writeXml(os);
os.close();
// HTTPFS configuration
conf = new Configuration(false);
conf.set("httpfs.hadoop.config.dir", hadoopConfDir.toString());
conf.set("httpfs.proxyuser." +
HadoopUsersConfTestHelper.getHadoopProxyUser() + ".groups",
HadoopUsersConfTestHelper.getHadoopProxyUserGroups());
conf.set("httpfs.proxyuser." +
HadoopUsersConfTestHelper.getHadoopProxyUser() + ".hosts",
HadoopUsersConfTestHelper.getHadoopProxyUserHosts());
conf.set("httpfs.authentication.signature.secret.file",
secretFile.getAbsolutePath());
File httpfsSite = new File(new File(homeDir, "conf"), "httpfs-site.xml");
os = new FileOutputStream(httpfsSite);
conf.writeXml(os);
os.close();
ClassLoader cl = Thread.currentThread().getContextClassLoader();
URL url = cl.getResource("webapp");
if ( url == null ) {
      throw new IOException("Could not find the 'webapp' resource on the classpath");
}
WebAppContext context = new WebAppContext(url.getPath(), "/webhdfs");
Server server = TestJettyHelper.getJettyServer();
server.addHandler(context);
server.start();
}
/**
   * Talks to the http interface to issue the given XAttr-related command on
   * the given file and verifies that the server rejects it because XAttr
   * support is disabled.
*
* @param filename The file to query.
* @param command Either GETXATTRS, SETXATTR, or REMOVEXATTR
* @throws Exception
*/
private void getStatus(String filename, String command)
throws Exception {
String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
// Remove leading / from filename
if ( filename.charAt(0) == '/' ) {
filename = filename.substring(1);
}
String pathOps = MessageFormat.format(
"/webhdfs/v1/{0}?user.name={1}&op={2}",
filename, user, command);
URL url = new URL(TestJettyHelper.getJettyURL(), pathOps);
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.connect();
int resp = conn.getResponseCode();
BufferedReader reader;
Assert.assertEquals(HttpURLConnection.HTTP_INTERNAL_ERROR, resp);
reader = new BufferedReader(new InputStreamReader(conn.getErrorStream()));
String res = reader.readLine();
Assert.assertTrue(res.contains("RemoteException"));
Assert.assertTrue(res.contains("XAttr"));
Assert.assertTrue(res.contains("rejected"));
}
/**
* General-purpose http PUT command to the httpfs server.
* @param filename The file to operate upon
* @param command The command to perform (SETXATTR, etc)
* @param params Parameters
*/
private void putCmd(String filename, String command,
String params) throws Exception {
String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
// Remove leading / from filename
if ( filename.charAt(0) == '/' ) {
filename = filename.substring(1);
}
String pathOps = MessageFormat.format(
"/webhdfs/v1/{0}?user.name={1}{2}{3}&op={4}",
filename, user, (params == null) ? "" : "&",
(params == null) ? "" : params, command);
URL url = new URL(TestJettyHelper.getJettyURL(), pathOps);
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod("PUT");
conn.connect();
int resp = conn.getResponseCode();
Assert.assertEquals(HttpURLConnection.HTTP_INTERNAL_ERROR, resp);
BufferedReader reader;
reader = new BufferedReader(new InputStreamReader(conn.getErrorStream()));
String err = reader.readLine();
Assert.assertTrue(err.contains("RemoteException"));
Assert.assertTrue(err.contains("XAttr"));
Assert.assertTrue(err.contains("rejected"));
}
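  /*
   * For reference: both getStatus() and putCmd() above only assert that the
   * error body mentions "RemoteException", "XAttr" and "rejected"; the exact
   * message text comes from the NameNode and is not pinned down here.
   */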
/**
* Ensure that GETXATTRS, SETXATTR, REMOVEXATTR fail.
*/
@Test
@TestDir
@TestJetty
@TestHdfs
public void testWithXAttrs() throws Exception {
final String name1 = "user.a1";
final byte[] value1 = new byte[]{0x31, 0x32, 0x33};
final String dir = "/noXAttr";
final String path = dir + "/file";
startMiniDFS();
createHttpFSServer();
FileSystem fs = FileSystem.get(nnConf);
fs.mkdirs(new Path(dir));
OutputStream os = fs.create(new Path(path));
os.write(1);
os.close();
/* GETXATTRS, SETXATTR, REMOVEXATTR fail */
getStatus(path, "GETXATTRS");
putCmd(path, "SETXATTR", TestHttpFSServer.setXAttrParam(name1, value1));
putCmd(path, "REMOVEXATTR", "xattr.name=" + name1);
}
}
| 9,184 | 36.03629 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/HttpFSKerberosAuthenticationHandlerForTesting.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.http.server;
import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticationHandler;
import javax.servlet.ServletException;
import java.util.Properties;
public class HttpFSKerberosAuthenticationHandlerForTesting
extends KerberosDelegationTokenAuthenticationHandler {
@Override
public void init(Properties config) throws ServletException {
//NOP overwrite to avoid Kerberos initialization
config.setProperty(TOKEN_KIND, "t");
initTokenManager(config);
}
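  // For reference, tests plug this handler in through the HttpFS config, e.g.
  // (see TestHttpFSServer#createHttpFSServer):
  //   conf.set("httpfs.authentication.type",
  //       HttpFSKerberosAuthenticationHandlerForTesting.class.getName());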
@Override
public void destroy() {
//NOP overwrite to avoid Kerberos initialization
}
}
| 1,440 | 35.025 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.http.server;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.security.authentication.util.SignerSecretProvider;
import org.apache.hadoop.security.authentication.util.StringSignerSecretProviderCreator;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticationHandler;
import org.json.simple.JSONArray;
import org.junit.Assert;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.Writer;
import java.net.HttpURLConnection;
import java.net.URL;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttrCodec;
import org.apache.hadoop.lib.server.Service;
import org.apache.hadoop.lib.server.ServiceException;
import org.apache.hadoop.lib.service.Groups;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.server.AuthenticationToken;
import org.apache.hadoop.security.authentication.util.Signer;
import org.apache.hadoop.test.HFSTestCase;
import org.apache.hadoop.test.HadoopUsersConfTestHelper;
import org.apache.hadoop.test.TestDir;
import org.apache.hadoop.test.TestDirHelper;
import org.apache.hadoop.test.TestHdfs;
import org.apache.hadoop.test.TestHdfsHelper;
import org.apache.hadoop.test.TestJetty;
import org.apache.hadoop.test.TestJettyHelper;
import org.json.simple.JSONObject;
import org.json.simple.parser.JSONParser;
import org.junit.Test;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.webapp.WebAppContext;
import com.google.common.collect.Maps;
import java.util.Properties;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
public class TestHttpFSServer extends HFSTestCase {
@Test
@TestDir
@TestJetty
public void server() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration httpfsConf = new Configuration(false);
HttpFSServerWebApp server = new HttpFSServerWebApp(dir, dir, dir, dir, httpfsConf);
server.init();
server.destroy();
}
public static class MockGroups implements Service,Groups {
@Override
public void init(org.apache.hadoop.lib.server.Server server) throws ServiceException {
}
@Override
public void postInit() throws ServiceException {
}
@Override
public void destroy() {
}
@Override
public Class[] getServiceDependencies() {
return new Class[0];
}
@Override
public Class getInterface() {
return Groups.class;
}
@Override
public void serverStatusChange(org.apache.hadoop.lib.server.Server.Status oldStatus,
org.apache.hadoop.lib.server.Server.Status newStatus) throws ServiceException {
}
@Override
public List<String> getGroups(String user) throws IOException {
return Arrays.asList(HadoopUsersConfTestHelper.getHadoopUserGroups(user));
}
}
private void createHttpFSServer(boolean addDelegationTokenAuthHandler)
throws Exception {
File homeDir = TestDirHelper.getTestDir();
Assert.assertTrue(new File(homeDir, "conf").mkdir());
Assert.assertTrue(new File(homeDir, "log").mkdir());
Assert.assertTrue(new File(homeDir, "temp").mkdir());
HttpFSServerWebApp.setHomeDirForCurrentThread(homeDir.getAbsolutePath());
File secretFile = new File(new File(homeDir, "conf"), "secret");
Writer w = new FileWriter(secretFile);
w.write("secret");
w.close();
//HDFS configuration
File hadoopConfDir = new File(new File(homeDir, "conf"), "hadoop-conf");
hadoopConfDir.mkdirs();
String fsDefaultName = TestHdfsHelper.getHdfsConf().get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY);
Configuration conf = new Configuration(false);
conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, fsDefaultName);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
File hdfsSite = new File(hadoopConfDir, "hdfs-site.xml");
OutputStream os = new FileOutputStream(hdfsSite);
conf.writeXml(os);
os.close();
//HTTPFS configuration
conf = new Configuration(false);
if (addDelegationTokenAuthHandler) {
conf.set("httpfs.authentication.type",
HttpFSKerberosAuthenticationHandlerForTesting.class.getName());
}
conf.set("httpfs.services.ext", MockGroups.class.getName());
conf.set("httpfs.admin.group", HadoopUsersConfTestHelper.
getHadoopUserGroups(HadoopUsersConfTestHelper.getHadoopUsers()[0])[0]);
conf.set("httpfs.proxyuser." + HadoopUsersConfTestHelper.getHadoopProxyUser() + ".groups",
HadoopUsersConfTestHelper.getHadoopProxyUserGroups());
conf.set("httpfs.proxyuser." + HadoopUsersConfTestHelper.getHadoopProxyUser() + ".hosts",
HadoopUsersConfTestHelper.getHadoopProxyUserHosts());
conf.set("httpfs.authentication.signature.secret.file", secretFile.getAbsolutePath());
conf.set("httpfs.hadoop.config.dir", hadoopConfDir.toString());
File httpfsSite = new File(new File(homeDir, "conf"), "httpfs-site.xml");
os = new FileOutputStream(httpfsSite);
conf.writeXml(os);
os.close();
ClassLoader cl = Thread.currentThread().getContextClassLoader();
URL url = cl.getResource("webapp");
WebAppContext context = new WebAppContext(url.getPath(), "/webhdfs");
Server server = TestJettyHelper.getJettyServer();
server.addHandler(context);
server.start();
if (addDelegationTokenAuthHandler) {
HttpFSServerWebApp.get().setAuthority(TestJettyHelper.getAuthority());
}
}
@Test
@TestDir
@TestJetty
@TestHdfs
public void instrumentation() throws Exception {
createHttpFSServer(false);
URL url = new URL(TestJettyHelper.getJettyURL(),
MessageFormat.format("/webhdfs/v1?user.name={0}&op=instrumentation", "nobody"));
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_UNAUTHORIZED);
url = new URL(TestJettyHelper.getJettyURL(),
MessageFormat.format("/webhdfs/v1?user.name={0}&op=instrumentation",
HadoopUsersConfTestHelper.getHadoopUsers()[0]));
conn = (HttpURLConnection) url.openConnection();
Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
String line = reader.readLine();
reader.close();
Assert.assertTrue(line.contains("\"counters\":{"));
url = new URL(TestJettyHelper.getJettyURL(),
MessageFormat.format("/webhdfs/v1/foo?user.name={0}&op=instrumentation",
HadoopUsersConfTestHelper.getHadoopUsers()[0]));
conn = (HttpURLConnection) url.openConnection();
Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_BAD_REQUEST);
}
@Test
@TestDir
@TestJetty
@TestHdfs
public void testHdfsAccess() throws Exception {
createHttpFSServer(false);
String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
URL url = new URL(TestJettyHelper.getJettyURL(),
MessageFormat.format("/webhdfs/v1/?user.name={0}&op=liststatus", user));
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
reader.readLine();
reader.close();
}
@Test
@TestDir
@TestJetty
@TestHdfs
public void testGlobFilter() throws Exception {
createHttpFSServer(false);
FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
fs.mkdirs(new Path("/tmp"));
fs.create(new Path("/tmp/foo.txt")).close();
String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
URL url = new URL(TestJettyHelper.getJettyURL(),
MessageFormat.format("/webhdfs/v1/tmp?user.name={0}&op=liststatus&filter=f*", user));
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
reader.readLine();
reader.close();
}
/**
* Talks to the http interface to create a file.
*
* @param filename The file to create
* @param perms The permission field, if any (may be null)
* @throws Exception
*/
private void createWithHttp ( String filename, String perms )
throws Exception {
String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
// Remove leading / from filename
if ( filename.charAt(0) == '/' ) {
filename = filename.substring(1);
}
String pathOps;
if ( perms == null ) {
pathOps = MessageFormat.format(
"/webhdfs/v1/{0}?user.name={1}&op=CREATE",
filename, user);
} else {
pathOps = MessageFormat.format(
"/webhdfs/v1/{0}?user.name={1}&permission={2}&op=CREATE",
filename, user, perms);
}
URL url = new URL(TestJettyHelper.getJettyURL(), pathOps);
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.addRequestProperty("Content-Type", "application/octet-stream");
conn.setRequestMethod("PUT");
conn.connect();
Assert.assertEquals(HttpURLConnection.HTTP_CREATED, conn.getResponseCode());
}
/**
* Talks to the http interface to get the json output of a *STATUS command
* on the given file.
*
* @param filename The file to query.
* @param command Either GETFILESTATUS, LISTSTATUS, or ACLSTATUS
* @return A string containing the JSON output describing the file.
* @throws Exception
*/
private String getStatus(String filename, String command)
throws Exception {
String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
// Remove leading / from filename
if ( filename.charAt(0) == '/' ) {
filename = filename.substring(1);
}
String pathOps = MessageFormat.format(
"/webhdfs/v1/{0}?user.name={1}&op={2}",
filename, user, command);
URL url = new URL(TestJettyHelper.getJettyURL(), pathOps);
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.connect();
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
BufferedReader reader =
new BufferedReader(new InputStreamReader(conn.getInputStream()));
return reader.readLine();
}
/**
* General-purpose http PUT command to the httpfs server.
* @param filename The file to operate upon
* @param command The command to perform (SETACL, etc)
* @param params Parameters, like "aclspec=..."
*/
private void putCmd(String filename, String command,
String params) throws Exception {
String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
// Remove leading / from filename
if ( filename.charAt(0) == '/' ) {
filename = filename.substring(1);
}
String pathOps = MessageFormat.format(
"/webhdfs/v1/{0}?user.name={1}{2}{3}&op={4}",
filename, user, (params == null) ? "" : "&",
(params == null) ? "" : params, command);
URL url = new URL(TestJettyHelper.getJettyURL(), pathOps);
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod("PUT");
conn.connect();
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
}
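  /*
   * Usage example (illustrative path and values only), following the
   * "aclspec=..." convention described above:
   *   putCmd("/aclTest/file", "SETACL",
   *       "aclspec=user::rwx,user:foo:rw-,group::r--,other::---");
   *   getStatus("/aclTest/file", "GETACLSTATUS");
   */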
/**
* Given the JSON output from the GETFILESTATUS call, return the
* 'permission' value.
*
* @param statusJson JSON from GETFILESTATUS
* @return The value of 'permission' in statusJson
* @throws Exception
*/
private String getPerms ( String statusJson ) throws Exception {
JSONParser parser = new JSONParser();
JSONObject jsonObject = (JSONObject) parser.parse(statusJson);
JSONObject details = (JSONObject) jsonObject.get("FileStatus");
return (String) details.get("permission");
}
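  /*
   * For reference, getPerms() expects GETFILESTATUS output shaped roughly like
   *   {"FileStatus":{"permission":"755", ...}}
   * with the other FileStatus fields omitted here.
   */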
/**
* Given the JSON output from the GETACLSTATUS call, return the
* 'entries' value as a List<String>.
* @param statusJson JSON from GETACLSTATUS
* @return A List of Strings which are the elements of the ACL entries
* @throws Exception
*/
private List<String> getAclEntries ( String statusJson ) throws Exception {
List<String> entries = new ArrayList<String>();
JSONParser parser = new JSONParser();
JSONObject jsonObject = (JSONObject) parser.parse(statusJson);
JSONObject details = (JSONObject) jsonObject.get("AclStatus");
JSONArray jsonEntries = (JSONArray) details.get("entries");
if ( jsonEntries != null ) {
for (Object e : jsonEntries) {
entries.add(e.toString());
}
}
return entries;
}
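  /*
   * For reference, getAclEntries() expects GETACLSTATUS output shaped roughly
   * like
   *   {"AclStatus":{"entries":["user:foo:rw-", "group::r--", ...], ...}}
   * where each entry string is one ACL spec element.
   */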
/**
* Parse xAttrs from JSON result of GETXATTRS call, return xAttrs Map.
* @param statusJson JSON from GETXATTRS
* @return Map<String, byte[]> xAttrs Map
* @throws Exception
*/
private Map<String, byte[]> getXAttrs(String statusJson) throws Exception {
Map<String, byte[]> xAttrs = Maps.newHashMap();
JSONParser parser = new JSONParser();
JSONObject jsonObject = (JSONObject) parser.parse(statusJson);
JSONArray jsonXAttrs = (JSONArray) jsonObject.get("XAttrs");
if (jsonXAttrs != null) {
for (Object a : jsonXAttrs) {
String name = (String) ((JSONObject)a).get("name");
String value = (String) ((JSONObject)a).get("value");
xAttrs.put(name, decodeXAttrValue(value));
}
}
return xAttrs;
}
/** Decode xattr value from string */
private byte[] decodeXAttrValue(String value) throws IOException {
if (value != null) {
return XAttrCodec.decodeValue(value);
} else {
return new byte[0];
}
}
/**
* Validate that files are created with 755 permissions when no
* 'permissions' attribute is specified, and when 'permissions'
* is specified, that value is honored.
*/
@Test
@TestDir
@TestJetty
@TestHdfs
public void testPerms() throws Exception {
createHttpFSServer(false);
FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
fs.mkdirs(new Path("/perm"));
createWithHttp("/perm/none", null);
String statusJson = getStatus("/perm/none", "GETFILESTATUS");
Assert.assertTrue("755".equals(getPerms(statusJson)));
createWithHttp("/perm/p-777", "777");
statusJson = getStatus("/perm/p-777", "GETFILESTATUS");
Assert.assertTrue("777".equals(getPerms(statusJson)));
createWithHttp("/perm/p-654", "654");
statusJson = getStatus("/perm/p-654", "GETFILESTATUS");
Assert.assertTrue("654".equals(getPerms(statusJson)));
createWithHttp("/perm/p-321", "321");
statusJson = getStatus("/perm/p-321", "GETFILESTATUS");
Assert.assertTrue("321".equals(getPerms(statusJson)));
}
/**
* Validate XAttr get/set/remove calls.
*/
@Test
@TestDir
@TestJetty
@TestHdfs
public void testXAttrs() throws Exception {
final String name1 = "user.a1";
final byte[] value1 = new byte[]{0x31, 0x32, 0x33};
final String name2 = "user.a2";
final byte[] value2 = new byte[]{0x41, 0x42, 0x43};
final String dir = "/xattrTest";
final String path = dir + "/file";
createHttpFSServer(false);
FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
fs.mkdirs(new Path(dir));
createWithHttp(path, null);
String statusJson = getStatus(path, "GETXATTRS");
Map<String, byte[]> xAttrs = getXAttrs(statusJson);
Assert.assertEquals(0, xAttrs.size());
// Set two xattrs
putCmd(path, "SETXATTR", setXAttrParam(name1, value1));
putCmd(path, "SETXATTR", setXAttrParam(name2, value2));
statusJson = getStatus(path, "GETXATTRS");
xAttrs = getXAttrs(statusJson);
Assert.assertEquals(2, xAttrs.size());
Assert.assertArrayEquals(value1, xAttrs.get(name1));
Assert.assertArrayEquals(value2, xAttrs.get(name2));
// Remove one xattr
putCmd(path, "REMOVEXATTR", "xattr.name=" + name1);
statusJson = getStatus(path, "GETXATTRS");
xAttrs = getXAttrs(statusJson);
Assert.assertEquals(1, xAttrs.size());
Assert.assertArrayEquals(value2, xAttrs.get(name2));
// Remove another xattr, then there is no xattr
putCmd(path, "REMOVEXATTR", "xattr.name=" + name2);
statusJson = getStatus(path, "GETXATTRS");
xAttrs = getXAttrs(statusJson);
Assert.assertEquals(0, xAttrs.size());
}
/** Builds the query-string params for setting an xattr via SETXATTR. */
public static String setXAttrParam(String name, byte[] value) throws IOException {
return "xattr.name=" + name + "&xattr.value=" + XAttrCodec.encodeValue(
value, XAttrCodec.HEX) + "&encoding=hex&flag=create";
}
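// For example, setXAttrParam("user.a1", new byte[]{0x31, 0x32, 0x33}) yields
// something like "xattr.name=user.a1&xattr.value=0x313233&encoding=hex&flag=create"
// (the exact value token depends on XAttrCodec's hex encoding).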
/**
* Validate the various ACL set/modify/remove calls. General strategy is
* to verify each of the following steps with GETFILESTATUS, LISTSTATUS,
* and GETACLSTATUS:
* <ol>
* <li>Create a file with no ACLs</li>
* <li>Add a user + group ACL</li>
* <li>Add another user ACL</li>
* <li>Remove the first user ACL</li>
* <li>Remove all ACLs</li>
* </ol>
*/
@Test
@TestDir
@TestJetty
@TestHdfs
public void testFileAcls() throws Exception {
final String aclUser1 = "user:foo:rw-";
final String aclUser2 = "user:bar:r--";
final String aclGroup1 = "group::r--";
final String aclSpec = "aclspec=user::rwx," + aclUser1 + ","
+ aclGroup1 + ",other::---";
final String modAclSpec = "aclspec=" + aclUser2;
final String remAclSpec = "aclspec=" + aclUser1;
final String dir = "/aclFileTest";
final String path = dir + "/test";
String statusJson;
List<String> aclEntries;
createHttpFSServer(false);
FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
fs.mkdirs(new Path(dir));
createWithHttp(path, null);
/* getfilestatus and liststatus don't have 'aclBit' in their reply */
statusJson = getStatus(path, "GETFILESTATUS");
Assert.assertEquals(-1, statusJson.indexOf("aclBit"));
statusJson = getStatus(dir, "LISTSTATUS");
Assert.assertEquals(-1, statusJson.indexOf("aclBit"));
/* getaclstatus works and returns no entries */
statusJson = getStatus(path, "GETACLSTATUS");
aclEntries = getAclEntries(statusJson);
Assert.assertTrue(aclEntries.size() == 0);
/*
* Now set an ACL on the file. (getfile|list)status should report aclBit,
* and getaclstatus should return entries matching the spec we just set.
*/
putCmd(path, "SETACL", aclSpec);
statusJson = getStatus(path, "GETFILESTATUS");
Assert.assertNotEquals(-1, statusJson.indexOf("aclBit"));
statusJson = getStatus(dir, "LISTSTATUS");
Assert.assertNotEquals(-1, statusJson.indexOf("aclBit"));
statusJson = getStatus(path, "GETACLSTATUS");
aclEntries = getAclEntries(statusJson);
Assert.assertTrue(aclEntries.size() == 2);
Assert.assertTrue(aclEntries.contains(aclUser1));
Assert.assertTrue(aclEntries.contains(aclGroup1));
/* Modify acl entries to add another user acl */
putCmd(path, "MODIFYACLENTRIES", modAclSpec);
statusJson = getStatus(path, "GETACLSTATUS");
aclEntries = getAclEntries(statusJson);
Assert.assertTrue(aclEntries.size() == 3);
Assert.assertTrue(aclEntries.contains(aclUser1));
Assert.assertTrue(aclEntries.contains(aclUser2));
Assert.assertTrue(aclEntries.contains(aclGroup1));
/* Remove the first user acl entry and verify */
putCmd(path, "REMOVEACLENTRIES", remAclSpec);
statusJson = getStatus(path, "GETACLSTATUS");
aclEntries = getAclEntries(statusJson);
Assert.assertTrue(aclEntries.size() == 2);
Assert.assertTrue(aclEntries.contains(aclUser2));
Assert.assertTrue(aclEntries.contains(aclGroup1));
/* Remove all acls and verify */
putCmd(path, "REMOVEACL", null);
statusJson = getStatus(path, "GETACLSTATUS");
aclEntries = getAclEntries(statusJson);
Assert.assertTrue(aclEntries.size() == 0);
statusJson = getStatus(path, "GETFILESTATUS");
Assert.assertEquals(-1, statusJson.indexOf("aclBit"));
statusJson = getStatus(dir, "LISTSTATUS");
Assert.assertEquals(-1, statusJson.indexOf("aclBit"));
}
/**
* Test ACL operations on a directory, including default ACLs.
* General strategy is to use GETFILESTATUS and GETACLSTATUS to verify:
* <ol>
* <li>Initial status with no ACLs</li>
* <li>The addition of a default ACL</li>
* <li>The removal of default ACLs</li>
* </ol>
*
* @throws Exception
*/
@Test
@TestDir
@TestJetty
@TestHdfs
public void testDirAcls() throws Exception {
final String defUser1 = "default:user:glarch:r-x";
final String defSpec1 = "aclspec=" + defUser1;
final String dir = "/aclDirTest";
String statusJson;
List<String> aclEntries;
createHttpFSServer(false);
FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
fs.mkdirs(new Path(dir));
/* getfilestatus and liststatus don't have 'aclBit' in their reply */
statusJson = getStatus(dir, "GETFILESTATUS");
Assert.assertEquals(-1, statusJson.indexOf("aclBit"));
/* No ACLs, either */
statusJson = getStatus(dir, "GETACLSTATUS");
aclEntries = getAclEntries(statusJson);
Assert.assertTrue(aclEntries.size() == 0);
/* Give it a default ACL and verify */
putCmd(dir, "SETACL", defSpec1);
statusJson = getStatus(dir, "GETFILESTATUS");
Assert.assertNotEquals(-1, statusJson.indexOf("aclBit"));
statusJson = getStatus(dir, "GETACLSTATUS");
aclEntries = getAclEntries(statusJson);
Assert.assertTrue(aclEntries.size() == 5);
/* Four of the entries are the generated default:(user|group|mask|other):perm; the fifth is defUser1 */
Assert.assertTrue(aclEntries.contains(defUser1));
/* Remove the default ACL and re-verify */
putCmd(dir, "REMOVEDEFAULTACL", null);
statusJson = getStatus(dir, "GETFILESTATUS");
Assert.assertEquals(-1, statusJson.indexOf("aclBit"));
statusJson = getStatus(dir, "GETACLSTATUS");
aclEntries = getAclEntries(statusJson);
Assert.assertTrue(aclEntries.size() == 0);
}
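/**
* Validate that OPEN honors the offset and length query parameters: reading
* a 4-byte file with offset=1&length=2 must return exactly bytes 1 and 2.
*/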
@Test
@TestDir
@TestJetty
@TestHdfs
public void testOpenOffsetLength() throws Exception {
createHttpFSServer(false);
byte[] array = new byte[]{0, 1, 2, 3};
FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
fs.mkdirs(new Path("/tmp"));
OutputStream os = fs.create(new Path("/tmp/foo"));
os.write(array);
os.close();
String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
URL url = new URL(TestJettyHelper.getJettyURL(),
MessageFormat.format("/webhdfs/v1/tmp/foo?user.name={0}&op=open&offset=1&length=2", user));
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
InputStream is = conn.getInputStream();
Assert.assertEquals(1, is.read());
Assert.assertEquals(2, is.read());
Assert.assertEquals(-1, is.read());
}
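/**
* Validate that a PUT request without an 'op' parameter is rejected with
* HTTP 400 Bad Request.
*/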
@Test
@TestDir
@TestJetty
@TestHdfs
public void testPutNoOperation() throws Exception {
createHttpFSServer(false);
String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
URL url = new URL(TestJettyHelper.getJettyURL(),
MessageFormat.format("/webhdfs/v1/foo?user.name={0}", user));
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setDoInput(true);
conn.setDoOutput(true);
conn.setRequestMethod("PUT");
Assert.assertEquals(HttpURLConnection.HTTP_BAD_REQUEST, conn.getResponseCode());
}
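/**
* Validate the delegation token lifecycle: an unauthenticated call is
* rejected, a signed auth cookie is accepted, GETDELEGATIONTOKEN issues a
* token that can authenticate further calls, RENEWDELEGATIONTOKEN requires
* the original credentials, and a cancelled token is refused.
*/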
@Test
@TestDir
@TestJetty
@TestHdfs
public void testDelegationTokenOperations() throws Exception {
createHttpFSServer(true);
URL url = new URL(TestJettyHelper.getJettyURL(),
"/webhdfs/v1/?op=GETHOMEDIRECTORY");
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,
conn.getResponseCode());
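// Build an authentication token and sign it with the server's shared
// secret so that it can be presented as a valid auth cookie.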
AuthenticationToken token =
new AuthenticationToken("u", "p",
new KerberosDelegationTokenAuthenticationHandler().getType());
token.setExpires(System.currentTimeMillis() + 100000000);
SignerSecretProvider secretProvider =
StringSignerSecretProviderCreator.newStringSignerSecretProvider();
Properties secretProviderProps = new Properties();
secretProviderProps.setProperty(AuthenticationFilter.SIGNATURE_SECRET, "secret");
secretProvider.init(secretProviderProps, null, -1);
Signer signer = new Signer(secretProvider);
String tokenSigned = signer.sign(token.toString());
url = new URL(TestJettyHelper.getJettyURL(),
"/webhdfs/v1/?op=GETHOMEDIRECTORY");
conn = (HttpURLConnection) url.openConnection();
conn.setRequestProperty("Cookie",
AuthenticatedURL.AUTH_COOKIE + "=" + tokenSigned);
Assert.assertEquals(HttpURLConnection.HTTP_OK,
conn.getResponseCode());
url = new URL(TestJettyHelper.getJettyURL(),
"/webhdfs/v1/?op=GETDELEGATIONTOKEN");
conn = (HttpURLConnection) url.openConnection();
conn.setRequestProperty("Cookie",
AuthenticatedURL.AUTH_COOKIE + "=" + tokenSigned);
Assert.assertEquals(HttpURLConnection.HTTP_OK,
conn.getResponseCode());
JSONObject json = (JSONObject)
new JSONParser().parse(new InputStreamReader(conn.getInputStream()));
json = (JSONObject)
json.get(DelegationTokenAuthenticator.DELEGATION_TOKEN_JSON);
String tokenStr = (String)
json.get(DelegationTokenAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON);
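// The delegation token alone should now be enough to authenticate.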
url = new URL(TestJettyHelper.getJettyURL(),
"/webhdfs/v1/?op=GETHOMEDIRECTORY&delegation=" + tokenStr);
conn = (HttpURLConnection) url.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_OK,
conn.getResponseCode());
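// Renewal is not allowed with just the delegation token; it requires the
// original (cookie-based) credentials.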
url = new URL(TestJettyHelper.getJettyURL(),
"/webhdfs/v1/?op=RENEWDELEGATIONTOKEN&token=" + tokenStr);
conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod("PUT");
Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,
conn.getResponseCode());
url = new URL(TestJettyHelper.getJettyURL(),
"/webhdfs/v1/?op=RENEWDELEGATIONTOKEN&token=" + tokenStr);
conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod("PUT");
conn.setRequestProperty("Cookie",
AuthenticatedURL.AUTH_COOKIE + "=" + tokenSigned);
Assert.assertEquals(HttpURLConnection.HTTP_OK,
conn.getResponseCode());
url = new URL(TestJettyHelper.getJettyURL(),
"/webhdfs/v1/?op=CANCELDELEGATIONTOKEN&token=" + tokenStr);
conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod("PUT");
Assert.assertEquals(HttpURLConnection.HTTP_OK,
conn.getResponseCode());
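// Once cancelled, the delegation token must be rejected.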
url = new URL(TestJettyHelper.getJettyURL(),
"/webhdfs/v1/?op=GETHOMEDIRECTORY&delegation=" + tokenStr);
conn = (HttpURLConnection) url.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_FORBIDDEN,
conn.getResponseCode());
}
}
| 28,940 | 37.130435 | 114 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServerNoACLs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.http.server;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.test.HTestCase;
import org.apache.hadoop.test.HadoopUsersConfTestHelper;
import org.apache.hadoop.test.TestDir;
import org.apache.hadoop.test.TestDirHelper;
import org.apache.hadoop.test.TestJetty;
import org.apache.hadoop.test.TestJettyHelper;
import org.junit.Assert;
import org.junit.Test;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.webapp.WebAppContext;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.Writer;
import java.net.HttpURLConnection;
import java.net.URL;
import java.text.MessageFormat;
/**
* This test class ensures that everything works as expected when ACL
* support is turned off in HDFS. This is the default configuration. The other
* tests operate with ACL support turned on.
*/
public class TestHttpFSServerNoACLs extends HTestCase {
private MiniDFSCluster miniDfs;
private Configuration nnConf;
/**
* Fire up our own hand-rolled MiniDFSCluster. We do this here instead
* of relying on TestHdfsHelper because we don't want to turn on ACL
* support.
*
* @throws Exception
*/
private void startMiniDFS() throws Exception {
File testDirRoot = TestDirHelper.getTestDir();
if (System.getProperty("hadoop.log.dir") == null) {
System.setProperty("hadoop.log.dir",
new File(testDirRoot, "hadoop-log").getAbsolutePath());
}
if (System.getProperty("test.build.data") == null) {
System.setProperty("test.build.data",
new File(testDirRoot, "hadoop-data").getAbsolutePath());
}
Configuration conf = HadoopUsersConfTestHelper.getBaseConf();
HadoopUsersConfTestHelper.addUserConf(conf);
conf.set("fs.hdfs.impl.disable.cache", "true");
conf.set("dfs.block.access.token.enable", "false");
conf.set("dfs.permissions", "true");
conf.set("hadoop.security.authentication", "simple");
// Explicitly turn off ACL support
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, false);
MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
builder.numDataNodes(2);
miniDfs = builder.build();
nnConf = miniDfs.getConfiguration(0);
}
/**
* Create an HttpFS Server to talk to the MiniDFSCluster we created.
* @throws Exception
*/
private void createHttpFSServer() throws Exception {
File homeDir = TestDirHelper.getTestDir();
Assert.assertTrue(new File(homeDir, "conf").mkdir());
Assert.assertTrue(new File(homeDir, "log").mkdir());
Assert.assertTrue(new File(homeDir, "temp").mkdir());
HttpFSServerWebApp.setHomeDirForCurrentThread(homeDir.getAbsolutePath());
File secretFile = new File(new File(homeDir, "conf"), "secret");
Writer w = new FileWriter(secretFile);
w.write("secret");
w.close();
// HDFS configuration
File hadoopConfDir = new File(new File(homeDir, "conf"), "hadoop-conf");
if ( !hadoopConfDir.mkdirs() ) {
throw new IOException("Unable to create Hadoop configuration directory " + hadoopConfDir);
}
String fsDefaultName =
nnConf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY);
Configuration conf = new Configuration(false);
conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, fsDefaultName);
// Explicitly turn off ACLs, just in case the default becomes true later
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, false);
File hdfsSite = new File(hadoopConfDir, "hdfs-site.xml");
OutputStream os = new FileOutputStream(hdfsSite);
conf.writeXml(os);
os.close();
// HTTPFS configuration
conf = new Configuration(false);
conf.set("httpfs.hadoop.config.dir", hadoopConfDir.toString());
conf.set("httpfs.proxyuser." +
HadoopUsersConfTestHelper.getHadoopProxyUser() + ".groups",
HadoopUsersConfTestHelper.getHadoopProxyUserGroups());
conf.set("httpfs.proxyuser." +
HadoopUsersConfTestHelper.getHadoopProxyUser() + ".hosts",
HadoopUsersConfTestHelper.getHadoopProxyUserHosts());
conf.set("httpfs.authentication.signature.secret.file",
secretFile.getAbsolutePath());
File httpfsSite = new File(new File(homeDir, "conf"), "httpfs-site.xml");
os = new FileOutputStream(httpfsSite);
conf.writeXml(os);
os.close();
ClassLoader cl = Thread.currentThread().getContextClassLoader();
URL url = cl.getResource("webapp");
if ( url == null ) {
throw new IOException("Could not find 'webapp' resource on the classpath");
}
WebAppContext context = new WebAppContext(url.getPath(), "/webhdfs");
Server server = TestJettyHelper.getJettyServer();
server.addHandler(context);
server.start();
}
/**
* Talks to the http interface to get the json output of a *STATUS command
* on the given file.
*
* @param filename The file to query.
* @param command Either GETFILESTATUS, LISTSTATUS, or GETACLSTATUS
* @param expectOK Is this operation expected to succeed?
* @throws Exception
*/
private void getStatus(String filename, String command, boolean expectOK)
throws Exception {
String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
// Remove leading / from filename
if ( filename.charAt(0) == '/' ) {
filename = filename.substring(1);
}
String pathOps = MessageFormat.format(
"/webhdfs/v1/{0}?user.name={1}&op={2}",
filename, user, command);
URL url = new URL(TestJettyHelper.getJettyURL(), pathOps);
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.connect();
int resp = conn.getResponseCode();
BufferedReader reader;
if ( expectOK ) {
Assert.assertEquals(HttpURLConnection.HTTP_OK, resp);
reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
String res = reader.readLine();
Assert.assertTrue(!res.contains("aclBit"));
Assert.assertTrue(res.contains("owner")); // basic sanity check
} else {
Assert.assertEquals(HttpURLConnection.HTTP_INTERNAL_ERROR, resp);
reader = new BufferedReader(new InputStreamReader(conn.getErrorStream()));
String res = reader.readLine();
Assert.assertTrue(res.contains("AclException"));
Assert.assertTrue(res.contains("Support for ACLs has been disabled"));
}
}
/**
* General-purpose http PUT command to the httpfs server.
* @param filename The file to operate upon
* @param command The command to perform (SETACL, etc)
* @param params Parameters, like "aclspec=..."
*/
private void putCmd(String filename, String command,
String params, boolean expectOK) throws Exception {
String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
// Remove leading / from filename
if ( filename.charAt(0) == '/' ) {
filename = filename.substring(1);
}
String pathOps = MessageFormat.format(
"/webhdfs/v1/{0}?user.name={1}{2}{3}&op={4}",
filename, user, (params == null) ? "" : "&",
(params == null) ? "" : params, command);
URL url = new URL(TestJettyHelper.getJettyURL(), pathOps);
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod("PUT");
conn.connect();
int resp = conn.getResponseCode();
if ( expectOK ) {
Assert.assertEquals(HttpURLConnection.HTTP_OK, resp);
} else {
Assert.assertEquals(HttpURLConnection.HTTP_INTERNAL_ERROR, resp);
BufferedReader reader;
reader = new BufferedReader(new InputStreamReader(conn.getErrorStream()));
String err = reader.readLine();
Assert.assertTrue(err.contains("AclException"));
Assert.assertTrue(err.contains("Support for ACLs has been disabled"));
}
}
/**
* Ensure that
* <ol>
* <li>GETFILESTATUS and LISTSTATUS work happily</li>
* <li>GETACLSTATUS throws an exception</li>
* <li>The ACL SET, REMOVE, etc calls all fail</li>
* </ol>
*
* @throws Exception
*/
@Test
@TestDir
@TestJetty
public void testWithNoAcls() throws Exception {
final String aclUser1 = "user:foo:rw-";
final String aclUser2 = "user:bar:r--";
final String aclGroup1 = "group::r--";
final String aclSpec = "aclspec=user::rwx," + aclUser1 + ","
+ aclGroup1 + ",other::---";
final String modAclSpec = "aclspec=" + aclUser2;
final String remAclSpec = "aclspec=" + aclUser1;
final String defUser1 = "default:user:glarch:r-x";
final String defSpec1 = "aclspec=" + defUser1;
final String dir = "/noACLs";
final String path = dir + "/foo";
startMiniDFS();
createHttpFSServer();
FileSystem fs = FileSystem.get(nnConf);
fs.mkdirs(new Path(dir));
OutputStream os = fs.create(new Path(path));
os.write(1);
os.close();
/* The normal status calls work as expected; GETACLSTATUS fails */
getStatus(path, "GETFILESTATUS", true);
getStatus(dir, "LISTSTATUS", true);
getStatus(path, "GETACLSTATUS", false);
/* All the ACL-based PUT commands fail with ACL exceptions */
putCmd(path, "SETACL", aclSpec, false);
putCmd(path, "MODIFYACLENTRIES", modAclSpec, false);
putCmd(path, "REMOVEACLENTRIES", remAclSpec, false);
putCmd(path, "REMOVEACL", null, false);
putCmd(dir, "SETACL", defSpec1, false);
putCmd(dir, "REMOVEDEFAULTACL", null, false);
miniDfs.shutdown();
}
}
| 10,572 | 36.626335 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestCheckUploadContentTypeFilter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.http.server;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
import org.junit.Test;
import org.mockito.Mockito;
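/**
* Tests for CheckUploadContentTypeFilter: upload requests (CREATE via PUT,
* APPEND via POST, with data=true) must carry an octet-stream content type,
* while non-upload operations are passed through regardless of content type.
*/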
public class TestCheckUploadContentTypeFilter {
@Test
public void putUpload() throws Exception {
test("PUT", HttpFSFileSystem.Operation.CREATE.toString(), "application/octet-stream", true, false);
}
@Test
public void postUpload() throws Exception {
test("POST", HttpFSFileSystem.Operation.APPEND.toString(), "APPLICATION/OCTET-STREAM", true, false);
}
@Test
public void putUploadWrong() throws Exception {
test("PUT", HttpFSFileSystem.Operation.CREATE.toString(), "plain/text", false, false);
test("PUT", HttpFSFileSystem.Operation.CREATE.toString(), "plain/text", true, true);
}
@Test
public void postUploadWrong() throws Exception {
test("POST", HttpFSFileSystem.Operation.APPEND.toString(), "plain/text", false, false);
test("POST", HttpFSFileSystem.Operation.APPEND.toString(), "plain/text", true, true);
}
@Test
public void getOther() throws Exception {
test("GET", HttpFSFileSystem.Operation.GETHOMEDIRECTORY.toString(), "plain/text", false, false);
}
@Test
public void putOther() throws Exception {
test("PUT", HttpFSFileSystem.Operation.MKDIRS.toString(), "plain/text", false, false);
}
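/**
* Drives the filter with mocked request/response objects and verifies that
* it either forwards the request down the chain or rejects it with
* SC_BAD_REQUEST, depending on method, operation, content type and the
* data (upload) flag.
*/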
private void test(String method, String operation, String contentType,
boolean upload, boolean error) throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.reset(request);
Mockito.when(request.getMethod()).thenReturn(method);
Mockito.when(request.getParameter(HttpFSFileSystem.OP_PARAM)).thenReturn(operation);
Mockito.when(request.getParameter(HttpFSParametersProvider.DataParam.NAME)).
thenReturn(Boolean.toString(upload));
Mockito.when(request.getContentType()).thenReturn(contentType);
FilterChain chain = Mockito.mock(FilterChain.class);
Filter filter = new CheckUploadContentTypeFilter();
filter.doFilter(request, response, chain);
if (error) {
Mockito.verify(response).sendError(Mockito.eq(HttpServletResponse.SC_BAD_REQUEST),
Mockito.contains("Data upload"));
}
else {
Mockito.verify(chain).doFilter(request, response);
}
}
}
| 3,381 | 35.76087 | 104 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/lang/TestXException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.lang;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import org.apache.hadoop.test.HTestCase;
import org.junit.Test;
public class TestXException extends HTestCase {
public static enum TestERROR implements XException.ERROR {
TC;
@Override
public String getTemplate() {
return "{0}";
}
}
@Test
public void testXException() throws Exception {
XException ex = new XException(TestERROR.TC);
assertEquals(ex.getError(), TestERROR.TC);
assertEquals(ex.getMessage(), "TC: {0}");
assertNull(ex.getCause());
ex = new XException(TestERROR.TC, "msg");
assertEquals(ex.getError(), TestERROR.TC);
assertEquals(ex.getMessage(), "TC: msg");
assertNull(ex.getCause());
Exception cause = new Exception();
ex = new XException(TestERROR.TC, cause);
assertEquals(ex.getError(), TestERROR.TC);
assertEquals(ex.getMessage(), "TC: " + cause.toString());
assertEquals(ex.getCause(), cause);
XException xcause = ex;
ex = new XException(xcause);
assertEquals(ex.getError(), TestERROR.TC);
assertEquals(ex.getMessage(), xcause.getMessage());
assertEquals(ex.getCause(), xcause);
}
}
| 2,049 | 30.538462 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/lang/TestRunnableCallable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.lang;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.util.concurrent.Callable;
import org.apache.hadoop.test.HTestCase;
import org.junit.Test;
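/**
* Verifies that RunnableCallable adapts a Runnable or a Callable to both
* interfaces, and that an exception thrown by the wrapped Callable surfaces
* as a RuntimeException when invoked via run().
*/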
public class TestRunnableCallable extends HTestCase {
public static class R implements Runnable {
boolean RUN;
@Override
public void run() {
RUN = true;
}
}
public static class C implements Callable {
boolean RUN;
@Override
public Object call() throws Exception {
RUN = true;
return null;
}
}
public static class CEx implements Callable {
@Override
public Object call() throws Exception {
throw new Exception();
}
}
@Test
public void runnable() throws Exception {
R r = new R();
RunnableCallable rc = new RunnableCallable(r);
rc.run();
assertTrue(r.RUN);
r = new R();
rc = new RunnableCallable(r);
rc.call();
assertTrue(r.RUN);
assertEquals(rc.toString(), "R");
}
@Test
public void callable() throws Exception {
C c = new C();
RunnableCallable rc = new RunnableCallable(c);
rc.run();
assertTrue(c.RUN);
c = new C();
rc = new RunnableCallable(c);
rc.call();
assertTrue(c.RUN);
assertEquals(rc.toString(), "C");
}
@Test(expected = RuntimeException.class)
public void callableExRun() throws Exception {
CEx c = new CEx();
RunnableCallable rc = new RunnableCallable(c);
rc.run();
}
}
| 2,315 | 22.876289 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/hadoop/TestFileSystemAccessService.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.service.hadoop;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.lib.server.Server;
import org.apache.hadoop.lib.server.ServiceException;
import org.apache.hadoop.lib.service.FileSystemAccess;
import org.apache.hadoop.lib.service.FileSystemAccessException;
import org.apache.hadoop.lib.service.instrumentation.InstrumentationService;
import org.apache.hadoop.lib.service.scheduler.SchedulerService;
import org.apache.hadoop.test.HFSTestCase;
import org.apache.hadoop.test.TestDir;
import org.apache.hadoop.test.TestDirHelper;
import org.apache.hadoop.test.TestException;
import org.apache.hadoop.test.TestHdfs;
import org.apache.hadoop.test.TestHdfsHelper;
import org.apache.hadoop.util.StringUtils;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
public class TestFileSystemAccessService extends HFSTestCase {
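/**
* Writes the given configuration as hdfs-site.xml into the test directory so
* that FileSystemAccessService loads it as its Hadoop configuration.
*/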
private void createHadoopConf(Configuration hadoopConf) throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
File hdfsSite = new File(dir, "hdfs-site.xml");
OutputStream os = new FileOutputStream(hdfsSite);
hadoopConf.writeXml(os);
os.close();
}
@Before
public void createHadoopConf() throws Exception {
Configuration hadoopConf = new Configuration(false);
hadoopConf.set("foo", "FOO");
createHadoopConf(hadoopConf);
}
@Test
@TestDir
public void simpleSecurity() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
String services = StringUtils.join(",",
Arrays.asList(InstrumentationService.class.getName(),
SchedulerService.class.getName(),
FileSystemAccessService.class.getName()));
Configuration conf = new Configuration(false);
conf.set("server.services", services);
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
Assert.assertNotNull(server.get(FileSystemAccess.class));
server.destroy();
}
@Test
@TestException(exception = ServiceException.class, msgRegExp = "H01.*")
@TestDir
public void noKerberosKeytabProperty() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
String services = StringUtils.join(",",
Arrays.asList(InstrumentationService.class.getName(),
SchedulerService.class.getName(),
FileSystemAccessService.class.getName()));
Configuration conf = new Configuration(false);
conf.set("server.services", services);
conf.set("server.hadoop.authentication.type", "kerberos");
conf.set("server.hadoop.authentication.kerberos.keytab", " ");
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
}
@Test
@TestException(exception = ServiceException.class, msgRegExp = "H01.*")
@TestDir
public void noKerberosPrincipalProperty() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
String services = StringUtils.join(",",
Arrays.asList(InstrumentationService.class.getName(),
SchedulerService.class.getName(),
FileSystemAccessService.class.getName()));
Configuration conf = new Configuration(false);
conf.set("server.services", services);
conf.set("server.hadoop.authentication.type", "kerberos");
conf.set("server.hadoop.authentication.kerberos.keytab", "/tmp/foo");
conf.set("server.hadoop.authentication.kerberos.principal", " ");
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
}
@Test
@TestException(exception = ServiceException.class, msgRegExp = "H02.*")
@TestDir
public void kerberosInitializationFailure() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
String services = StringUtils.join(",",
Arrays.asList(InstrumentationService.class.getName(),
SchedulerService.class.getName(),
FileSystemAccessService.class.getName()));
Configuration conf = new Configuration(false);
conf.set("server.services", services);
conf.set("server.hadoop.authentication.type", "kerberos");
conf.set("server.hadoop.authentication.kerberos.keytab", "/tmp/foo");
conf.set("server.hadoop.authentication.kerberos.principal", "foo@FOO");
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
}
@Test
@TestException(exception = ServiceException.class, msgRegExp = "H09.*")
@TestDir
public void invalidSecurity() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
String services = StringUtils.join(",",
Arrays.asList(InstrumentationService.class.getName(),
SchedulerService.class.getName(),
FileSystemAccessService.class.getName()));
Configuration conf = new Configuration(false);
conf.set("server.services", services);
conf.set("server.hadoop.authentication.type", "foo");
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
}
@Test
@TestDir
public void serviceHadoopConf() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
String services = StringUtils.join(",",
Arrays.asList(InstrumentationService.class.getName(),
SchedulerService.class.getName(),
FileSystemAccessService.class.getName()));
Configuration conf = new Configuration(false);
conf.set("server.services", services);
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
FileSystemAccessService fsAccess = (FileSystemAccessService) server.get(FileSystemAccess.class);
Assert.assertEquals(fsAccess.serviceHadoopConf.get("foo"), "FOO");
server.destroy();
}
@Test
@TestDir
public void serviceHadoopConfCustomDir() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
String hadoopConfDir = new File(dir, "confx").getAbsolutePath();
new File(hadoopConfDir).mkdirs();
String services = StringUtils.join(",",
Arrays.asList(InstrumentationService.class.getName(),
SchedulerService.class.getName(),
FileSystemAccessService.class.getName()));
Configuration conf = new Configuration(false);
conf.set("server.services", services);
conf.set("server.hadoop.config.dir", hadoopConfDir);
File hdfsSite = new File(hadoopConfDir, "hdfs-site.xml");
OutputStream os = new FileOutputStream(hdfsSite);
Configuration hadoopConf = new Configuration(false);
hadoopConf.set("foo", "BAR");
hadoopConf.writeXml(os);
os.close();
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
FileSystemAccessService fsAccess = (FileSystemAccessService) server.get(FileSystemAccess.class);
Assert.assertEquals(fsAccess.serviceHadoopConf.get("foo"), "BAR");
server.destroy();
}
@Test
@TestDir
public void inWhitelists() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
String services = StringUtils.join(",",
Arrays.asList(InstrumentationService.class.getName(),
SchedulerService.class.getName(),
FileSystemAccessService.class.getName()));
Configuration conf = new Configuration(false);
conf.set("server.services", services);
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
FileSystemAccessService fsAccess = (FileSystemAccessService) server.get(FileSystemAccess.class);
fsAccess.validateNamenode("NN");
server.destroy();
conf = new Configuration(false);
conf.set("server.services", services);
conf.set("server.hadoop.name.node.whitelist", "*");
server = new Server("server", dir, dir, dir, dir, conf);
server.init();
fsAccess = (FileSystemAccessService) server.get(FileSystemAccess.class);
fsAccess.validateNamenode("NN");
server.destroy();
conf = new Configuration(false);
conf.set("server.services", services);
conf.set("server.hadoop.name.node.whitelist", "NN");
server = new Server("server", dir, dir, dir, dir, conf);
server.init();
fsAccess = (FileSystemAccessService) server.get(FileSystemAccess.class);
fsAccess.validateNamenode("NN");
server.destroy();
}
@Test
@TestException(exception = FileSystemAccessException.class, msgRegExp = "H05.*")
@TestDir
public void NameNodeNotinWhitelists() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
String services = StringUtils.join(",",
Arrays.asList(InstrumentationService.class.getName(),
SchedulerService.class.getName(),
FileSystemAccessService.class.getName()));
Configuration conf = new Configuration(false);
conf.set("server.services", services);
conf.set("server.hadoop.name.node.whitelist", "NN");
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
FileSystemAccessService fsAccess = (FileSystemAccessService) server.get(FileSystemAccess.class);
fsAccess.validateNamenode("NNx");
}
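/**
* createFileSystem/releaseFileSystem round trip: with the cache purge
* timeout set to 0, the FileSystem must no longer be usable once released.
*/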
@Test
@TestDir
@TestHdfs
public void createFileSystem() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
String services = StringUtils.join(",",
Arrays.asList(InstrumentationService.class.getName(),
SchedulerService.class.getName(),
FileSystemAccessService.class.getName()));
Configuration hadoopConf = new Configuration(false);
hadoopConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, TestHdfsHelper.getHdfsConf().get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY));
createHadoopConf(hadoopConf);
Configuration conf = new Configuration(false);
conf.set("server.services", services);
conf.set("server.hadoop.filesystem.cache.purge.timeout", "0");
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
FileSystemAccess hadoop = server.get(FileSystemAccess.class);
FileSystem fs = hadoop.createFileSystem("u", hadoop.getFileSystemConfiguration());
Assert.assertNotNull(fs);
fs.mkdirs(new Path("/tmp/foo"));
hadoop.releaseFileSystem(fs);
try {
fs.mkdirs(new Path("/tmp/foo"));
Assert.fail();
} catch (IOException ex) {
} catch (Exception ex) {
Assert.fail();
}
server.destroy();
}
@Test
@TestDir
@TestHdfs
public void fileSystemExecutor() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
String services = StringUtils.join(",",
Arrays.asList(InstrumentationService.class.getName(),
SchedulerService.class.getName(),
FileSystemAccessService.class.getName()));
Configuration hadoopConf = new Configuration(false);
hadoopConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, TestHdfsHelper.getHdfsConf().get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY));
createHadoopConf(hadoopConf);
Configuration conf = new Configuration(false);
conf.set("server.services", services);
conf.set("server.hadoop.filesystem.cache.purge.timeout", "0");
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
FileSystemAccess hadoop = server.get(FileSystemAccess.class);
final FileSystem fsa[] = new FileSystem[1];
hadoop.execute("u", hadoop.getFileSystemConfiguration(), new FileSystemAccess.FileSystemExecutor<Void>() {
@Override
public Void execute(FileSystem fs) throws IOException {
fs.mkdirs(new Path("/tmp/foo"));
fsa[0] = fs;
return null;
}
});
try {
fsa[0].mkdirs(new Path("/tmp/foo"));
Assert.fail();
} catch (IOException ex) {
} catch (Exception ex) {
Assert.fail();
}
server.destroy();
}
@Test
@TestException(exception = FileSystemAccessException.class, msgRegExp = "H06.*")
@TestDir
@TestHdfs
public void fileSystemExecutorNoNameNode() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
String services = StringUtils.join(",",
Arrays.asList(InstrumentationService.class.getName(),
SchedulerService.class.getName(),
FileSystemAccessService.class.getName()));
Configuration hadoopConf = new Configuration(false);
hadoopConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, TestHdfsHelper.getHdfsConf().get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY));
createHadoopConf(hadoopConf);
Configuration conf = new Configuration(false);
conf.set("server.services", services);
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
FileSystemAccess fsAccess = server.get(FileSystemAccess.class);
Configuration hdfsConf = fsAccess.getFileSystemConfiguration();
hdfsConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, "");
fsAccess.execute("u", hdfsConf, new FileSystemAccess.FileSystemExecutor<Void>() {
@Override
public Void execute(FileSystem fs) throws IOException {
return null;
}
});
}
@Test
@TestDir
@TestHdfs
public void fileSystemExecutorException() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
String services = StringUtils.join(",",
Arrays.asList(InstrumentationService.class.getName(),
SchedulerService.class.getName(),
FileSystemAccessService.class.getName()));
Configuration hadoopConf = new Configuration(false);
hadoopConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, TestHdfsHelper.getHdfsConf().get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY));
createHadoopConf(hadoopConf);
Configuration conf = new Configuration(false);
conf.set("server.services", services);
conf.set("server.hadoop.filesystem.cache.purge.timeout", "0");
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
FileSystemAccess hadoop = server.get(FileSystemAccess.class);
final FileSystem fsa[] = new FileSystem[1];
try {
hadoop.execute("u", hadoop.getFileSystemConfiguration(), new FileSystemAccess.FileSystemExecutor<Void>() {
@Override
public Void execute(FileSystem fs) throws IOException {
fsa[0] = fs;
throw new IOException();
}
});
Assert.fail();
} catch (FileSystemAccessException ex) {
Assert.assertEquals(ex.getError(), FileSystemAccessException.ERROR.H03);
} catch (Exception ex) {
Assert.fail();
}
try {
fsa[0].mkdirs(new Path("/tmp/foo"));
Assert.fail();
} catch (IOException ex) {
} catch (Exception ex) {
Assert.fail();
}
server.destroy();
}
@Test
@TestDir
@TestHdfs
public void fileSystemCache() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
String services = StringUtils.join(",",
Arrays.asList(InstrumentationService.class.getName(),
SchedulerService.class.getName(),
FileSystemAccessService.class.getName()));
Configuration hadoopConf = new Configuration(false);
hadoopConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
TestHdfsHelper.getHdfsConf().get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY));
createHadoopConf(hadoopConf);
Configuration conf = new Configuration(false);
conf.set("server.services", services);
conf.set("server.hadoop.filesystem.cache.purge.frequency", "1");
conf.set("server.hadoop.filesystem.cache.purge.timeout", "1");
Server server = new Server("server", dir, dir, dir, dir, conf);
try {
server.init();
FileSystemAccess hadoop = server.get(FileSystemAccess.class);
FileSystem fs1 =
hadoop.createFileSystem("u", hadoop.getFileSystemConfiguration());
Assert.assertNotNull(fs1);
fs1.mkdirs(new Path("/tmp/foo1"));
hadoop.releaseFileSystem(fs1);
//still around because of caching
fs1.mkdirs(new Path("/tmp/foo2"));
FileSystem fs2 =
hadoop.createFileSystem("u", hadoop.getFileSystemConfiguration());
//should be same instance because of caching
Assert.assertEquals(fs1, fs2);
Thread.sleep(4 * 1000);
//still around because the lease count is 1 (fs2 is still out)
fs1.mkdirs(new Path("/tmp/foo2"));
Thread.sleep(4 * 1000);
//still around because the lease count is 1 (fs2 is still out)
fs2.mkdirs(new Path("/tmp/foo"));
hadoop.releaseFileSystem(fs2);
Thread.sleep(4 * 1000);
//should not be around as lease count is 0
try {
fs2.mkdirs(new Path("/tmp/foo"));
Assert.fail();
} catch (IOException ex) {
} catch (Exception ex) {
Assert.fail();
}
} finally {
server.destroy();
}
}
}
| 18,143 | 37.852248 | 155 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/scheduler/TestSchedulerService.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.service.scheduler;
import static org.junit.Assert.assertNotNull;
import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.lib.server.Server;
import org.apache.hadoop.lib.service.Scheduler;
import org.apache.hadoop.lib.service.instrumentation.InstrumentationService;
import org.apache.hadoop.test.HTestCase;
import org.apache.hadoop.test.TestDir;
import org.apache.hadoop.test.TestDirHelper;
import org.apache.hadoop.util.StringUtils;
import org.junit.Test;
public class TestSchedulerService extends HTestCase {
@Test
@TestDir
public void service() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration conf = new Configuration(false);
conf.set("server.services", StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName(),
SchedulerService.class.getName())));
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
assertNotNull(server.get(Scheduler.class));
server.destroy();
}
}
| 1,945 | 37.156863 | 107 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/TestGroupsService.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.service.security;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNotSame;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.lib.server.Server;
import org.apache.hadoop.lib.service.Groups;
import org.apache.hadoop.test.HTestCase;
import org.apache.hadoop.test.TestDir;
import org.apache.hadoop.test.TestDirHelper;
import org.apache.hadoop.util.StringUtils;
import org.junit.Test;
public class TestGroupsService extends HTestCase {
@Test
@TestDir
public void service() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration conf = new Configuration(false);
conf.set("server.services", StringUtils.join(",", Arrays.asList(GroupsService.class.getName())));
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
Groups groups = server.get(Groups.class);
assertNotNull(groups);
List<String> g = groups.getGroups(System.getProperty("user.name"));
assertNotSame(g.size(), 0);
server.destroy();
}
@Test(expected = RuntimeException.class)
@TestDir
public void invalidGroupsMapping() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration conf = new Configuration(false);
conf.set("server.services", StringUtils.join(",", Arrays.asList(GroupsService.class.getName())));
conf.set("server.groups.hadoop.security.group.mapping", String.class.getName());
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
}
}
| 2,451 | 36.723077 | 101 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/DummyGroupMapping.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.service.security;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.security.GroupMappingServiceProvider;
import org.apache.hadoop.test.HadoopUsersConfTestHelper;
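/**
* Test-only group mapping: 'root' maps to 'admin', 'nobody' maps to
* 'nobody', and any other user gets the groups configured through
* HadoopUsersConfTestHelper (or no groups at all).
*/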
public class DummyGroupMapping implements GroupMappingServiceProvider {
@Override
@SuppressWarnings("unchecked")
public List<String> getGroups(String user) throws IOException {
if (user.equals("root")) {
return Arrays.asList("admin");
}
else if (user.equals("nobody")) {
return Arrays.asList("nobody");
} else {
String[] groups = HadoopUsersConfTestHelper.getHadoopUserGroups(user);
return (groups != null) ? Arrays.asList(groups) : Collections.EMPTY_LIST;
}
}
@Override
public void cacheGroupsRefresh() throws IOException {
}
@Override
public void cacheGroupsAdd(List<String> groups) throws IOException {
}
}
| 1,761 | 32.884615 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/instrumentation/TestInstrumentationService.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.service.instrumentation;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.StringWriter;
import java.util.Arrays;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.lib.server.Server;
import org.apache.hadoop.lib.service.Instrumentation;
import org.apache.hadoop.lib.service.scheduler.SchedulerService;
import org.apache.hadoop.test.HTestCase;
import org.apache.hadoop.test.TestDir;
import org.apache.hadoop.test.TestDirHelper;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.json.simple.JSONObject;
import org.json.simple.parser.JSONParser;
import org.junit.Test;
public class TestInstrumentationService extends HTestCase {
@Override
protected float getWaitForRatio() {
return 1;
}
@Test
public void cron() {
InstrumentationService.Cron cron = new InstrumentationService.Cron();
assertEquals(cron.start, 0);
assertEquals(cron.lapStart, 0);
assertEquals(cron.own, 0);
assertEquals(cron.total, 0);
long begin = Time.now();
assertEquals(cron.start(), cron);
assertEquals(cron.start(), cron);
assertEquals(cron.start, begin, 20);
assertEquals(cron.start, cron.lapStart);
sleep(100);
assertEquals(cron.stop(), cron);
long end = Time.now();
long delta = end - begin;
assertEquals(cron.own, delta, 20);
assertEquals(cron.total, 0);
assertEquals(cron.lapStart, 0);
sleep(100);
long reStart = Time.now();
cron.start();
assertEquals(cron.start, begin, 20);
assertEquals(cron.lapStart, reStart, 20);
sleep(100);
cron.stop();
long reEnd = Time.now();
delta += reEnd - reStart;
assertEquals(cron.own, delta, 20);
assertEquals(cron.total, 0);
assertEquals(cron.lapStart, 0);
cron.end();
assertEquals(cron.total, reEnd - begin, 20);
try {
cron.start();
fail();
} catch (IllegalStateException ex) {
} catch (Exception ex) {
fail();
}
try {
cron.stop();
fail();
} catch (IllegalStateException ex) {
} catch (Exception ex) {
fail();
}
}
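/**
* Feeds the Timer a series of Crons of increasing duration and checks that
* the last/average own and total times stay within tolerance, both via
* getValues() and via the JSON representation.
*/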
@Test
public void timer() throws Exception {
InstrumentationService.Timer timer = new InstrumentationService.Timer(2);
InstrumentationService.Cron cron = new InstrumentationService.Cron();
long ownStart;
long ownEnd;
long totalStart;
long totalEnd;
long ownDelta;
long totalDelta;
long avgTotal;
long avgOwn;
cron.start();
ownStart = Time.now();
totalStart = ownStart;
ownDelta = 0;
sleep(100);
cron.stop();
ownEnd = Time.now();
ownDelta += ownEnd - ownStart;
sleep(100);
cron.start();
ownStart = Time.now();
sleep(100);
cron.stop();
ownEnd = Time.now();
ownDelta += ownEnd - ownStart;
totalEnd = ownEnd;
totalDelta = totalEnd - totalStart;
avgTotal = totalDelta;
avgOwn = ownDelta;
timer.addCron(cron);
long[] values = timer.getValues();
assertEquals(values[InstrumentationService.Timer.LAST_TOTAL], totalDelta, 20);
assertEquals(values[InstrumentationService.Timer.LAST_OWN], ownDelta, 20);
assertEquals(values[InstrumentationService.Timer.AVG_TOTAL], avgTotal, 20);
assertEquals(values[InstrumentationService.Timer.AVG_OWN], avgOwn, 20);
cron = new InstrumentationService.Cron();
cron.start();
ownStart = Time.now();
totalStart = ownStart;
ownDelta = 0;
sleep(200);
cron.stop();
ownEnd = Time.now();
ownDelta += ownEnd - ownStart;
sleep(200);
cron.start();
ownStart = Time.now();
sleep(200);
cron.stop();
ownEnd = Time.now();
ownDelta += ownEnd - ownStart;
totalEnd = ownEnd;
totalDelta = totalEnd - totalStart;
avgTotal = (avgTotal * 1 + totalDelta) / 2;
avgOwn = (avgOwn * 1 + ownDelta) / 2;
timer.addCron(cron);
values = timer.getValues();
assertEquals(values[InstrumentationService.Timer.LAST_TOTAL], totalDelta, 20);
assertEquals(values[InstrumentationService.Timer.LAST_OWN], ownDelta, 20);
assertEquals(values[InstrumentationService.Timer.AVG_TOTAL], avgTotal, 20);
assertEquals(values[InstrumentationService.Timer.AVG_OWN], avgOwn, 20);
avgTotal = totalDelta;
avgOwn = ownDelta;
cron = new InstrumentationService.Cron();
cron.start();
ownStart = Time.now();
totalStart = ownStart;
ownDelta = 0;
sleep(300);
cron.stop();
ownEnd = Time.now();
ownDelta += ownEnd - ownStart;
sleep(300);
cron.start();
ownStart = Time.now();
sleep(300);
cron.stop();
ownEnd = Time.now();
ownDelta += ownEnd - ownStart;
totalEnd = ownEnd;
totalDelta = totalEnd - totalStart;
avgTotal = (avgTotal * 1 + totalDelta) / 2;
avgOwn = (avgOwn * 1 + ownDelta) / 2;
cron.stop();
timer.addCron(cron);
values = timer.getValues();
assertEquals(values[InstrumentationService.Timer.LAST_TOTAL], totalDelta, 20);
assertEquals(values[InstrumentationService.Timer.LAST_OWN], ownDelta, 20);
assertEquals(values[InstrumentationService.Timer.AVG_TOTAL], avgTotal, 20);
assertEquals(values[InstrumentationService.Timer.AVG_OWN], avgOwn, 20);
JSONObject json = (JSONObject) new JSONParser().parse(timer.toJSONString());
assertEquals(json.size(), 4);
assertEquals(json.get("lastTotal"), values[InstrumentationService.Timer.LAST_TOTAL]);
assertEquals(json.get("lastOwn"), values[InstrumentationService.Timer.LAST_OWN]);
assertEquals(json.get("avgTotal"), values[InstrumentationService.Timer.AVG_TOTAL]);
assertEquals(json.get("avgOwn"), values[InstrumentationService.Timer.AVG_OWN]);
StringWriter writer = new StringWriter();
timer.writeJSONString(writer);
writer.close();
json = (JSONObject) new JSONParser().parse(writer.toString());
assertEquals(json.size(), 4);
assertEquals(json.get("lastTotal"), values[InstrumentationService.Timer.LAST_TOTAL]);
assertEquals(json.get("lastOwn"), values[InstrumentationService.Timer.LAST_OWN]);
assertEquals(json.get("avgTotal"), values[InstrumentationService.Timer.AVG_TOTAL]);
assertEquals(json.get("avgOwn"), values[InstrumentationService.Timer.AVG_OWN]);
}
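// The Sampler keeps a ring buffer of the last 4 polled values and reports their mean as the rate,
// serialized to JSON together with the buffer size.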
@Test
public void sampler() throws Exception {
final long[] value = new long[1];
Instrumentation.Variable<Long> var = new Instrumentation.Variable<Long>() {
@Override
public Long getValue() {
return value[0];
}
};
InstrumentationService.Sampler sampler = new InstrumentationService.Sampler();
sampler.init(4, var);
assertEquals(sampler.getRate(), 0f, 0.0001);
sampler.sample();
assertEquals(sampler.getRate(), 0f, 0.0001);
value[0] = 1;
sampler.sample();
assertEquals(sampler.getRate(), (0d + 1) / 2, 0.0001);
value[0] = 2;
sampler.sample();
assertEquals(sampler.getRate(), (0d + 1 + 2) / 3, 0.0001);
value[0] = 3;
sampler.sample();
assertEquals(sampler.getRate(), (0d + 1 + 2 + 3) / 4, 0.0001);
value[0] = 4;
sampler.sample();
assertEquals(sampler.getRate(), (4d + 1 + 2 + 3) / 4, 0.0001);
JSONObject json = (JSONObject) new JSONParser().parse(sampler.toJSONString());
assertEquals(json.size(), 2);
assertEquals(json.get("sampler"), sampler.getRate());
assertEquals(json.get("size"), 4L);
StringWriter writer = new StringWriter();
sampler.writeJSONString(writer);
writer.close();
json = (JSONObject) new JSONParser().parse(writer.toString());
assertEquals(json.size(), 2);
assertEquals(json.get("sampler"), sampler.getRate());
assertEquals(json.get("size"), 4L);
}
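// VariableHolder simply wraps an Instrumentation.Variable and serializes its current value as {"value": ...}.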
@Test
public void variableHolder() throws Exception {
InstrumentationService.VariableHolder<String> variableHolder =
new InstrumentationService.VariableHolder<String>();
variableHolder.var = new Instrumentation.Variable<String>() {
@Override
public String getValue() {
return "foo";
}
};
JSONObject json = (JSONObject) new JSONParser().parse(variableHolder.toJSONString());
assertEquals(json.size(), 1);
assertEquals(json.get("value"), "foo");
StringWriter writer = new StringWriter();
variableHolder.writeJSONString(writer);
writer.close();
json = (JSONObject) new JSONParser().parse(writer.toString());
assertEquals(json.size(), 1);
assertEquals(json.get("value"), "foo");
}
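// End-to-end check of the service: counters, timers, variables and samplers registered under group "g"
// must all show up in the snapshot next to the built-in os-env, sys-props and jvm sections.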
@Test
@TestDir
@SuppressWarnings("unchecked")
public void service() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
String services = StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName()));
Configuration conf = new Configuration(false);
conf.set("server.services", services);
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
Instrumentation instrumentation = server.get(Instrumentation.class);
assertNotNull(instrumentation);
instrumentation.incr("g", "c", 1);
instrumentation.incr("g", "c", 2);
instrumentation.incr("g", "c1", 2);
Instrumentation.Cron cron = instrumentation.createCron();
cron.start();
sleep(100);
cron.stop();
instrumentation.addCron("g", "t", cron);
cron = instrumentation.createCron();
cron.start();
sleep(200);
cron.stop();
instrumentation.addCron("g", "t", cron);
Instrumentation.Variable<String> var = new Instrumentation.Variable<String>() {
@Override
public String getValue() {
return "foo";
}
};
instrumentation.addVariable("g", "v", var);
Instrumentation.Variable<Long> varToSample = new Instrumentation.Variable<Long>() {
@Override
public Long getValue() {
return 1L;
}
};
instrumentation.addSampler("g", "s", 10, varToSample);
Map<String, ?> snapshot = instrumentation.getSnapshot();
assertNotNull(snapshot.get("os-env"));
assertNotNull(snapshot.get("sys-props"));
assertNotNull(snapshot.get("jvm"));
assertNotNull(snapshot.get("counters"));
assertNotNull(snapshot.get("timers"));
assertNotNull(snapshot.get("variables"));
assertNotNull(snapshot.get("samplers"));
assertNotNull(((Map<String, String>) snapshot.get("os-env")).get("PATH"));
assertNotNull(((Map<String, String>) snapshot.get("sys-props")).get("java.version"));
assertNotNull(((Map<String, ?>) snapshot.get("jvm")).get("free.memory"));
assertNotNull(((Map<String, ?>) snapshot.get("jvm")).get("max.memory"));
assertNotNull(((Map<String, ?>) snapshot.get("jvm")).get("total.memory"));
assertNotNull(((Map<String, Map<String, Object>>) snapshot.get("counters")).get("g"));
assertNotNull(((Map<String, Map<String, Object>>) snapshot.get("timers")).get("g"));
assertNotNull(((Map<String, Map<String, Object>>) snapshot.get("variables")).get("g"));
assertNotNull(((Map<String, Map<String, Object>>) snapshot.get("samplers")).get("g"));
assertNotNull(((Map<String, Map<String, Object>>) snapshot.get("counters")).get("g").get("c"));
assertNotNull(((Map<String, Map<String, Object>>) snapshot.get("counters")).get("g").get("c1"));
assertNotNull(((Map<String, Map<String, Object>>) snapshot.get("timers")).get("g").get("t"));
assertNotNull(((Map<String, Map<String, Object>>) snapshot.get("variables")).get("g").get("v"));
assertNotNull(((Map<String, Map<String, Object>>) snapshot.get("samplers")).get("g").get("s"));
StringWriter writer = new StringWriter();
JSONObject.writeJSONString(snapshot, writer);
writer.close();
server.destroy();
}
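// With the SchedulerService also configured, registered samplers are polled in the background, so the
// sampled variable is read repeatedly and the reported rate turns positive without any explicit sample() call.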
@Test
@TestDir
@SuppressWarnings("unchecked")
public void sampling() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
String services = StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName(),
SchedulerService.class.getName()));
Configuration conf = new Configuration(false);
conf.set("server.services", services);
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
Instrumentation instrumentation = server.get(Instrumentation.class);
final AtomicInteger count = new AtomicInteger();
Instrumentation.Variable<Long> varToSample = new Instrumentation.Variable<Long>() {
@Override
public Long getValue() {
return (long) count.incrementAndGet();
}
};
instrumentation.addSampler("g", "s", 10, varToSample);
sleep(2000);
int i = count.get();
assertTrue(i > 0);
Map<String, Map<String, ?>> snapshot = instrumentation.getSnapshot();
Map<String, Map<String, Object>> samplers = (Map<String, Map<String, Object>>) snapshot.get("samplers");
InstrumentationService.Sampler sampler = (InstrumentationService.Sampler) samplers.get("g").get("s");
assertTrue(sampler.getRate() > 0);
server.destroy();
}
}
| 13,985 | 33.112195 | 108 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/util/TestConfigurationUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.util;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.conf.Configuration;
import org.junit.Test;
public class TestConfigurationUtils {
@Test
public void constructors() throws Exception {
Configuration conf = new Configuration(false);
assertEquals(conf.size(), 0);
byte[] bytes = "<configuration><property><name>a</name><value>A</value></property></configuration>".getBytes();
InputStream is = new ByteArrayInputStream(bytes);
conf = new Configuration(false);
ConfigurationUtils.load(conf, is);
assertEquals(conf.size(), 1);
assertEquals(conf.get("a"), "A");
}
@Test(expected = IOException.class)
public void constructorsFail3() throws Exception {
InputStream is = new ByteArrayInputStream("<xonfiguration></xonfiguration>".getBytes());
Configuration conf = new Configuration(false);
ConfigurationUtils.load(conf, is);
}
@Test
public void copy() throws Exception {
Configuration srcConf = new Configuration(false);
Configuration targetConf = new Configuration(false);
srcConf.set("testParameter1", "valueFromSource");
srcConf.set("testParameter2", "valueFromSource");
targetConf.set("testParameter2", "valueFromTarget");
targetConf.set("testParameter3", "valueFromTarget");
ConfigurationUtils.copy(srcConf, targetConf);
assertEquals("valueFromSource", targetConf.get("testParameter1"));
assertEquals("valueFromSource", targetConf.get("testParameter2"));
assertEquals("valueFromTarget", targetConf.get("testParameter3"));
}
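// Unlike copy(), injectDefaults() only fills in keys missing from the target; existing target values are
// preserved and the source configuration is left untouched.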
@Test
public void injectDefaults() throws Exception {
Configuration srcConf = new Configuration(false);
Configuration targetConf = new Configuration(false);
srcConf.set("testParameter1", "valueFromSource");
srcConf.set("testParameter2", "valueFromSource");
targetConf.set("testParameter2", "originalValueFromTarget");
targetConf.set("testParameter3", "originalValueFromTarget");
ConfigurationUtils.injectDefaults(srcConf, targetConf);
assertEquals("valueFromSource", targetConf.get("testParameter1"));
assertEquals("originalValueFromTarget", targetConf.get("testParameter2"));
assertEquals("originalValueFromTarget", targetConf.get("testParameter3"));
assertEquals("valueFromSource", srcConf.get("testParameter1"));
assertEquals("valueFromSource", srcConf.get("testParameter2"));
assertNull(srcConf.get("testParameter3"));
}
@Test
public void resolve() {
Configuration conf = new Configuration(false);
conf.set("a", "A");
conf.set("b", "${a}");
assertEquals(conf.getRaw("a"), "A");
assertEquals(conf.getRaw("b"), "${a}");
conf = ConfigurationUtils.resolve(conf);
assertEquals(conf.getRaw("a"), "A");
assertEquals(conf.getRaw("b"), "A");
}
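// get() expands ${...} references against the configuration and then system properties (e.g. ${user.name});
// unresolvable references such as ${aaa} come back verbatim, while getRaw() never expands.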
@Test
public void testVarResolutionAndSysProps() {
String userName = System.getProperty("user.name");
Configuration conf = new Configuration(false);
conf.set("a", "A");
conf.set("b", "${a}");
conf.set("c", "${user.name}");
conf.set("d", "${aaa}");
assertEquals(conf.getRaw("a"), "A");
assertEquals(conf.getRaw("b"), "${a}");
assertEquals(conf.getRaw("c"), "${user.name}");
assertEquals(conf.get("a"), "A");
assertEquals(conf.get("b"), "A");
assertEquals(conf.get("c"), userName);
assertEquals(conf.get("d"), "${aaa}");
conf.set("user.name", "foo");
assertEquals(conf.get("user.name"), "foo");
}
}
| 4,442 | 33.710938 | 115 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/util/TestCheck.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.util;
import static org.junit.Assert.assertEquals;
import java.util.ArrayList;
import java.util.Arrays;
import org.apache.hadoop.test.HTestCase;
import org.junit.Test;
public class TestCheck extends HTestCase {
@Test
public void notNullNotNull() {
assertEquals(Check.notNull("value", "name"), "value");
}
@Test(expected = IllegalArgumentException.class)
public void notNullNull() {
Check.notNull(null, "name");
}
@Test
public void notNullElementsNotNull() {
Check.notNullElements(new ArrayList<String>(), "name");
Check.notNullElements(Arrays.asList("a"), "name");
}
@Test(expected = IllegalArgumentException.class)
public void notNullElementsNullList() {
Check.notNullElements(null, "name");
}
@Test(expected = IllegalArgumentException.class)
public void notNullElementsNullElements() {
Check.notNullElements(Arrays.asList("a", "", null), "name");
}
@Test
public void notEmptyElementsNotNull() {
Check.notEmptyElements(new ArrayList<String>(), "name");
Check.notEmptyElements(Arrays.asList("a"), "name");
}
@Test(expected = IllegalArgumentException.class)
public void notEmptyElementsNullList() {
Check.notEmptyElements(null, "name");
}
@Test(expected = IllegalArgumentException.class)
public void notEmptyElementsNullElements() {
Check.notEmptyElements(Arrays.asList("a", null), "name");
}
@Test(expected = IllegalArgumentException.class)
public void notEmptyElementsEmptyElements() {
Check.notEmptyElements(Arrays.asList("a", ""), "name");
}
@Test
public void notEmptyNotEmpty() {
assertEquals(Check.notEmpty("value", "name"), "value");
}
@Test(expected = IllegalArgumentException.class)
public void notEmptyNull() {
Check.notEmpty(null, "name");
}
@Test(expected = IllegalArgumentException.class)
public void notEmptyEmpty() {
Check.notEmpty("", "name");
}
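// The identifier checks below enforce a maximum length and a Java-identifier-like syntax: letters, digits
// and '_' only, not starting with a digit.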
@Test
public void validIdentifierValid() throws Exception {
assertEquals(Check.validIdentifier("a", 1, ""), "a");
assertEquals(Check.validIdentifier("a1", 2, ""), "a1");
assertEquals(Check.validIdentifier("a_", 3, ""), "a_");
assertEquals(Check.validIdentifier("_", 1, ""), "_");
}
@Test(expected = IllegalArgumentException.class)
public void validIdentifierInvalid1() throws Exception {
Check.validIdentifier("!", 1, "");
}
@Test(expected = IllegalArgumentException.class)
public void validIdentifierInvalid2() throws Exception {
Check.validIdentifier("a1", 1, "");
}
@Test(expected = IllegalArgumentException.class)
public void validIdentifierInvalid3() throws Exception {
Check.validIdentifier("1", 1, "");
}
@Test
public void checkGTZeroGreater() {
assertEquals(Check.gt0(120, "test"), 120);
}
@Test(expected = IllegalArgumentException.class)
public void checkGTZeroZero() {
Check.gt0(0, "test");
}
@Test(expected = IllegalArgumentException.class)
public void checkGTZeroLessThanZero() {
Check.gt0(-1, "test");
}
@Test
public void checkGEZero() {
assertEquals(Check.ge0(120, "test"), 120);
assertEquals(Check.ge0(0, "test"), 0);
}
@Test(expected = IllegalArgumentException.class)
public void checkGELessThanZero() {
Check.ge0(-1, "test");
}
}
| 4,109 | 27.150685 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestServerWebApp.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.servlet;
import static org.junit.Assert.assertEquals;
import java.net.InetSocketAddress;
import org.apache.hadoop.lib.server.Server;
import org.apache.hadoop.test.HTestCase;
import org.apache.hadoop.test.TestDir;
import org.apache.hadoop.test.TestDirHelper;
import org.junit.Assert;
import org.junit.Test;
public class TestServerWebApp extends HTestCase {
@Test(expected = IllegalArgumentException.class)
public void getHomeDirNotDef() {
ServerWebApp.getHomeDir("TestServerWebApp00");
}
@Test
public void getHomeDir() {
System.setProperty("TestServerWebApp0.home.dir", "/tmp");
assertEquals(ServerWebApp.getHomeDir("TestServerWebApp0"), "/tmp");
assertEquals(ServerWebApp.getDir("TestServerWebApp0", ".log.dir", "/tmp/log"), "/tmp/log");
System.setProperty("TestServerWebApp0.log.dir", "/tmplog");
assertEquals(ServerWebApp.getDir("TestServerWebApp0", ".log.dir", "/tmp/log"), "/tmplog");
}
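// contextInitialized() boots the embedded Server to NORMAL and contextDestroyed() takes it to SHUTDOWN,
// mirroring the servlet context life cycle.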
@Test
@TestDir
public void lifecycle() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
System.setProperty("TestServerWebApp1.home.dir", dir);
System.setProperty("TestServerWebApp1.config.dir", dir);
System.setProperty("TestServerWebApp1.log.dir", dir);
System.setProperty("TestServerWebApp1.temp.dir", dir);
ServerWebApp server = new ServerWebApp("TestServerWebApp1") {
};
assertEquals(server.getStatus(), Server.Status.UNDEF);
server.contextInitialized(null);
assertEquals(server.getStatus(), Server.Status.NORMAL);
server.contextDestroyed(null);
assertEquals(server.getStatus(), Server.Status.SHUTDOWN);
}
@Test(expected = RuntimeException.class)
@TestDir
public void failedInit() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
System.setProperty("TestServerWebApp2.home.dir", dir);
System.setProperty("TestServerWebApp2.config.dir", dir);
System.setProperty("TestServerWebApp2.log.dir", dir);
System.setProperty("TestServerWebApp2.temp.dir", dir);
System.setProperty("testserverwebapp2.services", "FOO");
ServerWebApp server = new ServerWebApp("TestServerWebApp2") {
};
server.contextInitialized(null);
}
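// The HTTP authority is resolved from the lower-cased <name>.http.hostname and <name>.http.port system properties.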
@Test
@TestDir
public void testResolveAuthority() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
System.setProperty("TestServerWebApp3.home.dir", dir);
System.setProperty("TestServerWebApp3.config.dir", dir);
System.setProperty("TestServerWebApp3.log.dir", dir);
System.setProperty("TestServerWebApp3.temp.dir", dir);
System.setProperty("testserverwebapp3.http.hostname", "localhost");
System.setProperty("testserverwebapp3.http.port", "14000");
ServerWebApp server = new ServerWebApp("TestServerWebApp3") {
};
InetSocketAddress address = server.resolveAuthority();
Assert.assertEquals("localhost", address.getHostName());
Assert.assertEquals(14000, address.getPort());
}
}
| 3,808 | 37.09 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestHostnameFilter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.servlet;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import org.apache.hadoop.test.HTestCase;
import org.junit.Test;
import org.mockito.Mockito;
public class TestHostnameFilter extends HTestCase {
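// HostnameFilter keeps the remote address in a thread local for the duration of the filter chain: it is
// visible inside doFilter(), cleared afterwards, and falls back to "???" when the remote address is unknown.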
@Test
public void hostname() throws Exception {
ServletRequest request = Mockito.mock(ServletRequest.class);
Mockito.when(request.getRemoteAddr()).thenReturn("localhost");
ServletResponse response = Mockito.mock(ServletResponse.class);
final AtomicBoolean invoked = new AtomicBoolean();
FilterChain chain = new FilterChain() {
@Override
public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse)
throws IOException, ServletException {
// Hostname was set to "localhost", but may get resolved automatically to
// "127.0.0.1" depending on OS.
assertTrue(HostnameFilter.get().contains("localhost") ||
HostnameFilter.get().contains("127.0.0.1"));
invoked.set(true);
}
};
Filter filter = new HostnameFilter();
filter.init(null);
assertNull(HostnameFilter.get());
filter.doFilter(request, response, chain);
assertTrue(invoked.get());
assertNull(HostnameFilter.get());
filter.destroy();
}
@Test
public void testMissingHostname() throws Exception {
ServletRequest request = Mockito.mock(ServletRequest.class);
Mockito.when(request.getRemoteAddr()).thenReturn(null);
ServletResponse response = Mockito.mock(ServletResponse.class);
final AtomicBoolean invoked = new AtomicBoolean();
FilterChain chain = new FilterChain() {
@Override
public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse)
throws IOException, ServletException {
assertTrue(HostnameFilter.get().contains("???"));
invoked.set(true);
}
};
Filter filter = new HostnameFilter();
filter.init(null);
assertNull(HostnameFilter.get());
filter.doFilter(request, response, chain);
assertTrue(invoked.get());
assertNull(HostnameFilter.get());
filter.destroy();
}
}
| 3,238 | 32.391753 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestMDCFilter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.servlet;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.security.Principal;
import java.util.concurrent.atomic.AtomicBoolean;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import org.apache.hadoop.test.HTestCase;
import org.junit.Test;
import org.mockito.Mockito;
import org.slf4j.MDC;
public class TestMDCFilter extends HTestCase {
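// MDCFilter populates the logging MDC with hostname, user, method and path for the duration of the request
// and clears those keys once the chain returns.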
@Test
public void mdc() throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
Mockito.when(request.getUserPrincipal()).thenReturn(null);
Mockito.when(request.getMethod()).thenReturn("METHOD");
Mockito.when(request.getPathInfo()).thenReturn("/pathinfo");
ServletResponse response = Mockito.mock(ServletResponse.class);
final AtomicBoolean invoked = new AtomicBoolean();
FilterChain chain = new FilterChain() {
@Override
public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse)
throws IOException, ServletException {
assertEquals(MDC.get("hostname"), null);
assertEquals(MDC.get("user"), null);
assertEquals(MDC.get("method"), "METHOD");
assertEquals(MDC.get("path"), "/pathinfo");
invoked.set(true);
}
};
MDC.clear();
Filter filter = new MDCFilter();
filter.init(null);
filter.doFilter(request, response, chain);
assertTrue(invoked.get());
assertNull(MDC.get("hostname"));
assertNull(MDC.get("user"));
assertNull(MDC.get("method"));
assertNull(MDC.get("path"));
Mockito.when(request.getUserPrincipal()).thenReturn(new Principal() {
@Override
public String getName() {
return "name";
}
});
invoked.set(false);
chain = new FilterChain() {
@Override
public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse)
throws IOException, ServletException {
assertEquals(MDC.get("hostname"), null);
assertEquals(MDC.get("user"), "name");
assertEquals(MDC.get("method"), "METHOD");
assertEquals(MDC.get("path"), "/pathinfo");
invoked.set(true);
}
};
filter.doFilter(request, response, chain);
assertTrue(invoked.get());
HostnameFilter.HOSTNAME_TL.set("HOST");
invoked.set(false);
chain = new FilterChain() {
@Override
public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse)
throws IOException, ServletException {
assertEquals(MDC.get("hostname"), "HOST");
assertEquals(MDC.get("user"), "name");
assertEquals(MDC.get("method"), "METHOD");
assertEquals(MDC.get("path"), "/pathinfo");
invoked.set(true);
}
};
filter.doFilter(request, response, chain);
assertTrue(invoked.get());
HostnameFilter.HOSTNAME_TL.remove();
filter.destroy();
}
}
| 3,985 | 31.672131 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/server/TestBaseService.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.server;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.test.HTestCase;
import org.junit.Test;
import org.mockito.Mockito;
public class TestBaseService extends HTestCase {
public static class MyService extends BaseService {
static Boolean INIT;
public MyService() {
super("myservice");
}
@Override
protected void init() throws ServiceException {
INIT = true;
}
@Override
public Class getInterface() {
return null;
}
}
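// init(Server) must expose only the "server.myservice.*" keys (with the prefix stripped) through
// getServiceConfig() and invoke the subclass init() hook.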
@Test
public void baseService() throws Exception {
BaseService service = new MyService();
assertNull(service.getInterface());
assertEquals(service.getPrefix(), "myservice");
assertEquals(service.getServiceDependencies().length, 0);
Server server = Mockito.mock(Server.class);
Configuration conf = new Configuration(false);
conf.set("server.myservice.foo", "FOO");
conf.set("server.myservice1.bar", "BAR");
Mockito.when(server.getConfig()).thenReturn(conf);
Mockito.when(server.getPrefixedName("myservice.foo")).thenReturn("server.myservice.foo");
Mockito.when(server.getPrefixedName("myservice.")).thenReturn("server.myservice.");
service.init(server);
assertEquals(service.getPrefixedName("foo"), "server.myservice.foo");
assertEquals(service.getServiceConfig().size(), 1);
assertEquals(service.getServiceConfig().get("foo"), "FOO");
assertTrue(MyService.INIT);
}
}
| 2,414 | 32.541667 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/server/TestServer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.server;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.Writer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.lib.lang.XException;
import org.apache.hadoop.test.HTestCase;
import org.apache.hadoop.test.TestDir;
import org.apache.hadoop.test.TestDirHelper;
import org.apache.hadoop.test.TestException;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.StringUtils;
import org.junit.Test;
public class TestServer extends HTestCase {
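// The short Server constructors derive the conf/, log/ and temp/ directories under the home directory;
// getConfig() stays null when no Configuration is passed in.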
@Test
@TestDir
public void constructorsGetters() throws Exception {
Server server = new Server("server", getAbsolutePath("/a"),
getAbsolutePath("/b"), getAbsolutePath("/c"), getAbsolutePath("/d"),
new Configuration(false));
assertEquals(server.getHomeDir(), getAbsolutePath("/a"));
assertEquals(server.getConfigDir(), getAbsolutePath("/b"));
assertEquals(server.getLogDir(), getAbsolutePath("/c"));
assertEquals(server.getTempDir(), getAbsolutePath("/d"));
assertEquals(server.getName(), "server");
assertEquals(server.getPrefix(), "server");
assertEquals(server.getPrefixedName("name"), "server.name");
assertNotNull(server.getConfig());
server = new Server("server", getAbsolutePath("/a"), getAbsolutePath("/b"),
getAbsolutePath("/c"), getAbsolutePath("/d"));
assertEquals(server.getHomeDir(), getAbsolutePath("/a"));
assertEquals(server.getConfigDir(), getAbsolutePath("/b"));
assertEquals(server.getLogDir(), getAbsolutePath("/c"));
assertEquals(server.getTempDir(), getAbsolutePath("/d"));
assertEquals(server.getName(), "server");
assertEquals(server.getPrefix(), "server");
assertEquals(server.getPrefixedName("name"), "server.name");
assertNull(server.getConfig());
server = new Server("server", TestDirHelper.getTestDir().getAbsolutePath(), new Configuration(false));
assertEquals(server.getHomeDir(), TestDirHelper.getTestDir().getAbsolutePath());
assertEquals(server.getConfigDir(), TestDirHelper.getTestDir() + "/conf");
assertEquals(server.getLogDir(), TestDirHelper.getTestDir() + "/log");
assertEquals(server.getTempDir(), TestDirHelper.getTestDir() + "/temp");
assertEquals(server.getName(), "server");
assertEquals(server.getPrefix(), "server");
assertEquals(server.getPrefixedName("name"), "server.name");
assertNotNull(server.getConfig());
server = new Server("server", TestDirHelper.getTestDir().getAbsolutePath());
assertEquals(server.getHomeDir(), TestDirHelper.getTestDir().getAbsolutePath());
assertEquals(server.getConfigDir(), TestDirHelper.getTestDir() + "/conf");
assertEquals(server.getLogDir(), TestDirHelper.getTestDir() + "/log");
assertEquals(server.getTempDir(), TestDirHelper.getTestDir() + "/temp");
assertEquals(server.getName(), "server");
assertEquals(server.getPrefix(), "server");
assertEquals(server.getPrefixedName("name"), "server.name");
assertNull(server.getConfig());
}
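// The S01/S02 cases below cover init() failures: S01 when a required directory is missing, S02 when the
// path exists but is not a directory.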
@Test
@TestException(exception = ServerException.class, msgRegExp = "S01.*")
@TestDir
public void initNoHomeDir() throws Exception {
File homeDir = new File(TestDirHelper.getTestDir(), "home");
Configuration conf = new Configuration(false);
conf.set("server.services", TestService.class.getName());
Server server = new Server("server", homeDir.getAbsolutePath(), conf);
server.init();
}
@Test
@TestException(exception = ServerException.class, msgRegExp = "S02.*")
@TestDir
public void initHomeDirNotDir() throws Exception {
File homeDir = new File(TestDirHelper.getTestDir(), "home");
new FileOutputStream(homeDir).close();
Configuration conf = new Configuration(false);
conf.set("server.services", TestService.class.getName());
Server server = new Server("server", homeDir.getAbsolutePath(), conf);
server.init();
}
@Test
@TestException(exception = ServerException.class, msgRegExp = "S01.*")
@TestDir
public void initNoConfigDir() throws Exception {
File homeDir = new File(TestDirHelper.getTestDir(), "home");
assertTrue(homeDir.mkdir());
assertTrue(new File(homeDir, "log").mkdir());
assertTrue(new File(homeDir, "temp").mkdir());
Configuration conf = new Configuration(false);
conf.set("server.services", TestService.class.getName());
Server server = new Server("server", homeDir.getAbsolutePath(), conf);
server.init();
}
@Test
@TestException(exception = ServerException.class, msgRegExp = "S02.*")
@TestDir
public void initConfigDirNotDir() throws Exception {
File homeDir = new File(TestDirHelper.getTestDir(), "home");
assertTrue(homeDir.mkdir());
assertTrue(new File(homeDir, "log").mkdir());
assertTrue(new File(homeDir, "temp").mkdir());
File configDir = new File(homeDir, "conf");
new FileOutputStream(configDir).close();
Configuration conf = new Configuration(false);
conf.set("server.services", TestService.class.getName());
Server server = new Server("server", homeDir.getAbsolutePath(), conf);
server.init();
}
@Test
@TestException(exception = ServerException.class, msgRegExp = "S01.*")
@TestDir
public void initNoLogDir() throws Exception {
File homeDir = new File(TestDirHelper.getTestDir(), "home");
assertTrue(homeDir.mkdir());
assertTrue(new File(homeDir, "conf").mkdir());
assertTrue(new File(homeDir, "temp").mkdir());
Configuration conf = new Configuration(false);
conf.set("server.services", TestService.class.getName());
Server server = new Server("server", homeDir.getAbsolutePath(), conf);
server.init();
}
@Test
@TestException(exception = ServerException.class, msgRegExp = "S02.*")
@TestDir
public void initLogDirNotDir() throws Exception {
File homeDir = new File(TestDirHelper.getTestDir(), "home");
assertTrue(homeDir.mkdir());
assertTrue(new File(homeDir, "conf").mkdir());
assertTrue(new File(homeDir, "temp").mkdir());
File logDir = new File(homeDir, "log");
new FileOutputStream(logDir).close();
Configuration conf = new Configuration(false);
conf.set("server.services", TestService.class.getName());
Server server = new Server("server", homeDir.getAbsolutePath(), conf);
server.init();
}
@Test
@TestException(exception = ServerException.class, msgRegExp = "S01.*")
@TestDir
public void initNoTempDir() throws Exception {
File homeDir = new File(TestDirHelper.getTestDir(), "home");
assertTrue(homeDir.mkdir());
assertTrue(new File(homeDir, "conf").mkdir());
assertTrue(new File(homeDir, "log").mkdir());
Configuration conf = new Configuration(false);
conf.set("server.services", TestService.class.getName());
Server server = new Server("server", homeDir.getAbsolutePath(), conf);
server.init();
}
@Test
@TestException(exception = ServerException.class, msgRegExp = "S02.*")
@TestDir
public void initTempDirNotDir() throws Exception {
File homeDir = new File(TestDirHelper.getTestDir(), "home");
assertTrue(homeDir.mkdir());
assertTrue(new File(homeDir, "conf").mkdir());
assertTrue(new File(homeDir, "log").mkdir());
File tempDir = new File(homeDir, "temp");
new FileOutputStream(tempDir).close();
Configuration conf = new Configuration(false);
conf.set("server.services", TestService.class.getName());
Server server = new Server("server", homeDir.getAbsolutePath(), conf);
server.init();
}
@Test
@TestException(exception = ServerException.class, msgRegExp = "S05.*")
@TestDir
public void siteFileNotAFile() throws Exception {
String homeDir = TestDirHelper.getTestDir().getAbsolutePath();
File siteFile = new File(homeDir, "server-site.xml");
assertTrue(siteFile.mkdir());
Server server = new Server("server", homeDir, homeDir, homeDir, homeDir);
server.init();
}
private Server createServer(Configuration conf) {
return new Server("server", TestDirHelper.getTestDir().getAbsolutePath(),
TestDirHelper.getTestDir().getAbsolutePath(),
TestDirHelper.getTestDir().getAbsolutePath(), TestDirHelper.getTestDir().getAbsolutePath(), conf);
}
@Test
@TestDir
public void log4jFile() throws Exception {
InputStream is = Server.getResource("default-log4j.properties");
OutputStream os = new FileOutputStream(new File(TestDirHelper.getTestDir(), "server-log4j.properties"));
IOUtils.copyBytes(is, os, 1024, true);
Configuration conf = new Configuration(false);
Server server = createServer(conf);
server.init();
}
public static class LifeCycleService extends BaseService {
public LifeCycleService() {
super("lifecycle");
}
@Override
protected void init() throws ServiceException {
assertEquals(getServer().getStatus(), Server.Status.BOOTING);
}
@Override
public void destroy() {
assertEquals(getServer().getStatus(), Server.Status.SHUTTING_DOWN);
super.destroy();
}
@Override
public Class getInterface() {
return LifeCycleService.class;
}
}
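// A server moves UNDEF -> NORMAL on init() and -> SHUTDOWN on destroy(); LifeCycleService above observes
// the intermediate BOOTING and SHUTTING_DOWN states from inside its callbacks.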
@Test
@TestDir
public void lifeCycle() throws Exception {
Configuration conf = new Configuration(false);
conf.set("server.services", LifeCycleService.class.getName());
Server server = createServer(conf);
assertEquals(server.getStatus(), Server.Status.UNDEF);
server.init();
assertNotNull(server.get(LifeCycleService.class));
assertEquals(server.getStatus(), Server.Status.NORMAL);
server.destroy();
assertEquals(server.getStatus(), Server.Status.SHUTDOWN);
}
@Test
@TestDir
public void startWithStatusNotNormal() throws Exception {
Configuration conf = new Configuration(false);
conf.set("server.startup.status", "ADMIN");
Server server = createServer(conf);
server.init();
assertEquals(server.getStatus(), Server.Status.ADMIN);
server.destroy();
}
@Test(expected = IllegalArgumentException.class)
@TestDir
public void nonSettableStatus() throws Exception {
Configuration conf = new Configuration(false);
Server server = createServer(conf);
server.init();
server.setStatus(Server.Status.SHUTDOWN);
}
public static class TestService implements Service {
static List<String> LIFECYCLE = new ArrayList<String>();
@Override
public void init(Server server) throws ServiceException {
LIFECYCLE.add("init");
}
@Override
public void postInit() throws ServiceException {
LIFECYCLE.add("postInit");
}
@Override
public void destroy() {
LIFECYCLE.add("destroy");
}
@Override
public Class[] getServiceDependencies() {
return new Class[0];
}
@Override
public Class getInterface() {
return TestService.class;
}
@Override
public void serverStatusChange(Server.Status oldStatus, Server.Status newStatus) throws ServiceException {
LIFECYCLE.add("serverStatusChange");
}
}
public static class TestServiceExceptionOnStatusChange extends TestService {
@Override
public void serverStatusChange(Server.Status oldStatus, Server.Status newStatus) throws ServiceException {
throw new RuntimeException();
}
}
@Test
@TestDir
public void changeStatus() throws Exception {
TestService.LIFECYCLE.clear();
Configuration conf = new Configuration(false);
conf.set("server.services", TestService.class.getName());
Server server = createServer(conf);
server.init();
server.setStatus(Server.Status.ADMIN);
assertTrue(TestService.LIFECYCLE.contains("serverStatusChange"));
}
@Test
@TestException(exception = ServerException.class, msgRegExp = "S11.*")
@TestDir
public void changeStatusServiceException() throws Exception {
TestService.LIFECYCLE.clear();
Configuration conf = new Configuration(false);
conf.set("server.services", TestServiceExceptionOnStatusChange.class.getName());
Server server = createServer(conf);
server.init();
}
@Test
@TestDir
public void setSameStatus() throws Exception {
Configuration conf = new Configuration(false);
conf.set("server.services", TestService.class.getName());
Server server = createServer(conf);
server.init();
TestService.LIFECYCLE.clear();
server.setStatus(server.getStatus());
assertFalse(TestService.LIFECYCLE.contains("serverStatusChange"));
}
@Test
@TestDir
public void serviceLifeCycle() throws Exception {
TestService.LIFECYCLE.clear();
Configuration conf = new Configuration(false);
conf.set("server.services", TestService.class.getName());
Server server = createServer(conf);
server.init();
assertNotNull(server.get(TestService.class));
server.destroy();
assertEquals(TestService.LIFECYCLE, Arrays.asList("init", "postInit", "serverStatusChange", "destroy"));
}
@Test
@TestDir
public void loadingDefaultConfig() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Server server = new Server("testserver", dir, dir, dir, dir);
server.init();
assertEquals(server.getConfig().get("testserver.a"), "default");
}
@Test
@TestDir
public void loadingSiteConfig() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
File configFile = new File(dir, "testserver-site.xml");
Writer w = new FileWriter(configFile);
w.write("<configuration><property><name>testserver.a</name><value>site</value></property></configuration>");
w.close();
Server server = new Server("testserver", dir, dir, dir, dir);
server.init();
assertEquals(server.getConfig().get("testserver.a"), "site");
}
@Test
@TestDir
public void loadingSysPropConfig() throws Exception {
try {
System.setProperty("testserver.a", "sysprop");
String dir = TestDirHelper.getTestDir().getAbsolutePath();
File configFile = new File(dir, "testserver-site.xml");
Writer w = new FileWriter(configFile);
w.write("<configuration><property><name>testserver.a</name><value>site</value></property></configuration>");
w.close();
Server server = new Server("testserver", dir, dir, dir, dir);
server.init();
assertEquals(server.getConfig().get("testserver.a"), "sysprop");
} finally {
System.getProperties().remove("testserver.a");
}
}
@Test(expected = IllegalStateException.class)
@TestDir
public void illegalState1() throws Exception {
Server server = new Server("server", TestDirHelper.getTestDir().getAbsolutePath(), new Configuration(false));
server.destroy();
}
@Test(expected = IllegalStateException.class)
@TestDir
public void illegalState2() throws Exception {
Server server = new Server("server", TestDirHelper.getTestDir().getAbsolutePath(), new Configuration(false));
server.get(Object.class);
}
@Test(expected = IllegalStateException.class)
@TestDir
public void illegalState3() throws Exception {
Server server = new Server("server", TestDirHelper.getTestDir().getAbsolutePath(), new Configuration(false));
server.setService(null);
}
@Test(expected = IllegalStateException.class)
@TestDir
public void illegalState4() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Server server = new Server("server", dir, dir, dir, dir, new Configuration(false));
server.init();
server.init();
}
private static List<String> ORDER = new ArrayList<String>();
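// The MyService* classes below record every life-cycle call in ORDER so the tests can assert
// init/postInit/destroy ordering, dependency handling and failure behaviour.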
public abstract static class MyService implements Service, XException.ERROR {
private String id;
private Class serviceInterface;
private Class[] dependencies;
private boolean failOnInit;
private boolean failOnDestroy;
protected MyService(String id, Class serviceInterface, Class[] dependencies, boolean failOnInit,
boolean failOnDestroy) {
this.id = id;
this.serviceInterface = serviceInterface;
this.dependencies = dependencies;
this.failOnInit = failOnInit;
this.failOnDestroy = failOnDestroy;
}
@Override
public void init(Server server) throws ServiceException {
ORDER.add(id + ".init");
if (failOnInit) {
throw new ServiceException(this);
}
}
@Override
public void postInit() throws ServiceException {
ORDER.add(id + ".postInit");
}
@Override
public String getTemplate() {
return "";
}
@Override
public void destroy() {
ORDER.add(id + ".destroy");
if (failOnDestroy) {
throw new RuntimeException();
}
}
@Override
public Class[] getServiceDependencies() {
return dependencies;
}
@Override
public Class getInterface() {
return serviceInterface;
}
@Override
public void serverStatusChange(Server.Status oldStatus, Server.Status newStatus) throws ServiceException {
}
}
public static class MyService1 extends MyService {
public MyService1() {
super("s1", MyService1.class, null, false, false);
}
protected MyService1(String id, Class serviceInterface, Class[] dependencies, boolean failOnInit,
boolean failOnDestroy) {
super(id, serviceInterface, dependencies, failOnInit, failOnDestroy);
}
}
public static class MyService2 extends MyService {
public MyService2() {
super("s2", MyService2.class, null, true, false);
}
}
public static class MyService3 extends MyService {
public MyService3() {
super("s3", MyService3.class, null, false, false);
}
}
public static class MyService1a extends MyService1 {
public MyService1a() {
super("s1a", MyService1.class, null, false, false);
}
}
public static class MyService4 extends MyService1 {
public MyService4() {
super("s4a", String.class, null, false, false);
}
}
public static class MyService5 extends MyService {
public MyService5() {
super("s5", MyService5.class, null, false, true);
}
protected MyService5(String id, Class serviceInterface, Class[] dependencies, boolean failOnInit,
boolean failOnDestroy) {
super(id, serviceInterface, dependencies, failOnInit, failOnDestroy);
}
}
public static class MyService5a extends MyService5 {
public MyService5a() {
super("s5a", MyService5.class, null, false, false);
}
}
public static class MyService6 extends MyService {
public MyService6() {
super("s6", MyService6.class, new Class[]{MyService1.class}, false, false);
}
}
public static class MyService7 extends MyService {
@SuppressWarnings({"UnusedParameters"})
public MyService7(String foo) {
super("s6", MyService7.class, new Class[]{MyService1.class}, false, false);
}
}
@Test
@TestException(exception = ServerException.class, msgRegExp = "S08.*")
@TestDir
public void invalidService() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration conf = new Configuration(false);
conf.set("server.services", "foo");
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
}
@Test
@TestException(exception = ServerException.class, msgRegExp = "S07.*")
@TestDir
public void serviceWithNoDefaultConstructor() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration conf = new Configuration(false);
conf.set("server.services", MyService7.class.getName());
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
}
@Test
@TestException(exception = ServerException.class, msgRegExp = "S04.*")
@TestDir
public void serviceNotImplementingServiceInterface() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration conf = new Configuration(false);
conf.set("server.services", MyService4.class.getName());
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
}
@Test
@TestException(exception = ServerException.class, msgRegExp = "S10.*")
@TestDir
public void serviceWithMissingDependency() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration conf = new Configuration(false);
String services = StringUtils.join(",", Arrays.asList(MyService3.class.getName(), MyService6.class.getName())
);
conf.set("server.services", services);
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
}
@Test
@TestDir
public void services() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration conf;
Server server;
// no services
ORDER.clear();
conf = new Configuration(false);
server = new Server("server", dir, dir, dir, dir, conf);
server.init();
assertEquals(ORDER.size(), 0);
// 2 services init/destroy
ORDER.clear();
String services = StringUtils.join(",", Arrays.asList(MyService1.class.getName(), MyService3.class.getName())
);
conf = new Configuration(false);
conf.set("server.services", services);
server = new Server("server", dir, dir, dir, dir, conf);
server.init();
assertEquals(server.get(MyService1.class).getInterface(), MyService1.class);
assertEquals(server.get(MyService3.class).getInterface(), MyService3.class);
assertEquals(ORDER.size(), 4);
assertEquals(ORDER.get(0), "s1.init");
assertEquals(ORDER.get(1), "s3.init");
assertEquals(ORDER.get(2), "s1.postInit");
assertEquals(ORDER.get(3), "s3.postInit");
server.destroy();
assertEquals(ORDER.size(), 6);
assertEquals(ORDER.get(4), "s3.destroy");
assertEquals(ORDER.get(5), "s1.destroy");
// 3 services, 2nd one fails on init
ORDER.clear();
services = StringUtils.join(",", Arrays.asList(MyService1.class.getName(), MyService2.class.getName(),
MyService3.class.getName()));
conf = new Configuration(false);
conf.set("server.services", services);
server = new Server("server", dir, dir, dir, dir, conf);
try {
server.init();
fail();
} catch (ServerException ex) {
assertEquals(MyService2.class, ex.getError().getClass());
} catch (Exception ex) {
fail();
}
assertEquals(ORDER.size(), 3);
assertEquals(ORDER.get(0), "s1.init");
assertEquals(ORDER.get(1), "s2.init");
assertEquals(ORDER.get(2), "s1.destroy");
// 2 services one fails on destroy
ORDER.clear();
services = StringUtils.join(",", Arrays.asList(MyService1.class.getName(), MyService5.class.getName()));
conf = new Configuration(false);
conf.set("server.services", services);
server = new Server("server", dir, dir, dir, dir, conf);
server.init();
assertEquals(ORDER.size(), 4);
assertEquals(ORDER.get(0), "s1.init");
assertEquals(ORDER.get(1), "s5.init");
assertEquals(ORDER.get(2), "s1.postInit");
assertEquals(ORDER.get(3), "s5.postInit");
server.destroy();
assertEquals(ORDER.size(), 6);
assertEquals(ORDER.get(4), "s5.destroy");
assertEquals(ORDER.get(5), "s1.destroy");
// service override via ext
ORDER.clear();
services = StringUtils.join(",", Arrays.asList(MyService1.class.getName(), MyService3.class.getName()));
String servicesExt = StringUtils.join(",", Arrays.asList(MyService1a.class.getName()));
conf = new Configuration(false);
conf.set("server.services", services);
conf.set("server.services.ext", servicesExt);
server = new Server("server", dir, dir, dir, dir, conf);
server.init();
assertEquals(server.get(MyService1.class).getClass(), MyService1a.class);
assertEquals(ORDER.size(), 4);
assertEquals(ORDER.get(0), "s1a.init");
assertEquals(ORDER.get(1), "s3.init");
assertEquals(ORDER.get(2), "s1a.postInit");
assertEquals(ORDER.get(3), "s3.postInit");
server.destroy();
assertEquals(ORDER.size(), 6);
assertEquals(ORDER.get(4), "s3.destroy");
assertEquals(ORDER.get(5), "s1a.destroy");
// service override via setService
ORDER.clear();
services = StringUtils.join(",", Arrays.asList(MyService1.class.getName(), MyService3.class.getName()));
conf = new Configuration(false);
conf.set("server.services", services);
server = new Server("server", dir, dir, dir, dir, conf);
server.init();
server.setService(MyService1a.class);
assertEquals(ORDER.size(), 6);
assertEquals(ORDER.get(4), "s1.destroy");
assertEquals(ORDER.get(5), "s1a.init");
assertEquals(server.get(MyService1.class).getClass(), MyService1a.class);
server.destroy();
assertEquals(ORDER.size(), 8);
assertEquals(ORDER.get(6), "s3.destroy");
assertEquals(ORDER.get(7), "s1a.destroy");
// service add via setService
ORDER.clear();
services = StringUtils.join(",", Arrays.asList(MyService1.class.getName(), MyService3.class.getName()));
conf = new Configuration(false);
conf.set("server.services", services);
server = new Server("server", dir, dir, dir, dir, conf);
server.init();
server.setService(MyService5.class);
assertEquals(ORDER.size(), 5);
assertEquals(ORDER.get(4), "s5.init");
assertEquals(server.get(MyService5.class).getClass(), MyService5.class);
server.destroy();
assertEquals(ORDER.size(), 8);
assertEquals(ORDER.get(5), "s5.destroy");
assertEquals(ORDER.get(6), "s3.destroy");
assertEquals(ORDER.get(7), "s1.destroy");
// service add via setService exception
ORDER.clear();
services = StringUtils.join(",", Arrays.asList(MyService1.class.getName(), MyService3.class.getName()));
conf = new Configuration(false);
conf.set("server.services", services);
server = new Server("server", dir, dir, dir, dir, conf);
server.init();
try {
server.setService(MyService7.class);
fail();
} catch (ServerException ex) {
assertEquals(ServerException.ERROR.S09, ex.getError());
} catch (Exception ex) {
fail();
}
assertEquals(ORDER.size(), 6);
assertEquals(ORDER.get(4), "s3.destroy");
assertEquals(ORDER.get(5), "s1.destroy");
// service with dependency
ORDER.clear();
services = StringUtils.join(",", Arrays.asList(MyService1.class.getName(), MyService6.class.getName()));
conf = new Configuration(false);
conf.set("server.services", services);
server = new Server("server", dir, dir, dir, dir, conf);
server.init();
assertEquals(server.get(MyService1.class).getInterface(), MyService1.class);
assertEquals(server.get(MyService6.class).getInterface(), MyService6.class);
server.destroy();
}
/**
* Creates an absolute path by appending the given relative path to the test
* root.
*
* @param relativePath String relative path
* @return String absolute path formed by appending relative path to test root
*/
private static String getAbsolutePath(String relativePath) {
return new File(TestDirHelper.getTestDir(), relativePath).getAbsolutePath();
}
}
| 28,491 | 34.131936 | 120 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/server/TestServerConstructor.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.server;
import java.util.Arrays;
import java.util.Collection;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.test.HTestCase;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
@RunWith(value = Parameterized.class)
public class TestServerConstructor extends HTestCase {
@Parameterized.Parameters
public static Collection constructorFailParams() {
return Arrays.asList(new Object[][]{
{null, null, null, null, null, null},
{"", null, null, null, null, null},
{null, null, null, null, null, null},
{"server", null, null, null, null, null},
{"server", "", null, null, null, null},
{"server", "foo", null, null, null, null},
{"server", "/tmp", null, null, null, null},
{"server", "/tmp", "", null, null, null},
{"server", "/tmp", "foo", null, null, null},
{"server", "/tmp", "/tmp", null, null, null},
{"server", "/tmp", "/tmp", "", null, null},
{"server", "/tmp", "/tmp", "foo", null, null},
{"server", "/tmp", "/tmp", "/tmp", null, null},
{"server", "/tmp", "/tmp", "/tmp", "", null},
{"server", "/tmp", "/tmp", "/tmp", "foo", null}});
}
private String name;
private String homeDir;
private String configDir;
private String logDir;
private String tempDir;
private Configuration conf;
public TestServerConstructor(String name, String homeDir, String configDir, String logDir, String tempDir,
Configuration conf) {
this.name = name;
this.homeDir = homeDir;
this.configDir = configDir;
this.logDir = logDir;
this.tempDir = tempDir;
this.conf = conf;
}
@Test(expected = IllegalArgumentException.class)
public void constructorFail() {
new Server(name, homeDir, configDir, logDir, tempDir, conf);
}
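  // For contrast with the failing parameter combinations above, a sketch of a
  // construction the argument checks accept would pass a non-empty name,
  // absolute directories and a non-null Configuration (paths illustrative):
  //   new Server("server", "/tmp", "/tmp", "/tmp", "/tmp", new Configuration(false));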
}
| 2,688 | 33.922078 | 108 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestInputStreamEntity.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.wsrs;
import static org.junit.Assert.assertEquals;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.InputStream;
import org.junit.Test;
public class TestInputStreamEntity {
@Test
public void test() throws Exception {
InputStream is = new ByteArrayInputStream("abc".getBytes());
ByteArrayOutputStream baos = new ByteArrayOutputStream();
InputStreamEntity i = new InputStreamEntity(is);
i.write(baos);
baos.close();
assertEquals(new String(baos.toByteArray()), "abc");
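    // The 3-arg constructor below takes (stream, offset, len): it skips
    // 'offset' bytes and writes at most 'len' bytes of "abc", which the
    // assertion at the end confirms by checking the first output byte is 'b'.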
is = new ByteArrayInputStream("abc".getBytes());
baos = new ByteArrayOutputStream();
i = new InputStreamEntity(is, 1, 1);
i.write(baos);
baos.close();
assertEquals(baos.toByteArray()[0], 'b');
}
}
| 1,604 | 31.755102 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestParam.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.wsrs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import java.util.regex.Pattern;
import org.junit.Test;
public class TestParam {
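  // Shared checker used by the tests below: verifies a Param's name and domain
  // strings, that empty/null input parses to the default value, that a valid
  // string parses to the expected value, and that invalid or out-of-range
  // strings are rejected with IllegalArgumentException.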
private <T> void test(Param<T> param, String name,
String domain, T defaultValue, T validValue,
String invalidStrValue, String outOfRangeValue) throws Exception {
assertEquals(name, param.getName());
assertEquals(domain, param.getDomain());
assertEquals(defaultValue, param.value());
assertEquals(defaultValue, param.parseParam(""));
assertEquals(defaultValue, param.parseParam(null));
assertEquals(validValue, param.parseParam(validValue.toString()));
if (invalidStrValue != null) {
try {
param.parseParam(invalidStrValue);
fail();
} catch (IllegalArgumentException ex) {
//NOP
} catch (Exception ex) {
fail();
}
}
if (outOfRangeValue != null) {
try {
param.parseParam(outOfRangeValue);
fail();
} catch (IllegalArgumentException ex) {
//NOP
} catch (Exception ex) {
fail();
}
}
}
@Test
public void testBoolean() throws Exception {
Param<Boolean> param = new BooleanParam("b", false) {
};
test(param, "b", "a boolean", false, true, "x", null);
}
@Test
public void testByte() throws Exception {
Param<Byte> param = new ByteParam("B", (byte) 1) {
};
test(param, "B", "a byte", (byte) 1, (byte) 2, "x", "256");
}
@Test
public void testShort() throws Exception {
Param<Short> param = new ShortParam("S", (short) 1) {
};
test(param, "S", "a short", (short) 1, (short) 2, "x",
"" + ((int)Short.MAX_VALUE + 1));
param = new ShortParam("S", (short) 1, 8) {
};
assertEquals(new Short((short)01777), param.parse("01777"));
}
@Test
public void testInteger() throws Exception {
Param<Integer> param = new IntegerParam("I", 1) {
};
test(param, "I", "an integer", 1, 2, "x", "" + ((long)Integer.MAX_VALUE + 1));
}
@Test
public void testLong() throws Exception {
Param<Long> param = new LongParam("L", 1L) {
};
test(param, "L", "a long", 1L, 2L, "x", null);
}
public static enum ENUM {
FOO, BAR
}
@Test
public void testEnum() throws Exception {
EnumParam<ENUM> param = new EnumParam<ENUM>("e", ENUM.class, ENUM.FOO) {
};
test(param, "e", "FOO,BAR", ENUM.FOO, ENUM.BAR, "x", null);
}
@Test
public void testString() throws Exception {
Param<String> param = new StringParam("s", "foo") {
};
test(param, "s", "a string", "foo", "bar", null, null);
}
@Test
public void testRegEx() throws Exception {
Param<String> param = new StringParam("r", "aa", Pattern.compile("..")) {
};
test(param, "r", "..", "aa", "bb", "c", null);
}
}
| 3,699 | 27.90625 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestJSONMapProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.wsrs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.ByteArrayOutputStream;
import java.util.Map;
import org.json.simple.JSONObject;
import org.junit.Test;
public class TestJSONMapProvider {
@Test
@SuppressWarnings("unchecked")
public void test() throws Exception {
JSONMapProvider p = new JSONMapProvider();
assertTrue(p.isWriteable(Map.class, null, null, null));
assertFalse(p.isWriteable(this.getClass(), null, null, null));
assertEquals(p.getSize(null, null, null, null, null), -1);
ByteArrayOutputStream baos = new ByteArrayOutputStream();
JSONObject json = new JSONObject();
json.put("a", "A");
p.writeTo(json, JSONObject.class, null, null, null, null, baos);
baos.close();
assertEquals(new String(baos.toByteArray()).trim(), "{\"a\":\"A\"}");
}
}
| 1,746 | 34.653061 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestJSONProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.wsrs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.ByteArrayOutputStream;
import org.json.simple.JSONObject;
import org.junit.Test;
public class TestJSONProvider {
@Test
@SuppressWarnings("unchecked")
public void test() throws Exception {
JSONProvider p = new JSONProvider();
assertTrue(p.isWriteable(JSONObject.class, null, null, null));
assertFalse(p.isWriteable(this.getClass(), null, null, null));
assertEquals(p.getSize(null, null, null, null, null), -1);
ByteArrayOutputStream baos = new ByteArrayOutputStream();
JSONObject json = new JSONObject();
json.put("a", "A");
p.writeTo(json, JSONObject.class, null, null, null, null, baos);
baos.close();
assertEquals(new String(baos.toByteArray()).trim(), "{\"a\":\"A\"}");
}
}
| 1,722 | 34.895833 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.http.client;
import org.apache.commons.io.Charsets;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.Path;
import org.json.simple.parser.JSONParser;
import org.json.simple.parser.ParseException;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URI;
import java.net.URL;
import java.net.URLEncoder;
import java.text.MessageFormat;
import java.util.List;
import java.util.Map;
/**
* Utility methods used by HttpFS classes.
*/
@InterfaceAudience.Private
public class HttpFSUtils {
public static final String SERVICE_NAME = "/webhdfs";
public static final String SERVICE_VERSION = "/v1";
private static final String SERVICE_PATH = SERVICE_NAME + SERVICE_VERSION;
/**
* Convenience method that creates an HTTP <code>URL</code> for the
* HttpFSServer file system operations.
* <p/>
*
* @param path the file path.
* @param params the query string parameters.
*
* @return a <code>URL</code> for the HttpFSServer server,
*
* @throws IOException thrown if an IO error occurs.
*/
static URL createURL(Path path, Map<String, String> params)
throws IOException {
return createURL(path, params, null);
}
/**
* Convenience method that creates an HTTP <code>URL</code> for the
* HttpFSServer file system operations.
* <p/>
*
* @param path the file path.
* @param params the query string parameters.
* @param multiValuedParams multi valued parameters of the query string
*
* @return URL a <code>URL</code> for the HttpFSServer server,
*
* @throws IOException thrown if an IO error occurs.
*/
static URL createURL(Path path, Map<String, String> params, Map<String,
List<String>> multiValuedParams) throws IOException {
URI uri = path.toUri();
String realScheme;
if (uri.getScheme().equalsIgnoreCase(HttpFSFileSystem.SCHEME)) {
realScheme = "http";
} else if (uri.getScheme().equalsIgnoreCase(HttpsFSFileSystem.SCHEME)) {
realScheme = "https";
} else {
throw new IllegalArgumentException(MessageFormat.format(
"Invalid scheme [{0}] it should be '" + HttpFSFileSystem.SCHEME + "' " +
"or '" + HttpsFSFileSystem.SCHEME + "'", uri));
}
StringBuilder sb = new StringBuilder();
sb.append(realScheme).append("://").append(uri.getAuthority()).
append(SERVICE_PATH).append(uri.getPath());
String separator = "?";
for (Map.Entry<String, String> entry : params.entrySet()) {
sb.append(separator).append(entry.getKey()).append("=").
append(URLEncoder.encode(entry.getValue(), "UTF8"));
separator = "&";
}
if (multiValuedParams != null) {
for (Map.Entry<String, List<String>> multiValuedEntry :
multiValuedParams.entrySet()) {
String name = URLEncoder.encode(multiValuedEntry.getKey(), "UTF8");
List<String> values = multiValuedEntry.getValue();
for (String value : values) {
sb.append(separator).append(name).append("=").
append(URLEncoder.encode(value, "UTF8"));
separator = "&";
}
}
}
return new URL(sb.toString());
}
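  // Illustrative example (not part of the original source): for a path of
  // webhdfs://host:14000/user/foo and params {op=GETFILESTATUS}, the method
  // above yields http://host:14000/webhdfs/v1/user/foo?op=GETFILESTATUS,
  // i.e. the webhdfs/swebhdfs scheme is mapped to http/https and the
  // "/webhdfs/v1" service path is prepended to the file path.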
/**
* Convenience method that JSON Parses the <code>InputStream</code> of a
* <code>HttpURLConnection</code>.
*
* @param conn the <code>HttpURLConnection</code>.
*
* @return the parsed JSON object.
*
* @throws IOException thrown if the <code>InputStream</code> could not be
* JSON parsed.
*/
static Object jsonParse(HttpURLConnection conn) throws IOException {
try {
JSONParser parser = new JSONParser();
return parser.parse(new InputStreamReader(conn.getInputStream(), Charsets.UTF_8));
} catch (ParseException ex) {
throw new IOException("JSON parser error, " + ex.getMessage(), ex);
}
}
}
| 4,706 | 33.357664 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.http.client;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.DelegationTokenRenewer;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PositionedReadable;
import org.apache.hadoop.fs.Seekable;
import org.apache.hadoop.fs.XAttrCodec;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.lib.wsrs.EnumSetParam;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticator;
import org.apache.hadoop.util.HttpExceptionUtils;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
import org.json.simple.JSONArray;
import org.json.simple.JSONObject;
import org.json.simple.parser.JSONParser;
import org.json.simple.parser.ParseException;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.FileNotFoundException;
import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.security.PrivilegedExceptionAction;
import java.text.MessageFormat;
import java.util.HashMap;
import java.util.Map;
/**
* HttpFSServer implementation of the FileSystemAccess FileSystem.
* <p>
* This implementation allows a user to access HDFS over HTTP via a HttpFSServer server.
*/
@InterfaceAudience.Private
public class HttpFSFileSystem extends FileSystem
implements DelegationTokenRenewer.Renewable {
public static final String SERVICE_NAME = HttpFSUtils.SERVICE_NAME;
public static final String SERVICE_VERSION = HttpFSUtils.SERVICE_VERSION;
public static final String SCHEME = "webhdfs";
public static final String OP_PARAM = "op";
public static final String DO_AS_PARAM = "doas";
public static final String OVERWRITE_PARAM = "overwrite";
public static final String REPLICATION_PARAM = "replication";
public static final String BLOCKSIZE_PARAM = "blocksize";
public static final String PERMISSION_PARAM = "permission";
public static final String ACLSPEC_PARAM = "aclspec";
public static final String DESTINATION_PARAM = "destination";
public static final String RECURSIVE_PARAM = "recursive";
public static final String SOURCES_PARAM = "sources";
public static final String OWNER_PARAM = "owner";
public static final String GROUP_PARAM = "group";
public static final String MODIFICATION_TIME_PARAM = "modificationtime";
public static final String ACCESS_TIME_PARAM = "accesstime";
public static final String XATTR_NAME_PARAM = "xattr.name";
public static final String XATTR_VALUE_PARAM = "xattr.value";
public static final String XATTR_SET_FLAG_PARAM = "flag";
public static final String XATTR_ENCODING_PARAM = "encoding";
public static final String NEW_LENGTH_PARAM = "newlength";
public static final Short DEFAULT_PERMISSION = 0755;
public static final String ACLSPEC_DEFAULT = "";
public static final String RENAME_JSON = "boolean";
public static final String TRUNCATE_JSON = "boolean";
public static final String DELETE_JSON = "boolean";
public static final String MKDIRS_JSON = "boolean";
public static final String HOME_DIR_JSON = "Path";
public static final String SET_REPLICATION_JSON = "boolean";
public static final String UPLOAD_CONTENT_TYPE= "application/octet-stream";
public static enum FILE_TYPE {
FILE, DIRECTORY, SYMLINK;
public static FILE_TYPE getType(FileStatus fileStatus) {
if (fileStatus.isFile()) {
return FILE;
}
if (fileStatus.isDirectory()) {
return DIRECTORY;
}
if (fileStatus.isSymlink()) {
return SYMLINK;
}
throw new IllegalArgumentException("Could not determine filetype for: " +
fileStatus.getPath());
}
}
public static final String FILE_STATUSES_JSON = "FileStatuses";
public static final String FILE_STATUS_JSON = "FileStatus";
public static final String PATH_SUFFIX_JSON = "pathSuffix";
public static final String TYPE_JSON = "type";
public static final String LENGTH_JSON = "length";
public static final String OWNER_JSON = "owner";
public static final String GROUP_JSON = "group";
public static final String PERMISSION_JSON = "permission";
public static final String ACCESS_TIME_JSON = "accessTime";
public static final String MODIFICATION_TIME_JSON = "modificationTime";
public static final String BLOCK_SIZE_JSON = "blockSize";
public static final String REPLICATION_JSON = "replication";
public static final String XATTRS_JSON = "XAttrs";
public static final String XATTR_NAME_JSON = "name";
public static final String XATTR_VALUE_JSON = "value";
public static final String XATTRNAMES_JSON = "XAttrNames";
public static final String FILE_CHECKSUM_JSON = "FileChecksum";
public static final String CHECKSUM_ALGORITHM_JSON = "algorithm";
public static final String CHECKSUM_BYTES_JSON = "bytes";
public static final String CHECKSUM_LENGTH_JSON = "length";
public static final String CONTENT_SUMMARY_JSON = "ContentSummary";
public static final String CONTENT_SUMMARY_DIRECTORY_COUNT_JSON = "directoryCount";
public static final String CONTENT_SUMMARY_FILE_COUNT_JSON = "fileCount";
public static final String CONTENT_SUMMARY_LENGTH_JSON = "length";
public static final String CONTENT_SUMMARY_QUOTA_JSON = "quota";
public static final String CONTENT_SUMMARY_SPACE_CONSUMED_JSON = "spaceConsumed";
public static final String CONTENT_SUMMARY_SPACE_QUOTA_JSON = "spaceQuota";
public static final String ACL_STATUS_JSON = "AclStatus";
public static final String ACL_STICKY_BIT_JSON = "stickyBit";
public static final String ACL_ENTRIES_JSON = "entries";
public static final String ACL_BIT_JSON = "aclBit";
public static final int HTTP_TEMPORARY_REDIRECT = 307;
private static final String HTTP_GET = "GET";
private static final String HTTP_PUT = "PUT";
private static final String HTTP_POST = "POST";
private static final String HTTP_DELETE = "DELETE";
@InterfaceAudience.Private
public static enum Operation {
OPEN(HTTP_GET), GETFILESTATUS(HTTP_GET), LISTSTATUS(HTTP_GET),
GETHOMEDIRECTORY(HTTP_GET), GETCONTENTSUMMARY(HTTP_GET),
GETFILECHECKSUM(HTTP_GET), GETFILEBLOCKLOCATIONS(HTTP_GET),
INSTRUMENTATION(HTTP_GET), GETACLSTATUS(HTTP_GET),
APPEND(HTTP_POST), CONCAT(HTTP_POST), TRUNCATE(HTTP_POST),
CREATE(HTTP_PUT), MKDIRS(HTTP_PUT), RENAME(HTTP_PUT), SETOWNER(HTTP_PUT),
SETPERMISSION(HTTP_PUT), SETREPLICATION(HTTP_PUT), SETTIMES(HTTP_PUT),
MODIFYACLENTRIES(HTTP_PUT), REMOVEACLENTRIES(HTTP_PUT),
REMOVEDEFAULTACL(HTTP_PUT), REMOVEACL(HTTP_PUT), SETACL(HTTP_PUT),
DELETE(HTTP_DELETE), SETXATTR(HTTP_PUT), GETXATTRS(HTTP_GET),
REMOVEXATTR(HTTP_PUT), LISTXATTRS(HTTP_GET);
private String httpMethod;
Operation(String httpMethod) {
this.httpMethod = httpMethod;
}
public String getMethod() {
return httpMethod;
}
}
private DelegationTokenAuthenticatedURL authURL;
private DelegationTokenAuthenticatedURL.Token authToken =
new DelegationTokenAuthenticatedURL.Token();
private URI uri;
private Path workingDir;
private UserGroupInformation realUser;
/**
* Convenience method that creates a <code>HttpURLConnection</code> for the
* HttpFSServer file system operations.
* <p>
   * This method performs authentication and injects any needed credentials
* via the {@link #getConnection(URL, String)} method
*
* @param method the HTTP method.
* @param params the query string parameters.
* @param path the file path
* @param makeQualified if the path should be 'makeQualified'
*
* @return a <code>HttpURLConnection</code> for the HttpFSServer server,
* authenticated and ready to use for the specified path and file system operation.
*
* @throws IOException thrown if an IO error occurs.
*/
private HttpURLConnection getConnection(final String method,
Map<String, String> params, Path path, boolean makeQualified)
throws IOException {
return getConnection(method, params, null, path, makeQualified);
}
/**
* Convenience method that creates a <code>HttpURLConnection</code> for the
* HttpFSServer file system operations.
* <p/>
   * This method performs authentication and injects any needed credentials
* via the {@link #getConnection(URL, String)} method
*
* @param method the HTTP method.
* @param params the query string parameters.
* @param multiValuedParams multi valued parameters of the query string
* @param path the file path
* @param makeQualified if the path should be 'makeQualified'
*
* @return HttpURLConnection a <code>HttpURLConnection</code> for the
* HttpFSServer server, authenticated and ready to use for the
* specified path and file system operation.
*
* @throws IOException thrown if an IO error occurs.
*/
private HttpURLConnection getConnection(final String method,
Map<String, String> params, Map<String, List<String>> multiValuedParams,
Path path, boolean makeQualified) throws IOException {
if (makeQualified) {
path = makeQualified(path);
}
final URL url = HttpFSUtils.createURL(path, params, multiValuedParams);
try {
return UserGroupInformation.getCurrentUser().doAs(
new PrivilegedExceptionAction<HttpURLConnection>() {
@Override
public HttpURLConnection run() throws Exception {
return getConnection(url, method);
}
}
);
} catch (Exception ex) {
if (ex instanceof IOException) {
throw (IOException) ex;
} else {
throw new IOException(ex);
}
}
}
/**
* Convenience method that creates a <code>HttpURLConnection</code> for the specified URL.
* <p>
   * This method performs authentication and injects any needed credentials.
*
* @param url url to connect to.
* @param method the HTTP method.
*
* @return a <code>HttpURLConnection</code> for the HttpFSServer server, authenticated and ready to use for
* the specified path and file system operation.
*
* @throws IOException thrown if an IO error occurs.
*/
private HttpURLConnection getConnection(URL url, String method) throws IOException {
try {
HttpURLConnection conn = authURL.openConnection(url, authToken);
conn.setRequestMethod(method);
if (method.equals(HTTP_POST) || method.equals(HTTP_PUT)) {
conn.setDoOutput(true);
}
return conn;
} catch (Exception ex) {
throw new IOException(ex);
}
}
/**
* Called after a new FileSystem instance is constructed.
*
* @param name a uri whose authority section names the host, port, etc. for this FileSystem
* @param conf the configuration
*/
@Override
public void initialize(URI name, Configuration conf) throws IOException {
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    //the real user is the one that has the Kerberos credentials needed for
//SPNEGO to work
realUser = ugi.getRealUser();
if (realUser == null) {
realUser = UserGroupInformation.getLoginUser();
}
super.initialize(name, conf);
try {
uri = new URI(name.getScheme() + "://" + name.getAuthority());
} catch (URISyntaxException ex) {
throw new IOException(ex);
}
Class<? extends DelegationTokenAuthenticator> klass =
getConf().getClass("httpfs.authenticator.class",
KerberosDelegationTokenAuthenticator.class,
DelegationTokenAuthenticator.class);
DelegationTokenAuthenticator authenticator =
ReflectionUtils.newInstance(klass, getConf());
authURL = new DelegationTokenAuthenticatedURL(authenticator);
}
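  // Client-side usage sketch (assumptions: the "webhdfs" scheme is mapped to
  // this class, e.g. via fs.webhdfs.impl, and "httpfs-host:14000" is a running
  // HttpFS server; both names are illustrative only):
  //
  //   Configuration conf = new Configuration();
  //   FileSystem fs = FileSystem.get(
  //       URI.create("webhdfs://httpfs-host:14000"), conf);
  //   FileStatus status = fs.getFileStatus(new Path("/user/foo"));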
@Override
public String getScheme() {
return SCHEME;
}
/**
* Returns a URI whose scheme and authority identify this FileSystem.
*
* @return the URI whose scheme and authority identify this FileSystem.
*/
@Override
public URI getUri() {
return uri;
}
/**
* Get the default port for this file system.
* @return the default port or 0 if there isn't one
*/
@Override
protected int getDefaultPort() {
return getConf().getInt(HdfsClientConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY,
DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT);
}
/**
* HttpFSServer subclass of the <code>FSDataInputStream</code>.
* <p>
* This implementation does not support the
   * <code>PositionedReadable</code> and <code>Seekable</code> methods.
*/
private static class HttpFSDataInputStream extends FilterInputStream implements Seekable, PositionedReadable {
protected HttpFSDataInputStream(InputStream in, int bufferSize) {
super(new BufferedInputStream(in, bufferSize));
}
@Override
public int read(long position, byte[] buffer, int offset, int length) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public void readFully(long position, byte[] buffer, int offset, int length) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public void readFully(long position, byte[] buffer) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public void seek(long pos) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public long getPos() throws IOException {
throw new UnsupportedOperationException();
}
@Override
public boolean seekToNewSource(long targetPos) throws IOException {
throw new UnsupportedOperationException();
}
}
/**
* Opens an FSDataInputStream at the indicated Path.
* <p>
* IMPORTANT: the returned <code>FSDataInputStream</code> does not support the
   * <code>PositionedReadable</code> and <code>Seekable</code> methods.
*
* @param f the file name to open
* @param bufferSize the size of the buffer to be used.
*/
@Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, Operation.OPEN.toString());
HttpURLConnection conn = getConnection(Operation.OPEN.getMethod(), params,
f, true);
HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
return new FSDataInputStream(
new HttpFSDataInputStream(conn.getInputStream(), bufferSize));
}
/**
* HttpFSServer subclass of the <code>FSDataOutputStream</code>.
* <p>
* This implementation closes the underlying HTTP connection validating the Http connection status
* at closing time.
*/
private static class HttpFSDataOutputStream extends FSDataOutputStream {
private HttpURLConnection conn;
private int closeStatus;
public HttpFSDataOutputStream(HttpURLConnection conn, OutputStream out, int closeStatus, Statistics stats)
throws IOException {
super(out, stats);
this.conn = conn;
this.closeStatus = closeStatus;
}
@Override
public void close() throws IOException {
try {
super.close();
} finally {
HttpExceptionUtils.validateResponse(conn, closeStatus);
}
}
}
/**
* Converts a <code>FsPermission</code> to a Unix octal representation.
*
* @param p the permission.
*
   * @return the Unix octal string representation.
*/
public static String permissionToString(FsPermission p) {
return Integer.toString((p == null) ? DEFAULT_PERMISSION : p.toShort(), 8);
}
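  // Example (illustrative): permissionToString(new FsPermission((short) 0644))
  // returns "644", while a null permission falls back to DEFAULT_PERMISSION
  // and returns "755".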
/*
* Common handling for uploading data for create and append operations.
*/
private FSDataOutputStream uploadData(String method, Path f, Map<String, String> params,
int bufferSize, int expectedStatus) throws IOException {
HttpURLConnection conn = getConnection(method, params, f, true);
conn.setInstanceFollowRedirects(false);
boolean exceptionAlreadyHandled = false;
try {
if (conn.getResponseCode() == HTTP_TEMPORARY_REDIRECT) {
exceptionAlreadyHandled = true;
String location = conn.getHeaderField("Location");
if (location != null) {
conn = getConnection(new URL(location), method);
conn.setRequestProperty("Content-Type", UPLOAD_CONTENT_TYPE);
try {
OutputStream os = new BufferedOutputStream(conn.getOutputStream(), bufferSize);
return new HttpFSDataOutputStream(conn, os, expectedStatus, statistics);
} catch (IOException ex) {
HttpExceptionUtils.validateResponse(conn, expectedStatus);
throw ex;
}
} else {
HttpExceptionUtils.validateResponse(conn, HTTP_TEMPORARY_REDIRECT);
throw new IOException("Missing HTTP 'Location' header for [" + conn.getURL() + "]");
}
} else {
throw new IOException(
MessageFormat.format("Expected HTTP status was [307], received [{0}]",
conn.getResponseCode()));
}
} catch (IOException ex) {
if (exceptionAlreadyHandled) {
throw ex;
} else {
HttpExceptionUtils.validateResponse(conn, HTTP_TEMPORARY_REDIRECT);
throw ex;
}
}
}
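  // Note on the upload flow above: it follows the WebHDFS two-step protocol.
  // The first request is sent without a body and is expected to answer with a
  // 307 Temporary Redirect whose Location header names the URL to write to;
  // the data is then streamed to that second connection, and the final status
  // (201 for create, 200 for append) is validated when the returned
  // HttpFSDataOutputStream is closed.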
/**
* Opens an FSDataOutputStream at the indicated Path with write-progress
* reporting.
* <p>
* IMPORTANT: The <code>Progressable</code> parameter is not used.
*
* @param f the file name to open.
* @param permission file permission.
* @param overwrite if a file with this name already exists, then if true,
* the file will be overwritten, and if false an error will be thrown.
* @param bufferSize the size of the buffer to be used.
* @param replication required block replication for the file.
* @param blockSize block size.
* @param progress progressable.
*
* @throws IOException
* @see #setPermission(Path, FsPermission)
*/
@Override
public FSDataOutputStream create(Path f, FsPermission permission,
boolean overwrite, int bufferSize,
short replication, long blockSize,
Progressable progress) throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, Operation.CREATE.toString());
params.put(OVERWRITE_PARAM, Boolean.toString(overwrite));
params.put(REPLICATION_PARAM, Short.toString(replication));
params.put(BLOCKSIZE_PARAM, Long.toString(blockSize));
params.put(PERMISSION_PARAM, permissionToString(permission));
return uploadData(Operation.CREATE.getMethod(), f, params, bufferSize,
HttpURLConnection.HTTP_CREATED);
}
/**
* Append to an existing file (optional operation).
* <p>
* IMPORTANT: The <code>Progressable</code> parameter is not used.
*
* @param f the existing file to be appended.
* @param bufferSize the size of the buffer to be used.
* @param progress for reporting progress if it is not null.
*
* @throws IOException
*/
@Override
public FSDataOutputStream append(Path f, int bufferSize,
Progressable progress) throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, Operation.APPEND.toString());
return uploadData(Operation.APPEND.getMethod(), f, params, bufferSize,
HttpURLConnection.HTTP_OK);
}
/**
* Truncate a file.
*
* @param f the file to be truncated.
* @param newLength The size the file is to be truncated to.
*
* @throws IOException
*/
@Override
public boolean truncate(Path f, long newLength) throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, Operation.TRUNCATE.toString());
params.put(NEW_LENGTH_PARAM, Long.toString(newLength));
HttpURLConnection conn = getConnection(Operation.TRUNCATE.getMethod(),
params, f, true);
JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
return (Boolean) json.get(TRUNCATE_JSON);
}
/**
* Concat existing files together.
* @param f the path to the target destination.
* @param psrcs the paths to the sources to use for the concatenation.
*
* @throws IOException
*/
@Override
public void concat(Path f, Path[] psrcs) throws IOException {
List<String> strPaths = new ArrayList<String>(psrcs.length);
for(Path psrc : psrcs) {
strPaths.add(psrc.toUri().getPath());
}
String srcs = StringUtils.join(",", strPaths);
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, Operation.CONCAT.toString());
params.put(SOURCES_PARAM, srcs);
HttpURLConnection conn = getConnection(Operation.CONCAT.getMethod(),
params, f, true);
HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
}
/**
* Renames Path src to Path dst. Can take place on local fs
* or remote DFS.
*/
@Override
public boolean rename(Path src, Path dst) throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, Operation.RENAME.toString());
params.put(DESTINATION_PARAM, dst.toString());
HttpURLConnection conn = getConnection(Operation.RENAME.getMethod(),
params, src, true);
HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
return (Boolean) json.get(RENAME_JSON);
}
/**
* Delete a file.
*
* @deprecated Use delete(Path, boolean) instead
*/
@Deprecated
@Override
public boolean delete(Path f) throws IOException {
return delete(f, false);
}
/**
* Delete a file.
*
* @param f the path to delete.
* @param recursive if path is a directory and set to
   * true, the directory is deleted, otherwise an exception is thrown. In
   * the case of a file, recursive can be set to either true or false.
*
* @return true if delete is successful else false.
*
* @throws IOException
*/
@Override
public boolean delete(Path f, boolean recursive) throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, Operation.DELETE.toString());
params.put(RECURSIVE_PARAM, Boolean.toString(recursive));
HttpURLConnection conn = getConnection(Operation.DELETE.getMethod(),
params, f, true);
HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
return (Boolean) json.get(DELETE_JSON);
}
/**
* List the statuses of the files/directories in the given path if the path is
* a directory.
*
* @param f given path
*
   * @return the statuses of the files/directories in the given path
*
* @throws IOException
*/
@Override
public FileStatus[] listStatus(Path f) throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, Operation.LISTSTATUS.toString());
HttpURLConnection conn = getConnection(Operation.LISTSTATUS.getMethod(),
params, f, true);
HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
json = (JSONObject) json.get(FILE_STATUSES_JSON);
JSONArray jsonArray = (JSONArray) json.get(FILE_STATUS_JSON);
FileStatus[] array = new FileStatus[jsonArray.size()];
f = makeQualified(f);
for (int i = 0; i < jsonArray.size(); i++) {
array[i] = createFileStatus(f, (JSONObject) jsonArray.get(i));
}
return array;
}
/**
* Set the current working directory for the given file system. All relative
* paths will be resolved relative to it.
*
* @param newDir new directory.
*/
@Override
public void setWorkingDirectory(Path newDir) {
workingDir = newDir;
}
/**
* Get the current working directory for the given file system
*
* @return the directory pathname
*/
@Override
public Path getWorkingDirectory() {
if (workingDir == null) {
workingDir = getHomeDirectory();
}
return workingDir;
}
/**
* Make the given file and all non-existent parents into
* directories. Has the semantics of Unix 'mkdir -p'.
* Existence of the directory hierarchy is not an error.
*/
@Override
public boolean mkdirs(Path f, FsPermission permission) throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, Operation.MKDIRS.toString());
params.put(PERMISSION_PARAM, permissionToString(permission));
HttpURLConnection conn = getConnection(Operation.MKDIRS.getMethod(),
params, f, true);
HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
return (Boolean) json.get(MKDIRS_JSON);
}
/**
* Return a file status object that represents the path.
*
* @param f The path we want information from
*
* @return a FileStatus object
*
* @throws FileNotFoundException when the path does not exist;
* IOException see specific implementation
*/
@Override
public FileStatus getFileStatus(Path f) throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, Operation.GETFILESTATUS.toString());
HttpURLConnection conn = getConnection(Operation.GETFILESTATUS.getMethod(),
params, f, true);
HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
json = (JSONObject) json.get(FILE_STATUS_JSON);
f = makeQualified(f);
return createFileStatus(f, json);
}
/**
* Return the current user's home directory in this filesystem.
* The default implementation returns "/user/$USER/".
*/
@Override
public Path getHomeDirectory() {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, Operation.GETHOMEDIRECTORY.toString());
try {
HttpURLConnection conn =
getConnection(Operation.GETHOMEDIRECTORY.getMethod(), params,
new Path(getUri().toString(), "/"), false);
HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
return new Path((String) json.get(HOME_DIR_JSON));
} catch (IOException ex) {
throw new RuntimeException(ex);
}
}
/**
* Set owner of a path (i.e. a file or a directory).
* The parameters username and groupname cannot both be null.
*
* @param p The path
* @param username If it is null, the original username remains unchanged.
* @param groupname If it is null, the original groupname remains unchanged.
*/
@Override
public void setOwner(Path p, String username, String groupname)
throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, Operation.SETOWNER.toString());
params.put(OWNER_PARAM, username);
params.put(GROUP_PARAM, groupname);
HttpURLConnection conn = getConnection(Operation.SETOWNER.getMethod(),
params, p, true);
HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
}
/**
* Set permission of a path.
*
* @param p path.
* @param permission permission.
*/
@Override
public void setPermission(Path p, FsPermission permission) throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, Operation.SETPERMISSION.toString());
params.put(PERMISSION_PARAM, permissionToString(permission));
HttpURLConnection conn = getConnection(Operation.SETPERMISSION.getMethod(), params, p, true);
HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
}
/**
   * Set the access time and modification time of a file.
*
* @param p The path
* @param mtime Set the modification time of this file.
* The number of milliseconds since Jan 1, 1970.
* A value of -1 means that this call should not set modification time.
* @param atime Set the access time of this file.
* The number of milliseconds since Jan 1, 1970.
* A value of -1 means that this call should not set access time.
*/
@Override
public void setTimes(Path p, long mtime, long atime) throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, Operation.SETTIMES.toString());
params.put(MODIFICATION_TIME_PARAM, Long.toString(mtime));
params.put(ACCESS_TIME_PARAM, Long.toString(atime));
HttpURLConnection conn = getConnection(Operation.SETTIMES.getMethod(),
params, p, true);
HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
}
/**
* Set replication for an existing file.
*
* @param src file name
* @param replication new replication
*
* @return true if successful;
* false if file does not exist or is a directory
*
* @throws IOException
*/
@Override
public boolean setReplication(Path src, short replication)
throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, Operation.SETREPLICATION.toString());
params.put(REPLICATION_PARAM, Short.toString(replication));
HttpURLConnection conn =
getConnection(Operation.SETREPLICATION.getMethod(), params, src, true);
HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
return (Boolean) json.get(SET_REPLICATION_JSON);
}
/**
* Modify the ACL entries for a file.
*
* @param path Path to modify
* @param aclSpec describing modifications
* @throws IOException
*/
@Override
public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, Operation.MODIFYACLENTRIES.toString());
params.put(ACLSPEC_PARAM, AclEntry.aclSpecToString(aclSpec));
HttpURLConnection conn = getConnection(
Operation.MODIFYACLENTRIES.getMethod(), params, path, true);
HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
}
/**
* Remove the specified ACL entries from a file
* @param path Path to modify
* @param aclSpec describing entries to remove
* @throws IOException
*/
@Override
public void removeAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, Operation.REMOVEACLENTRIES.toString());
params.put(ACLSPEC_PARAM, AclEntry.aclSpecToString(aclSpec));
HttpURLConnection conn = getConnection(
Operation.REMOVEACLENTRIES.getMethod(), params, path, true);
HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
}
/**
* Removes the default ACL for the given file
* @param path Path from which to remove the default ACL.
* @throws IOException
*/
@Override
public void removeDefaultAcl(Path path) throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, Operation.REMOVEDEFAULTACL.toString());
HttpURLConnection conn = getConnection(
Operation.REMOVEDEFAULTACL.getMethod(), params, path, true);
HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
}
/**
* Remove all ACLs from a file
* @param path Path from which to remove all ACLs
* @throws IOException
*/
@Override
public void removeAcl(Path path) throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, Operation.REMOVEACL.toString());
HttpURLConnection conn = getConnection(Operation.REMOVEACL.getMethod(),
params, path, true);
HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
}
/**
* Set the ACLs for the given file
* @param path Path to modify
* @param aclSpec describing modifications, must include
* entries for user, group, and others for compatibility
* with permission bits.
* @throws IOException
*/
@Override
public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, Operation.SETACL.toString());
params.put(ACLSPEC_PARAM, AclEntry.aclSpecToString(aclSpec));
HttpURLConnection conn = getConnection(Operation.SETACL.getMethod(),
params, path, true);
HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
}
/**
* Get the ACL information for a given file
* @param path Path to acquire ACL info for
   * @return the AclStatus describing the ACL of the path
* @throws IOException
*/
@Override
public AclStatus getAclStatus(Path path) throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, Operation.GETACLSTATUS.toString());
HttpURLConnection conn = getConnection(Operation.GETACLSTATUS.getMethod(),
params, path, true);
HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
json = (JSONObject) json.get(ACL_STATUS_JSON);
return createAclStatus(json);
}
private FileStatus createFileStatus(Path parent, JSONObject json) {
String pathSuffix = (String) json.get(PATH_SUFFIX_JSON);
Path path = (pathSuffix.equals("")) ? parent : new Path(parent, pathSuffix);
FILE_TYPE type = FILE_TYPE.valueOf((String) json.get(TYPE_JSON));
long len = (Long) json.get(LENGTH_JSON);
String owner = (String) json.get(OWNER_JSON);
String group = (String) json.get(GROUP_JSON);
FsPermission permission =
new FsPermission(Short.parseShort((String) json.get(PERMISSION_JSON), 8));
long aTime = (Long) json.get(ACCESS_TIME_JSON);
long mTime = (Long) json.get(MODIFICATION_TIME_JSON);
long blockSize = (Long) json.get(BLOCK_SIZE_JSON);
short replication = ((Long) json.get(REPLICATION_JSON)).shortValue();
FileStatus fileStatus = null;
switch (type) {
case FILE:
case DIRECTORY:
fileStatus = new FileStatus(len, (type == FILE_TYPE.DIRECTORY),
replication, blockSize, mTime, aTime,
permission, owner, group, path);
break;
case SYMLINK:
Path symLink = null;
fileStatus = new FileStatus(len, false,
replication, blockSize, mTime, aTime,
permission, owner, group, symLink,
path);
}
return fileStatus;
}
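  // Shape of the per-entry JSON consumed above (values illustrative only):
  //   {"pathSuffix":"a.txt","type":"FILE","length":3,"owner":"foo",
  //    "group":"supergroup","permission":"644","accessTime":1,
  //    "modificationTime":1,"blockSize":134217728,"replication":3}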
/**
* Convert the given JSON object into an AclStatus
* @param json Input JSON representing the ACLs
* @return Resulting AclStatus
*/
private AclStatus createAclStatus(JSONObject json) {
AclStatus.Builder aclStatusBuilder = new AclStatus.Builder()
.owner((String) json.get(OWNER_JSON))
.group((String) json.get(GROUP_JSON))
.stickyBit((Boolean) json.get(ACL_STICKY_BIT_JSON));
JSONArray entries = (JSONArray) json.get(ACL_ENTRIES_JSON);
for ( Object e : entries ) {
aclStatusBuilder.addEntry(AclEntry.parseAclEntry(e.toString(), true));
}
return aclStatusBuilder.build();
}
@Override
public ContentSummary getContentSummary(Path f) throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, Operation.GETCONTENTSUMMARY.toString());
HttpURLConnection conn =
getConnection(Operation.GETCONTENTSUMMARY.getMethod(), params, f, true);
HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
JSONObject json = (JSONObject) ((JSONObject)
HttpFSUtils.jsonParse(conn)).get(CONTENT_SUMMARY_JSON);
return new ContentSummary.Builder().
length((Long) json.get(CONTENT_SUMMARY_LENGTH_JSON)).
fileCount((Long) json.get(CONTENT_SUMMARY_FILE_COUNT_JSON)).
directoryCount((Long) json.get(CONTENT_SUMMARY_DIRECTORY_COUNT_JSON)).
quota((Long) json.get(CONTENT_SUMMARY_QUOTA_JSON)).
spaceConsumed((Long) json.get(CONTENT_SUMMARY_SPACE_CONSUMED_JSON)).
spaceQuota((Long) json.get(CONTENT_SUMMARY_SPACE_QUOTA_JSON)).build();
}
@Override
public FileChecksum getFileChecksum(Path f) throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, Operation.GETFILECHECKSUM.toString());
HttpURLConnection conn =
getConnection(Operation.GETFILECHECKSUM.getMethod(), params, f, true);
HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
final JSONObject json = (JSONObject) ((JSONObject)
HttpFSUtils.jsonParse(conn)).get(FILE_CHECKSUM_JSON);
return new FileChecksum() {
@Override
public String getAlgorithmName() {
return (String) json.get(CHECKSUM_ALGORITHM_JSON);
}
@Override
public int getLength() {
return ((Long) json.get(CHECKSUM_LENGTH_JSON)).intValue();
}
@Override
public byte[] getBytes() {
return StringUtils.hexStringToByte((String) json.get(CHECKSUM_BYTES_JSON));
}
@Override
public void write(DataOutput out) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public void readFields(DataInput in) throws IOException {
throw new UnsupportedOperationException();
}
};
}
@Override
public Token<?> getDelegationToken(final String renewer)
throws IOException {
try {
return UserGroupInformation.getCurrentUser().doAs(
new PrivilegedExceptionAction<Token<?>>() {
@Override
public Token<?> run() throws Exception {
return authURL.getDelegationToken(uri.toURL(), authToken,
renewer);
}
}
);
} catch (Exception ex) {
if (ex instanceof IOException) {
throw (IOException) ex;
} else {
throw new IOException(ex);
}
}
}
public long renewDelegationToken(final Token<?> token) throws IOException {
try {
return UserGroupInformation.getCurrentUser().doAs(
new PrivilegedExceptionAction<Long>() {
@Override
public Long run() throws Exception {
return authURL.renewDelegationToken(uri.toURL(), authToken);
}
}
);
} catch (Exception ex) {
if (ex instanceof IOException) {
throw (IOException) ex;
} else {
throw new IOException(ex);
}
}
}
public void cancelDelegationToken(final Token<?> token) throws IOException {
authURL.cancelDelegationToken(uri.toURL(), authToken);
}
@Override
public Token<?> getRenewToken() {
return null; //TODO : for renewer
}
@Override
@SuppressWarnings("unchecked")
public <T extends TokenIdentifier> void setDelegationToken(Token<T> token) {
//TODO : for renewer
}
@Override
public void setXAttr(Path f, String name, byte[] value,
EnumSet<XAttrSetFlag> flag) throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, Operation.SETXATTR.toString());
params.put(XATTR_NAME_PARAM, name);
if (value != null) {
params.put(XATTR_VALUE_PARAM,
XAttrCodec.encodeValue(value, XAttrCodec.HEX));
}
params.put(XATTR_SET_FLAG_PARAM, EnumSetParam.toString(flag));
HttpURLConnection conn = getConnection(Operation.SETXATTR.getMethod(),
params, f, true);
HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
}
@Override
public byte[] getXAttr(Path f, String name) throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, Operation.GETXATTRS.toString());
params.put(XATTR_NAME_PARAM, name);
HttpURLConnection conn = getConnection(Operation.GETXATTRS.getMethod(),
params, f, true);
HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
Map<String, byte[]> xAttrs = createXAttrMap(
(JSONArray) json.get(XATTRS_JSON));
return xAttrs != null ? xAttrs.get(name) : null;
}
  /** Converts an xAttrs JSON array into a map of xattr names to decoded values. */
private Map<String, byte[]> createXAttrMap(JSONArray jsonArray)
throws IOException {
Map<String, byte[]> xAttrs = Maps.newHashMap();
for (Object obj : jsonArray) {
JSONObject jsonObj = (JSONObject) obj;
final String name = (String)jsonObj.get(XATTR_NAME_JSON);
final byte[] value = XAttrCodec.decodeValue(
(String)jsonObj.get(XATTR_VALUE_JSON));
xAttrs.put(name, value);
}
return xAttrs;
}
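  // The XAttrs JSON array handled above is a list of {"name": ..., "value": ...}
  // objects; the value strings are decoded back to bytes via XAttrCodec.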
  /** Converts an xattr names JSON string into a list of names. */
private List<String> createXAttrNames(String xattrNamesStr) throws IOException {
JSONParser parser = new JSONParser();
JSONArray jsonArray;
try {
jsonArray = (JSONArray)parser.parse(xattrNamesStr);
List<String> names = Lists.newArrayListWithCapacity(jsonArray.size());
for (Object name : jsonArray) {
names.add((String) name);
}
return names;
} catch (ParseException e) {
throw new IOException("JSON parser error, " + e.getMessage(), e);
}
}
@Override
public Map<String, byte[]> getXAttrs(Path f) throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, Operation.GETXATTRS.toString());
HttpURLConnection conn = getConnection(Operation.GETXATTRS.getMethod(),
params, f, true);
HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
return createXAttrMap((JSONArray) json.get(XATTRS_JSON));
}
@Override
public Map<String, byte[]> getXAttrs(Path f, List<String> names)
throws IOException {
Preconditions.checkArgument(names != null && !names.isEmpty(),
"XAttr names cannot be null or empty.");
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, Operation.GETXATTRS.toString());
Map<String, List<String>> multiValuedParams = Maps.newHashMap();
multiValuedParams.put(XATTR_NAME_PARAM, names);
HttpURLConnection conn = getConnection(Operation.GETXATTRS.getMethod(),
params, multiValuedParams, f, true);
HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
return createXAttrMap((JSONArray) json.get(XATTRS_JSON));
}
@Override
public List<String> listXAttrs(Path f) throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, Operation.LISTXATTRS.toString());
HttpURLConnection conn = getConnection(Operation.LISTXATTRS.getMethod(),
params, f, true);
HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
return createXAttrNames((String) json.get(XATTRNAMES_JSON));
}
@Override
public void removeXAttr(Path f, String name) throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, Operation.REMOVEXATTR.toString());
params.put(XATTR_NAME_PARAM, name);
HttpURLConnection conn = getConnection(Operation.REMOVEXATTR.getMethod(),
params, f, true);
HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
}
}
| 46,478 | 36.787805 | 112 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpsFSFileSystem.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.http.client;
/**
* <p>HttpFSServer implementation of the FileSystemAccess FileSystem for SSL.
* </p>
* <p>This implementation allows a user to access HDFS over HTTPS via a
* HttpFSServer server.</p>
*/
public class HttpsFSFileSystem extends HttpFSFileSystem {
public static final String SCHEME = "swebhdfs";
@Override
public String getScheme() {
return SCHEME;
}
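  /*
   * Illustrative usage sketch (host and port are assumptions): clients
   * normally obtain this filesystem through the standard Hadoop factory
   * rather than instantiating it directly, e.g.
   *
   *   Configuration conf = new Configuration();
   *   FileSystem fs = FileSystem.get(
   *       URI.create("swebhdfs://httpfs.example.com:14000"), conf);
   *   FileStatus[] entries = fs.listStatus(new Path("/user"));
   */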
}
| 1,221 | 33.914286 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.http.server;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.GlobFilter;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.XAttrCodec;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.AclException;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.lib.service.FileSystemAccess;
import org.apache.hadoop.util.StringUtils;
import org.json.simple.JSONArray;
import org.json.simple.JSONObject;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.EnumSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
/**
* FileSystem operation executors used by {@link HttpFSServer}.
*/
@InterfaceAudience.Private
public class FSOperations {
/**
* This class is used to group a FileStatus and an AclStatus together.
* It's needed for the GETFILESTATUS and LISTSTATUS calls, which take
* most info from the FileStatus and a wee bit from the AclStatus.
*/
private static class StatusPair {
private FileStatus fileStatus;
private AclStatus aclStatus;
/**
* Simple constructor
* @param fileStatus Existing FileStatus object
* @param aclStatus Existing AclStatus object
*/
public StatusPair(FileStatus fileStatus, AclStatus aclStatus) {
this.fileStatus = fileStatus;
this.aclStatus = aclStatus;
}
/**
* Create one StatusPair by performing the underlying calls to
* fs.getFileStatus and fs.getAclStatus
* @param fs The FileSystem where 'path' lives
* @param path The file/directory to query
* @throws IOException
*/
public StatusPair(FileSystem fs, Path path) throws IOException {
fileStatus = fs.getFileStatus(path);
aclStatus = null;
try {
aclStatus = fs.getAclStatus(path);
} catch (AclException e) {
/*
* The cause is almost certainly an "ACLS aren't enabled"
* exception, so leave aclStatus at null and carry on.
*/
} catch (UnsupportedOperationException e) {
/* Ditto above - this is the case for a local file system */
}
}
/**
* Return a Map suitable for conversion into JSON format
* @return The JSONish Map
*/
public Map<String,Object> toJson() {
Map<String,Object> json = new LinkedHashMap<String,Object>();
json.put(HttpFSFileSystem.FILE_STATUS_JSON, toJsonInner(true));
return json;
}
/**
     * Return the inner part of the JSON for the status - used by both the
* GETFILESTATUS and LISTSTATUS calls.
* @param emptyPathSuffix Whether or not to include PATH_SUFFIX_JSON
* @return The JSONish Map
*/
public Map<String,Object> toJsonInner(boolean emptyPathSuffix) {
Map<String,Object> json = new LinkedHashMap<String,Object>();
json.put(HttpFSFileSystem.PATH_SUFFIX_JSON,
(emptyPathSuffix) ? "" : fileStatus.getPath().getName());
json.put(HttpFSFileSystem.TYPE_JSON,
HttpFSFileSystem.FILE_TYPE.getType(fileStatus).toString());
json.put(HttpFSFileSystem.LENGTH_JSON, fileStatus.getLen());
json.put(HttpFSFileSystem.OWNER_JSON, fileStatus.getOwner());
json.put(HttpFSFileSystem.GROUP_JSON, fileStatus.getGroup());
json.put(HttpFSFileSystem.PERMISSION_JSON,
HttpFSFileSystem.permissionToString(fileStatus.getPermission()));
json.put(HttpFSFileSystem.ACCESS_TIME_JSON, fileStatus.getAccessTime());
json.put(HttpFSFileSystem.MODIFICATION_TIME_JSON,
fileStatus.getModificationTime());
json.put(HttpFSFileSystem.BLOCK_SIZE_JSON, fileStatus.getBlockSize());
json.put(HttpFSFileSystem.REPLICATION_JSON, fileStatus.getReplication());
if ( (aclStatus != null) && !(aclStatus.getEntries().isEmpty()) ) {
json.put(HttpFSFileSystem.ACL_BIT_JSON,true);
}
return json;
}
}
/**
* Simple class used to contain and operate upon a list of StatusPair
* objects. Used by LISTSTATUS.
*/
private static class StatusPairs {
private StatusPair[] statusPairs;
/**
* Construct a list of StatusPair objects
* @param fs The FileSystem where 'path' lives
* @param path The directory to query
* @param filter A possible filter for entries in the directory
* @throws IOException
*/
public StatusPairs(FileSystem fs, Path path, PathFilter filter)
throws IOException {
/* Grab all the file statuses at once in an array */
FileStatus[] fileStatuses = fs.listStatus(path, filter);
/* We'll have an array of StatusPairs of the same length */
AclStatus aclStatus = null;
statusPairs = new StatusPair[fileStatuses.length];
/*
* For each FileStatus, attempt to acquire an AclStatus. If the
* getAclStatus throws an exception, we assume that ACLs are turned
* off entirely and abandon the attempt.
*/
boolean useAcls = true; // Assume ACLs work until proven otherwise
for (int i = 0; i < fileStatuses.length; i++) {
if (useAcls) {
try {
aclStatus = fs.getAclStatus(fileStatuses[i].getPath());
} catch (AclException e) {
/* Almost certainly due to an "ACLs not enabled" exception */
aclStatus = null;
useAcls = false;
} catch (UnsupportedOperationException e) {
/* Ditto above - this is the case for a local file system */
aclStatus = null;
useAcls = false;
}
}
statusPairs[i] = new StatusPair(fileStatuses[i], aclStatus);
}
}
/**
* Return a Map suitable for conversion into JSON.
* @return A JSONish Map
*/
@SuppressWarnings({"unchecked"})
public Map<String,Object> toJson() {
Map<String,Object> json = new LinkedHashMap<String,Object>();
Map<String,Object> inner = new LinkedHashMap<String,Object>();
JSONArray statuses = new JSONArray();
for (StatusPair s : statusPairs) {
statuses.add(s.toJsonInner(false));
}
inner.put(HttpFSFileSystem.FILE_STATUS_JSON, statuses);
json.put(HttpFSFileSystem.FILE_STATUSES_JSON, inner);
return json;
}
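    /*
     * Illustrative serialized form (entry values made up): the map built by
     * toJson() renders as WebHDFS-style JSON such as
     *   {"FileStatuses":{"FileStatus":[
     *     {"pathSuffix":"a.txt","type":"FILE","length":3, ...}]}}
     */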
}
/** Converts an <code>AclStatus</code> object into a JSON object.
*
* @param aclStatus AclStatus object
*
* @return The JSON representation of the ACLs for the file
*/
@SuppressWarnings({"unchecked"})
private static Map<String,Object> aclStatusToJSON(AclStatus aclStatus) {
Map<String,Object> json = new LinkedHashMap<String,Object>();
Map<String,Object> inner = new LinkedHashMap<String,Object>();
JSONArray entriesArray = new JSONArray();
inner.put(HttpFSFileSystem.OWNER_JSON, aclStatus.getOwner());
inner.put(HttpFSFileSystem.GROUP_JSON, aclStatus.getGroup());
inner.put(HttpFSFileSystem.ACL_STICKY_BIT_JSON, aclStatus.isStickyBit());
for ( AclEntry e : aclStatus.getEntries() ) {
entriesArray.add(e.toString());
}
inner.put(HttpFSFileSystem.ACL_ENTRIES_JSON, entriesArray);
json.put(HttpFSFileSystem.ACL_STATUS_JSON, inner);
return json;
}
/**
   * Converts a <code>FileChecksum</code> object into a JSON object.
*
* @param checksum file checksum.
*
* @return The JSON representation of the file checksum.
*/
@SuppressWarnings({"unchecked"})
private static Map fileChecksumToJSON(FileChecksum checksum) {
Map json = new LinkedHashMap();
json.put(HttpFSFileSystem.CHECKSUM_ALGORITHM_JSON, checksum.getAlgorithmName());
json.put(HttpFSFileSystem.CHECKSUM_BYTES_JSON,
org.apache.hadoop.util.StringUtils.byteToHexString(checksum.getBytes()));
json.put(HttpFSFileSystem.CHECKSUM_LENGTH_JSON, checksum.getLength());
Map response = new LinkedHashMap();
response.put(HttpFSFileSystem.FILE_CHECKSUM_JSON, json);
return response;
}
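  /*
   * Illustrative serialized form (values made up): the map returned above
   * renders as WebHDFS-style JSON such as
   *   {"FileChecksum":{"algorithm":"MD5-of-0MD5-of-512CRC32",
   *                    "bytes":"0000020000...","length":28}}
   */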
/**
* Converts xAttrs to a JSON object.
*
* @param xAttrs file xAttrs.
* @param encoding format of xattr values.
*
* @return The JSON representation of the xAttrs.
* @throws IOException
*/
@SuppressWarnings({"unchecked", "rawtypes"})
private static Map xAttrsToJSON(Map<String, byte[]> xAttrs,
XAttrCodec encoding) throws IOException {
Map jsonMap = new LinkedHashMap();
JSONArray jsonArray = new JSONArray();
if (xAttrs != null) {
for (Entry<String, byte[]> e : xAttrs.entrySet()) {
Map json = new LinkedHashMap();
json.put(HttpFSFileSystem.XATTR_NAME_JSON, e.getKey());
if (e.getValue() != null) {
json.put(HttpFSFileSystem.XATTR_VALUE_JSON,
XAttrCodec.encodeValue(e.getValue(), encoding));
}
jsonArray.add(json);
}
}
jsonMap.put(HttpFSFileSystem.XATTRS_JSON, jsonArray);
return jsonMap;
}
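  /*
   * Illustrative serialized form (name and value assumed): with BASE64
   * encoding the map returned above renders as JSON such as
   *   {"XAttrs":[{"name":"user.origin","value":"0sY3VybA=="}]}
   * where the "0s" prefix marks a base64-encoded value.
   */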
/**
* Converts xAttr names to a JSON object.
*
* @param names file xAttr names.
*
* @return The JSON representation of the xAttr names.
* @throws IOException
*/
@SuppressWarnings({"unchecked", "rawtypes"})
private static Map xAttrNamesToJSON(List<String> names) throws IOException {
Map jsonMap = new LinkedHashMap();
jsonMap.put(HttpFSFileSystem.XATTRNAMES_JSON, JSONArray.toJSONString(names));
return jsonMap;
}
/**
   * Converts a <code>ContentSummary</code> object into a JSON object.
*
* @param contentSummary the content summary
*
* @return The JSON representation of the content summary.
*/
@SuppressWarnings({"unchecked"})
private static Map contentSummaryToJSON(ContentSummary contentSummary) {
Map json = new LinkedHashMap();
json.put(HttpFSFileSystem.CONTENT_SUMMARY_DIRECTORY_COUNT_JSON, contentSummary.getDirectoryCount());
json.put(HttpFSFileSystem.CONTENT_SUMMARY_FILE_COUNT_JSON, contentSummary.getFileCount());
json.put(HttpFSFileSystem.CONTENT_SUMMARY_LENGTH_JSON, contentSummary.getLength());
json.put(HttpFSFileSystem.CONTENT_SUMMARY_QUOTA_JSON, contentSummary.getQuota());
json.put(HttpFSFileSystem.CONTENT_SUMMARY_SPACE_CONSUMED_JSON, contentSummary.getSpaceConsumed());
json.put(HttpFSFileSystem.CONTENT_SUMMARY_SPACE_QUOTA_JSON, contentSummary.getSpaceQuota());
Map response = new LinkedHashMap();
response.put(HttpFSFileSystem.CONTENT_SUMMARY_JSON, json);
return response;
}
/**
   * Converts an object into a JSON Map with one key-value entry.
* <p/>
* It assumes the given value is either a JSON primitive type or a
* <code>JsonAware</code> instance.
*
* @param name name for the key of the entry.
* @param value for the value of the entry.
*
* @return the JSON representation of the key-value pair.
*/
@SuppressWarnings("unchecked")
private static JSONObject toJSON(String name, Object value) {
JSONObject json = new JSONObject();
json.put(name, value);
return json;
}
/**
   * Executor that performs an append FileSystemAccess file system operation.
*/
@InterfaceAudience.Private
public static class FSAppend implements FileSystemAccess.FileSystemExecutor<Void> {
private InputStream is;
private Path path;
/**
* Creates an Append executor.
*
* @param is input stream to append.
* @param path path of the file to append.
*/
public FSAppend(InputStream is, String path) {
this.is = is;
this.path = new Path(path);
}
/**
* Executes the filesystem operation.
*
* @param fs filesystem instance to use.
*
* @return void.
*
     * @throws IOException thrown if an IO error occurred.
*/
@Override
public Void execute(FileSystem fs) throws IOException {
int bufferSize = fs.getConf().getInt("httpfs.buffer.size", 4096);
OutputStream os = fs.append(path, bufferSize);
IOUtils.copyBytes(is, os, bufferSize, true);
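      // copyBytes with close=true already closes both streams; the explicit
      // close() below is redundant but harmless.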
os.close();
return null;
}
}
/**
   * Executor that performs a concat FileSystemAccess file system operation.
*/
@InterfaceAudience.Private
public static class FSConcat implements FileSystemAccess.FileSystemExecutor<Void> {
private Path path;
private Path[] sources;
/**
* Creates a Concat executor.
*
* @param path target path to concat to.
     * @param sources comma-separated absolute paths to use as sources.
*/
public FSConcat(String path, String[] sources) {
this.sources = new Path[sources.length];
for(int i = 0; i < sources.length; i++) {
this.sources[i] = new Path(sources[i]);
}
this.path = new Path(path);
}
/**
* Executes the filesystem operation.
*
* @param fs filesystem instance to use.
*
* @return void.
*
     * @throws IOException thrown if an IO error occurred.
*/
@Override
public Void execute(FileSystem fs) throws IOException {
fs.concat(path, sources);
return null;
}
}
/**
   * Executor that performs a truncate FileSystemAccess file system operation.
*/
@InterfaceAudience.Private
public static class FSTruncate implements
FileSystemAccess.FileSystemExecutor<JSONObject> {
private Path path;
private long newLength;
/**
* Creates a Truncate executor.
*
* @param path target path to truncate to.
* @param newLength The size the file is to be truncated to.
*/
public FSTruncate(String path, long newLength) {
this.path = new Path(path);
this.newLength = newLength;
}
/**
* Executes the filesystem operation.
*
* @param fs filesystem instance to use.
*
     * @return <code>true</code> if the file has been truncated to the desired length,
* <code>false</code> if a background process of adjusting the
* length of the last block has been started, and clients should
* wait for it to complete before proceeding with further file
* updates.
*
     * @throws IOException thrown if an IO error occurred.
*/
@Override
public JSONObject execute(FileSystem fs) throws IOException {
boolean result = fs.truncate(path, newLength);
return toJSON(
StringUtils.toLowerCase(HttpFSFileSystem.TRUNCATE_JSON), result);
}
}
/**
   * Executor that performs a content-summary FileSystemAccess file system operation.
*/
@InterfaceAudience.Private
public static class FSContentSummary implements FileSystemAccess.FileSystemExecutor<Map> {
private Path path;
/**
* Creates a content-summary executor.
*
* @param path the path to retrieve the content-summary.
*/
public FSContentSummary(String path) {
this.path = new Path(path);
}
/**
* Executes the filesystem operation.
*
* @param fs filesystem instance to use.
*
* @return a Map object (JSON friendly) with the content-summary.
*
     * @throws IOException thrown if an IO error occurred.
*/
@Override
public Map execute(FileSystem fs) throws IOException {
ContentSummary contentSummary = fs.getContentSummary(path);
return contentSummaryToJSON(contentSummary);
}
}
/**
   * Executor that performs a create FileSystemAccess file system operation.
*/
@InterfaceAudience.Private
public static class FSCreate implements FileSystemAccess.FileSystemExecutor<Void> {
private InputStream is;
private Path path;
private short permission;
private boolean override;
private short replication;
private long blockSize;
/**
* Creates a Create executor.
*
     * @param is input stream for the file to create.
* @param path path of the file to create.
* @param perm permission for the file.
     * @param override if the file should be overwritten if it already exists.
* @param repl the replication factor for the file.
* @param blockSize the block size for the file.
*/
public FSCreate(InputStream is, String path, short perm, boolean override,
short repl, long blockSize) {
this.is = is;
this.path = new Path(path);
this.permission = perm;
this.override = override;
this.replication = repl;
this.blockSize = blockSize;
}
/**
* Executes the filesystem operation.
*
* @param fs filesystem instance to use.
*
     * @return void.
*
     * @throws IOException thrown if an IO error occurred.
*/
@Override
public Void execute(FileSystem fs) throws IOException {
if (replication == -1) {
replication = fs.getDefaultReplication(path);
}
if (blockSize == -1) {
blockSize = fs.getDefaultBlockSize(path);
}
FsPermission fsPermission = new FsPermission(permission);
int bufferSize = fs.getConf().getInt("httpfs.buffer.size", 4096);
OutputStream os = fs.create(path, fsPermission, override, bufferSize, replication, blockSize, null);
IOUtils.copyBytes(is, os, bufferSize, true);
os.close();
return null;
}
}
/**
   * Executor that performs a delete FileSystemAccess file system operation.
*/
@InterfaceAudience.Private
public static class FSDelete implements FileSystemAccess.FileSystemExecutor<JSONObject> {
private Path path;
private boolean recursive;
/**
* Creates a Delete executor.
*
* @param path path to delete.
* @param recursive if the delete should be recursive or not.
*/
public FSDelete(String path, boolean recursive) {
this.path = new Path(path);
this.recursive = recursive;
}
/**
* Executes the filesystem operation.
*
* @param fs filesystem instance to use.
*
* @return <code>true</code> if the delete operation was successful,
* <code>false</code> otherwise.
*
     * @throws IOException thrown if an IO error occurred.
*/
@Override
public JSONObject execute(FileSystem fs) throws IOException {
boolean deleted = fs.delete(path, recursive);
return toJSON(
StringUtils.toLowerCase(HttpFSFileSystem.DELETE_JSON), deleted);
}
}
/**
   * Executor that performs a file-checksum FileSystemAccess file system operation.
*/
@InterfaceAudience.Private
public static class FSFileChecksum implements FileSystemAccess.FileSystemExecutor<Map> {
private Path path;
/**
* Creates a file-checksum executor.
*
* @param path the path to retrieve the checksum.
*/
public FSFileChecksum(String path) {
this.path = new Path(path);
}
/**
* Executes the filesystem operation.
*
* @param fs filesystem instance to use.
*
* @return a Map object (JSON friendly) with the file checksum.
*
     * @throws IOException thrown if an IO error occurred.
*/
@Override
public Map execute(FileSystem fs) throws IOException {
FileChecksum checksum = fs.getFileChecksum(path);
return fileChecksumToJSON(checksum);
}
}
/**
   * Executor that performs a file-status FileSystemAccess file system operation.
*/
@InterfaceAudience.Private
public static class FSFileStatus implements FileSystemAccess.FileSystemExecutor<Map> {
private Path path;
/**
* Creates a file-status executor.
*
* @param path the path to retrieve the status.
*/
public FSFileStatus(String path) {
this.path = new Path(path);
}
/**
* Executes the filesystem getFileStatus operation and returns the
* result in a JSONish Map.
*
* @param fs filesystem instance to use.
*
* @return a Map object (JSON friendly) with the file status.
*
* @throws IOException thrown if an IO error occurred.
*/
@Override
public Map execute(FileSystem fs) throws IOException {
StatusPair sp = new StatusPair(fs, path);
return sp.toJson();
}
}
/**
   * Executor that performs a home-dir FileSystemAccess file system operation.
*/
@InterfaceAudience.Private
public static class FSHomeDir implements FileSystemAccess.FileSystemExecutor<JSONObject> {
/**
* Executes the filesystem operation.
*
* @param fs filesystem instance to use.
*
* @return a JSON object with the user home directory.
*
     * @throws IOException thrown if an IO error occurred.
*/
@Override
@SuppressWarnings("unchecked")
public JSONObject execute(FileSystem fs) throws IOException {
Path homeDir = fs.getHomeDirectory();
JSONObject json = new JSONObject();
json.put(HttpFSFileSystem.HOME_DIR_JSON, homeDir.toUri().getPath());
return json;
}
}
/**
   * Executor that performs a list-status FileSystemAccess file system operation.
*/
@InterfaceAudience.Private
public static class FSListStatus implements FileSystemAccess.FileSystemExecutor<Map>, PathFilter {
private Path path;
private PathFilter filter;
/**
* Creates a list-status executor.
*
* @param path the directory to retrieve the status of its contents.
* @param filter glob filter to use.
*
* @throws IOException thrown if the filter expression is incorrect.
*/
public FSListStatus(String path, String filter) throws IOException {
this.path = new Path(path);
this.filter = (filter == null) ? this : new GlobFilter(filter);
}
/**
* Returns data for a JSON Map containing the information for
* the set of files in 'path' that match 'filter'.
*
* @param fs filesystem instance to use.
*
* @return a Map with the file status of the directory
* contents that match the filter
*
* @throws IOException thrown if an IO error occurred.
*/
@Override
public Map execute(FileSystem fs) throws IOException {
StatusPairs sp = new StatusPairs(fs, path, filter);
return sp.toJson();
}
@Override
public boolean accept(Path path) {
return true;
}
}
/**
   * Executor that performs a mkdirs FileSystemAccess file system operation.
*/
@InterfaceAudience.Private
public static class FSMkdirs implements FileSystemAccess.FileSystemExecutor<JSONObject> {
private Path path;
private short permission;
/**
* Creates a mkdirs executor.
*
* @param path directory path to create.
* @param permission permission to use.
*/
public FSMkdirs(String path, short permission) {
this.path = new Path(path);
this.permission = permission;
}
/**
* Executes the filesystem operation.
*
* @param fs filesystem instance to use.
*
* @return <code>true</code> if the mkdirs operation was successful,
* <code>false</code> otherwise.
*
     * @throws IOException thrown if an IO error occurred.
*/
@Override
public JSONObject execute(FileSystem fs) throws IOException {
FsPermission fsPermission = new FsPermission(permission);
boolean mkdirs = fs.mkdirs(path, fsPermission);
return toJSON(HttpFSFileSystem.MKDIRS_JSON, mkdirs);
}
}
/**
   * Executor that performs an open FileSystemAccess file system operation.
*/
@InterfaceAudience.Private
public static class FSOpen implements FileSystemAccess.FileSystemExecutor<InputStream> {
private Path path;
/**
     * Creates an open executor.
*
* @param path file to open.
*/
public FSOpen(String path) {
this.path = new Path(path);
}
/**
* Executes the filesystem operation.
*
* @param fs filesystem instance to use.
*
* @return The inputstream of the file.
*
     * @throws IOException thrown if an IO error occurred.
*/
@Override
public InputStream execute(FileSystem fs) throws IOException {
int bufferSize = HttpFSServerWebApp.get().getConfig().getInt("httpfs.buffer.size", 4096);
return fs.open(path, bufferSize);
}
}
/**
   * Executor that performs a rename FileSystemAccess file system operation.
*/
@InterfaceAudience.Private
public static class FSRename implements FileSystemAccess.FileSystemExecutor<JSONObject> {
private Path path;
private Path toPath;
/**
* Creates a rename executor.
*
* @param path path to rename.
* @param toPath new name.
*/
public FSRename(String path, String toPath) {
this.path = new Path(path);
this.toPath = new Path(toPath);
}
/**
* Executes the filesystem operation.
*
* @param fs filesystem instance to use.
*
* @return <code>true</code> if the rename operation was successful,
* <code>false</code> otherwise.
*
     * @throws IOException thrown if an IO error occurred.
*/
@Override
public JSONObject execute(FileSystem fs) throws IOException {
boolean renamed = fs.rename(path, toPath);
return toJSON(HttpFSFileSystem.RENAME_JSON, renamed);
}
}
/**
   * Executor that performs a set-owner FileSystemAccess file system operation.
*/
@InterfaceAudience.Private
public static class FSSetOwner implements FileSystemAccess.FileSystemExecutor<Void> {
private Path path;
private String owner;
private String group;
/**
* Creates a set-owner executor.
*
* @param path the path to set the owner.
* @param owner owner to set.
* @param group group to set.
*/
public FSSetOwner(String path, String owner, String group) {
this.path = new Path(path);
this.owner = owner;
this.group = group;
}
/**
* Executes the filesystem operation.
*
* @param fs filesystem instance to use.
*
* @return void.
*
     * @throws IOException thrown if an IO error occurred.
*/
@Override
public Void execute(FileSystem fs) throws IOException {
fs.setOwner(path, owner, group);
return null;
}
}
/**
   * Executor that performs a set-permission FileSystemAccess file system operation.
*/
@InterfaceAudience.Private
public static class FSSetPermission implements FileSystemAccess.FileSystemExecutor<Void> {
private Path path;
private short permission;
/**
* Creates a set-permission executor.
*
* @param path path to set the permission.
* @param permission permission to set.
*/
public FSSetPermission(String path, short permission) {
this.path = new Path(path);
this.permission = permission;
}
/**
* Executes the filesystem operation.
*
* @param fs filesystem instance to use.
*
* @return void.
*
     * @throws IOException thrown if an IO error occurred.
*/
@Override
public Void execute(FileSystem fs) throws IOException {
FsPermission fsPermission = new FsPermission(permission);
fs.setPermission(path, fsPermission);
return null;
}
}
/**
* Executor that sets the acl for a file in a FileSystem
*/
@InterfaceAudience.Private
public static class FSSetAcl implements FileSystemAccess.FileSystemExecutor<Void> {
private Path path;
private List<AclEntry> aclEntries;
/**
* Creates a set-acl executor.
*
* @param path path to set the acl.
* @param aclSpec acl to set.
*/
public FSSetAcl(String path, String aclSpec) {
this.path = new Path(path);
this.aclEntries = AclEntry.parseAclSpec(aclSpec, true);
}
/**
* Executes the filesystem operation.
*
* @param fs filesystem instance to use.
*
* @return void.
*
* @throws IOException thrown if an IO error occurred.
*/
@Override
public Void execute(FileSystem fs) throws IOException {
fs.setAcl(path, aclEntries);
return null;
}
}
/**
* Executor that removes all acls from a file in a FileSystem
*/
@InterfaceAudience.Private
public static class FSRemoveAcl implements FileSystemAccess.FileSystemExecutor<Void> {
private Path path;
/**
* Creates a remove-acl executor.
*
* @param path path from which to remove the acl.
*/
public FSRemoveAcl(String path) {
this.path = new Path(path);
}
/**
* Executes the filesystem operation.
*
* @param fs filesystem instance to use.
*
* @return void.
*
* @throws IOException thrown if an IO error occurred.
*/
@Override
public Void execute(FileSystem fs) throws IOException {
fs.removeAcl(path);
return null;
}
}
/**
* Executor that modifies acl entries for a file in a FileSystem
*/
@InterfaceAudience.Private
public static class FSModifyAclEntries implements FileSystemAccess.FileSystemExecutor<Void> {
private Path path;
private List<AclEntry> aclEntries;
/**
* Creates a modify-acl executor.
*
* @param path path to set the acl.
* @param aclSpec acl to set.
*/
public FSModifyAclEntries(String path, String aclSpec) {
this.path = new Path(path);
this.aclEntries = AclEntry.parseAclSpec(aclSpec, true);
}
/**
* Executes the filesystem operation.
*
* @param fs filesystem instance to use.
*
* @return void.
*
* @throws IOException thrown if an IO error occurred.
*/
@Override
public Void execute(FileSystem fs) throws IOException {
fs.modifyAclEntries(path, aclEntries);
return null;
}
}
/**
* Executor that removes acl entries from a file in a FileSystem
*/
@InterfaceAudience.Private
public static class FSRemoveAclEntries implements FileSystemAccess.FileSystemExecutor<Void> {
private Path path;
private List<AclEntry> aclEntries;
/**
* Creates a remove acl entry executor.
*
* @param path path to set the acl.
* @param aclSpec acl parts to remove.
*/
public FSRemoveAclEntries(String path, String aclSpec) {
this.path = new Path(path);
this.aclEntries = AclEntry.parseAclSpec(aclSpec, true);
}
/**
* Executes the filesystem operation.
*
* @param fs filesystem instance to use.
*
* @return void.
*
* @throws IOException thrown if an IO error occurred.
*/
@Override
public Void execute(FileSystem fs) throws IOException {
fs.removeAclEntries(path, aclEntries);
return null;
}
}
/**
* Executor that removes the default acl from a directory in a FileSystem
*/
@InterfaceAudience.Private
public static class FSRemoveDefaultAcl implements FileSystemAccess.FileSystemExecutor<Void> {
private Path path;
/**
* Creates an executor for removing the default acl.
*
* @param path path to set the acl.
*/
public FSRemoveDefaultAcl(String path) {
this.path = new Path(path);
}
/**
* Executes the filesystem operation.
*
* @param fs filesystem instance to use.
*
* @return void.
*
* @throws IOException thrown if an IO error occurred.
*/
@Override
public Void execute(FileSystem fs) throws IOException {
fs.removeDefaultAcl(path);
return null;
}
}
/**
* Executor that gets the ACL information for a given file.
*/
@InterfaceAudience.Private
public static class FSAclStatus implements FileSystemAccess.FileSystemExecutor<Map> {
private Path path;
/**
* Creates an executor for getting the ACLs for a file.
*
* @param path the path to retrieve the ACLs.
*/
public FSAclStatus(String path) {
this.path = new Path(path);
}
/**
* Executes the filesystem operation.
*
* @param fs filesystem instance to use.
*
     * @return a Map object (JSON friendly) with the file's ACL status.
*
* @throws IOException thrown if an IO error occurred.
*/
@Override
public Map execute(FileSystem fs) throws IOException {
AclStatus status = fs.getAclStatus(path);
return aclStatusToJSON(status);
}
}
/**
   * Executor that performs a set-replication FileSystemAccess file system operation.
*/
@InterfaceAudience.Private
public static class FSSetReplication implements FileSystemAccess.FileSystemExecutor<JSONObject> {
private Path path;
private short replication;
/**
* Creates a set-replication executor.
*
* @param path path to set the replication factor.
* @param replication replication factor to set.
*/
public FSSetReplication(String path, short replication) {
this.path = new Path(path);
this.replication = replication;
}
/**
* Executes the filesystem operation.
*
* @param fs filesystem instance to use.
*
* @return <code>true</code> if the replication value was set,
* <code>false</code> otherwise.
*
     * @throws IOException thrown if an IO error occurred.
*/
@Override
@SuppressWarnings("unchecked")
public JSONObject execute(FileSystem fs) throws IOException {
boolean ret = fs.setReplication(path, replication);
JSONObject json = new JSONObject();
json.put(HttpFSFileSystem.SET_REPLICATION_JSON, ret);
return json;
}
}
/**
   * Executor that performs a set-times FileSystemAccess file system operation.
*/
@InterfaceAudience.Private
public static class FSSetTimes implements FileSystemAccess.FileSystemExecutor<Void> {
private Path path;
private long mTime;
private long aTime;
/**
* Creates a set-times executor.
*
* @param path path to set the times.
* @param mTime modified time to set.
* @param aTime access time to set.
*/
public FSSetTimes(String path, long mTime, long aTime) {
this.path = new Path(path);
this.mTime = mTime;
this.aTime = aTime;
}
/**
* Executes the filesystem operation.
*
* @param fs filesystem instance to use.
*
* @return void.
*
     * @throws IOException thrown if an IO error occurred.
*/
@Override
public Void execute(FileSystem fs) throws IOException {
fs.setTimes(path, mTime, aTime);
return null;
}
}
/**
   * Executor that performs a setxattr FileSystemAccess file system operation.
*/
@InterfaceAudience.Private
public static class FSSetXAttr implements
FileSystemAccess.FileSystemExecutor<Void> {
private Path path;
private String name;
private byte[] value;
private EnumSet<XAttrSetFlag> flag;
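    /**
     * Creates a setxattr executor. The encoded value is decoded with
     * {@link XAttrCodec#decodeValue(String)}.
     *
     * @param path path of the file/directory to set the xattr on.
     * @param name xattr name to set.
     * @param encodedValue encoded xattr value.
     * @param flag xattr set flags (CREATE and/or REPLACE).
     */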
public FSSetXAttr(String path, String name, String encodedValue,
EnumSet<XAttrSetFlag> flag) throws IOException {
this.path = new Path(path);
this.name = name;
this.value = XAttrCodec.decodeValue(encodedValue);
this.flag = flag;
}
@Override
public Void execute(FileSystem fs) throws IOException {
fs.setXAttr(path, name, value, flag);
return null;
}
}
/**
   * Executor that performs a removexattr FileSystemAccess file system
* operation.
*/
@InterfaceAudience.Private
public static class FSRemoveXAttr implements
FileSystemAccess.FileSystemExecutor<Void> {
private Path path;
private String name;
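    /**
     * Creates a removexattr executor.
     *
     * @param path path of the file/directory to remove the xattr from.
     * @param name xattr name to remove.
     */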
public FSRemoveXAttr(String path, String name) {
this.path = new Path(path);
this.name = name;
}
@Override
public Void execute(FileSystem fs) throws IOException {
fs.removeXAttr(path, name);
return null;
}
}
/**
   * Executor that performs a list-xattrs FileSystemAccess file system
* operation.
*/
@SuppressWarnings("rawtypes")
@InterfaceAudience.Private
public static class FSListXAttrs implements
FileSystemAccess.FileSystemExecutor<Map> {
private Path path;
/**
     * Creates a listing-xattrs executor.
*
* @param path the path to retrieve the xattrs.
*/
public FSListXAttrs(String path) {
this.path = new Path(path);
}
/**
* Executes the filesystem operation.
*
* @param fs filesystem instance to use.
*
* @return Map a map object (JSON friendly) with the xattr names.
*
     * @throws IOException thrown if an IO error occurred.
*/
@Override
public Map execute(FileSystem fs) throws IOException {
List<String> names = fs.listXAttrs(path);
return xAttrNamesToJSON(names);
}
}
/**
   * Executor that performs a get-xattrs FileSystemAccess file system
* operation.
*/
@SuppressWarnings("rawtypes")
@InterfaceAudience.Private
public static class FSGetXAttrs implements
FileSystemAccess.FileSystemExecutor<Map> {
private Path path;
private List<String> names;
private XAttrCodec encoding;
    /**
     * Creates a getting-xattrs executor.
     *
     * @param path the path to retrieve the xattrs.
     * @param names xattr names to retrieve, or null/empty to retrieve them all.
     * @param encoding encoding to use for the xattr values in the response.
     */
public FSGetXAttrs(String path, List<String> names, XAttrCodec encoding) {
this.path = new Path(path);
this.names = names;
this.encoding = encoding;
}
/**
* Executes the filesystem operation.
*
* @param fs filesystem instance to use.
*
* @return Map a map object (JSON friendly) with the xattrs.
*
     * @throws IOException thrown if an IO error occurred.
*/
@Override
public Map execute(FileSystem fs) throws IOException {
Map<String, byte[]> xattrs = null;
if (names != null && !names.isEmpty()) {
xattrs = fs.getXAttrs(path, names);
} else {
xattrs = fs.getXAttrs(path);
}
return xAttrsToJSON(xattrs, encoding);
}
}
}
| 39,000 | 28.479214 | 106 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSReleaseFilter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.http.server;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.lib.service.FileSystemAccess;
import org.apache.hadoop.lib.servlet.FileSystemReleaseFilter;
/**
* Filter that releases FileSystemAccess filesystem instances upon HTTP request
* completion.
*/
@InterfaceAudience.Private
public class HttpFSReleaseFilter extends FileSystemReleaseFilter {
/**
   * Returns the {@link FileSystemAccess} service to which the FileSystemAccess
   * filesystem instances are returned.
*
* @return the FileSystemAccess service.
*/
@Override
protected FileSystemAccess getFileSystemAccess() {
return HttpFSServerWebApp.get().get(FileSystemAccess.class);
}
}
| 1,533 | 33.863636 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.http.server;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.XAttrCodec;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.AccessTimeParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.AclPermissionParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.BlockSizeParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DataParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DestinationParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.FilterParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.GroupParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.LenParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.ModifiedTimeParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.NewLengthParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OffsetParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OperationParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OverwriteParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OwnerParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.PermissionParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.RecursiveParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.ReplicationParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.SourcesParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.XAttrEncodingParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.XAttrNameParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.XAttrSetFlagParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.XAttrValueParam;
import org.apache.hadoop.lib.service.FileSystemAccess;
import org.apache.hadoop.lib.service.FileSystemAccessException;
import org.apache.hadoop.lib.service.Groups;
import org.apache.hadoop.lib.service.Instrumentation;
import org.apache.hadoop.lib.servlet.FileSystemReleaseFilter;
import org.apache.hadoop.lib.wsrs.InputStreamEntity;
import org.apache.hadoop.lib.wsrs.Parameters;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.delegation.web.HttpUserGroupInformation;
import org.json.simple.JSONObject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.MDC;
import javax.servlet.http.HttpServletRequest;
import javax.ws.rs.Consumes;
import javax.ws.rs.DELETE;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.PUT;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.UriBuilder;
import javax.ws.rs.core.UriInfo;
import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
import java.security.AccessControlException;
import java.text.MessageFormat;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;
/**
* Main class of HttpFSServer server.
* <p>
 * The <code>HttpFSServer</code> class uses Jersey JAX-RS to bind HTTP requests to the
* different operations.
*/
@Path(HttpFSFileSystem.SERVICE_VERSION)
@InterfaceAudience.Private
public class HttpFSServer {
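  // Audit events go to a dedicated "httpfsaudit" logger so deployments can
  // route them to a separate audit log file via their logging configuration.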
  private static final Logger AUDIT_LOG = LoggerFactory.getLogger("httpfsaudit");
/**
* Executes a {@link FileSystemAccess.FileSystemExecutor} using a filesystem for the effective
* user.
*
* @param ugi user making the request.
* @param executor FileSystemExecutor to execute.
*
* @return FileSystemExecutor response
*
* @throws IOException thrown if an IO error occurs.
   * @throws FileSystemAccessException thrown if a FileSystemAccess related error occurred. Thrown
* exceptions are handled by {@link HttpFSExceptionProvider}.
*/
private <T> T fsExecute(UserGroupInformation ugi, FileSystemAccess.FileSystemExecutor<T> executor)
throws IOException, FileSystemAccessException {
FileSystemAccess fsAccess = HttpFSServerWebApp.get().get(FileSystemAccess.class);
Configuration conf = HttpFSServerWebApp.get().get(FileSystemAccess.class).getFileSystemConfiguration();
return fsAccess.execute(ugi.getShortUserName(), conf, executor);
}
/**
   * Returns a filesystem instance. The filesystem instance is wired for release at the completion of
* the current Servlet request via the {@link FileSystemReleaseFilter}.
* <p>
* If a do-as user is specified, the current user must be a valid proxyuser, otherwise an
* <code>AccessControlException</code> will be thrown.
*
* @param ugi principal for whom the filesystem instance is.
*
* @return a filesystem for the specified user or do-as user.
*
* @throws IOException thrown if an IO error occurred. Thrown exceptions are
* handled by {@link HttpFSExceptionProvider}.
   * @throws FileSystemAccessException thrown if a FileSystemAccess related error occurred. Thrown
* exceptions are handled by {@link HttpFSExceptionProvider}.
*/
private FileSystem createFileSystem(UserGroupInformation ugi)
throws IOException, FileSystemAccessException {
String hadoopUser = ugi.getShortUserName();
FileSystemAccess fsAccess = HttpFSServerWebApp.get().get(FileSystemAccess.class);
Configuration conf = HttpFSServerWebApp.get().get(FileSystemAccess.class).getFileSystemConfiguration();
FileSystem fs = fsAccess.createFileSystem(hadoopUser, conf);
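    // Hand the instance to the release filter, which keeps it for the current
    // request thread and releases it once the response (including a streamed
    // OPEN) has completed.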
FileSystemReleaseFilter.setFileSystem(fs);
return fs;
}
private void enforceRootPath(HttpFSFileSystem.Operation op, String path) {
if (!path.equals("/")) {
throw new UnsupportedOperationException(
MessageFormat.format("Operation [{0}], invalid path [{1}], must be '/'",
op, path));
}
}
/**
* Special binding for '/' as it is not handled by the wildcard binding.
*
* @param op the HttpFS operation of the request.
* @param params the HttpFS parameters of the request.
*
* @return the request response.
*
* @throws IOException thrown if an IO error occurred. Thrown exceptions are
* handled by {@link HttpFSExceptionProvider}.
   * @throws FileSystemAccessException thrown if a FileSystemAccess related
* error occurred. Thrown exceptions are handled by
* {@link HttpFSExceptionProvider}.
*/
@GET
@Produces(MediaType.APPLICATION_JSON)
public Response getRoot(@QueryParam(OperationParam.NAME) OperationParam op,
@Context Parameters params,
@Context HttpServletRequest request)
throws IOException, FileSystemAccessException {
return get("", op, params, request);
}
private String makeAbsolute(String path) {
return "/" + ((path != null) ? path : "");
}
/**
   * Binding to handle GET requests for the supported operations.
*
* @param path the path for operation.
* @param op the HttpFS operation of the request.
* @param params the HttpFS parameters of the request.
*
* @return the request response.
*
* @throws IOException thrown if an IO error occurred. Thrown exceptions are
* handled by {@link HttpFSExceptionProvider}.
   * @throws FileSystemAccessException thrown if a FileSystemAccess related
* error occurred. Thrown exceptions are handled by
* {@link HttpFSExceptionProvider}.
*/
@GET
@Path("{path:.*}")
@Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
public Response get(@PathParam("path") String path,
@QueryParam(OperationParam.NAME) OperationParam op,
@Context Parameters params,
@Context HttpServletRequest request)
throws IOException, FileSystemAccessException {
UserGroupInformation user = HttpUserGroupInformation.get();
Response response;
path = makeAbsolute(path);
MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
MDC.put("hostname", request.getRemoteAddr());
switch (op.value()) {
case OPEN: {
//Invoking the command directly using an unmanaged FileSystem that is
// released by the FileSystemReleaseFilter
FSOperations.FSOpen command = new FSOperations.FSOpen(path);
FileSystem fs = createFileSystem(user);
InputStream is = command.execute(fs);
Long offset = params.get(OffsetParam.NAME, OffsetParam.class);
Long len = params.get(LenParam.NAME, LenParam.class);
AUDIT_LOG.info("[{}] offset [{}] len [{}]",
new Object[]{path, offset, len});
InputStreamEntity entity = new InputStreamEntity(is, offset, len);
response =
Response.ok(entity).type(MediaType.APPLICATION_OCTET_STREAM).build();
break;
}
case GETFILESTATUS: {
FSOperations.FSFileStatus command =
new FSOperations.FSFileStatus(path);
Map json = fsExecute(user, command);
AUDIT_LOG.info("[{}]", path);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case LISTSTATUS: {
String filter = params.get(FilterParam.NAME, FilterParam.class);
FSOperations.FSListStatus command = new FSOperations.FSListStatus(
path, filter);
Map json = fsExecute(user, command);
AUDIT_LOG.info("[{}] filter [{}]", path,
(filter != null) ? filter : "-");
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETHOMEDIRECTORY: {
enforceRootPath(op.value(), path);
FSOperations.FSHomeDir command = new FSOperations.FSHomeDir();
JSONObject json = fsExecute(user, command);
AUDIT_LOG.info("");
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case INSTRUMENTATION: {
enforceRootPath(op.value(), path);
Groups groups = HttpFSServerWebApp.get().get(Groups.class);
List<String> userGroups = groups.getGroups(user.getShortUserName());
if (!userGroups.contains(HttpFSServerWebApp.get().getAdminGroup())) {
throw new AccessControlException(
"User not in HttpFSServer admin group");
}
Instrumentation instrumentation =
HttpFSServerWebApp.get().get(Instrumentation.class);
Map snapshot = instrumentation.getSnapshot();
response = Response.ok(snapshot).build();
break;
}
case GETCONTENTSUMMARY: {
FSOperations.FSContentSummary command =
new FSOperations.FSContentSummary(path);
Map json = fsExecute(user, command);
AUDIT_LOG.info("[{}]", path);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETFILECHECKSUM: {
FSOperations.FSFileChecksum command =
new FSOperations.FSFileChecksum(path);
Map json = fsExecute(user, command);
AUDIT_LOG.info("[{}]", path);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETFILEBLOCKLOCATIONS: {
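        // Block location queries are not implemented by this server; the
        // request is rejected with 400 Bad Request.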
response = Response.status(Response.Status.BAD_REQUEST).build();
break;
}
case GETACLSTATUS: {
FSOperations.FSAclStatus command =
new FSOperations.FSAclStatus(path);
Map json = fsExecute(user, command);
AUDIT_LOG.info("ACL status for [{}]", path);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETXATTRS: {
List<String> xattrNames = params.getValues(XAttrNameParam.NAME,
XAttrNameParam.class);
XAttrCodec encoding = params.get(XAttrEncodingParam.NAME,
XAttrEncodingParam.class);
FSOperations.FSGetXAttrs command = new FSOperations.FSGetXAttrs(path,
xattrNames, encoding);
@SuppressWarnings("rawtypes")
Map json = fsExecute(user, command);
AUDIT_LOG.info("XAttrs for [{}]", path);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case LISTXATTRS: {
FSOperations.FSListXAttrs command = new FSOperations.FSListXAttrs(path);
@SuppressWarnings("rawtypes")
Map json = fsExecute(user, command);
AUDIT_LOG.info("XAttr names for [{}]", path);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
default: {
throw new IOException(
MessageFormat.format("Invalid HTTP GET operation [{0}]",
op.value()));
}
}
return response;
}
/**
* Binding to handle DELETE requests.
*
* @param path the path for operation.
* @param op the HttpFS operation of the request.
* @param params the HttpFS parameters of the request.
*
* @return the request response.
*
* @throws IOException thrown if an IO error occurred. Thrown exceptions are
* handled by {@link HttpFSExceptionProvider}.
   * @throws FileSystemAccessException thrown if a FileSystemAccess related
* error occurred. Thrown exceptions are handled by
* {@link HttpFSExceptionProvider}.
*/
@DELETE
@Path("{path:.*}")
@Produces(MediaType.APPLICATION_JSON)
public Response delete(@PathParam("path") String path,
@QueryParam(OperationParam.NAME) OperationParam op,
@Context Parameters params,
@Context HttpServletRequest request)
throws IOException, FileSystemAccessException {
UserGroupInformation user = HttpUserGroupInformation.get();
Response response;
path = makeAbsolute(path);
MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
MDC.put("hostname", request.getRemoteAddr());
switch (op.value()) {
case DELETE: {
Boolean recursive =
params.get(RecursiveParam.NAME, RecursiveParam.class);
AUDIT_LOG.info("[{}] recursive [{}]", path, recursive);
FSOperations.FSDelete command =
new FSOperations.FSDelete(path, recursive);
JSONObject json = fsExecute(user, command);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
default: {
throw new IOException(
MessageFormat.format("Invalid HTTP DELETE operation [{0}]",
op.value()));
}
}
return response;
}
/**
* Binding to handle POST requests.
*
* @param is the inputstream for the request payload.
   * @param uriInfo the uri info of the request.
* @param path the path for operation.
* @param op the HttpFS operation of the request.
* @param params the HttpFS parameters of the request.
*
* @return the request response.
*
* @throws IOException thrown if an IO error occurred. Thrown exceptions are
* handled by {@link HttpFSExceptionProvider}.
   * @throws FileSystemAccessException thrown if a FileSystemAccess related
* error occurred. Thrown exceptions are handled by
* {@link HttpFSExceptionProvider}.
*/
@POST
@Path("{path:.*}")
@Consumes({"*/*"})
@Produces({MediaType.APPLICATION_JSON})
public Response post(InputStream is,
@Context UriInfo uriInfo,
@PathParam("path") String path,
@QueryParam(OperationParam.NAME) OperationParam op,
@Context Parameters params,
@Context HttpServletRequest request)
throws IOException, FileSystemAccessException {
UserGroupInformation user = HttpUserGroupInformation.get();
Response response;
path = makeAbsolute(path);
MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
MDC.put("hostname", request.getRemoteAddr());
switch (op.value()) {
case APPEND: {
Boolean hasData = params.get(DataParam.NAME, DataParam.class);
if (!hasData) {
response = Response.temporaryRedirect(
createUploadRedirectionURL(uriInfo,
HttpFSFileSystem.Operation.APPEND)).build();
} else {
FSOperations.FSAppend command =
new FSOperations.FSAppend(is, path);
fsExecute(user, command);
AUDIT_LOG.info("[{}]", path);
response = Response.ok().type(MediaType.APPLICATION_JSON).build();
}
break;
}
case CONCAT: {
System.out.println("HTTPFS SERVER CONCAT");
String sources = params.get(SourcesParam.NAME, SourcesParam.class);
FSOperations.FSConcat command =
new FSOperations.FSConcat(path, sources.split(","));
fsExecute(user, command);
AUDIT_LOG.info("[{}]", path);
System.out.println("SENT RESPONSE");
response = Response.ok().build();
break;
}
case TRUNCATE: {
Long newLength = params.get(NewLengthParam.NAME, NewLengthParam.class);
FSOperations.FSTruncate command =
new FSOperations.FSTruncate(path, newLength);
JSONObject json = fsExecute(user, command);
AUDIT_LOG.info("Truncate [{}] to length [{}]", path, newLength);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
default: {
throw new IOException(
MessageFormat.format("Invalid HTTP POST operation [{0}]",
op.value()));
}
}
return response;
}
/**
* Creates the URL for an upload operation (create or append).
*
* @param uriInfo uri info of the request.
* @param uploadOperation operation for the upload URL.
*
* @return the URI for uploading data.
*/
protected URI createUploadRedirectionURL(UriInfo uriInfo, Enum<?> uploadOperation) {
UriBuilder uriBuilder = uriInfo.getRequestUriBuilder();
uriBuilder = uriBuilder.replaceQueryParam(OperationParam.NAME, uploadOperation).
queryParam(DataParam.NAME, Boolean.TRUE);
return uriBuilder.build(null);
}
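  /*
   * Illustrative result (request URI assumed): for a first-step request to
   *   .../webhdfs/v1/user/foo/bar?op=CREATE
   * the redirection URL built above is
   *   .../webhdfs/v1/user/foo/bar?op=CREATE&data=true
   * and the client repeats the call against it with the file payload in the
   * request body.
   */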
/**
* Binding to handle PUT requests.
*
* @param is the inputstream for the request payload.
   * @param uriInfo the uri info of the request.
* @param path the path for operation.
* @param op the HttpFS operation of the request.
* @param params the HttpFS parameters of the request.
*
* @return the request response.
*
* @throws IOException thrown if an IO error occurred. Thrown exceptions are
* handled by {@link HttpFSExceptionProvider}.
   * @throws FileSystemAccessException thrown if a FileSystemAccess related
* error occurred. Thrown exceptions are handled by
* {@link HttpFSExceptionProvider}.
*/
@PUT
@Path("{path:.*}")
@Consumes({"*/*"})
@Produces({MediaType.APPLICATION_JSON})
public Response put(InputStream is,
@Context UriInfo uriInfo,
@PathParam("path") String path,
@QueryParam(OperationParam.NAME) OperationParam op,
@Context Parameters params,
@Context HttpServletRequest request)
throws IOException, FileSystemAccessException {
UserGroupInformation user = HttpUserGroupInformation.get();
Response response;
path = makeAbsolute(path);
MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
MDC.put("hostname", request.getRemoteAddr());
switch (op.value()) {
case CREATE: {
Boolean hasData = params.get(DataParam.NAME, DataParam.class);
if (!hasData) {
response = Response.temporaryRedirect(
createUploadRedirectionURL(uriInfo,
HttpFSFileSystem.Operation.CREATE)).build();
} else {
Short permission = params.get(PermissionParam.NAME,
PermissionParam.class);
Boolean override = params.get(OverwriteParam.NAME,
OverwriteParam.class);
Short replication = params.get(ReplicationParam.NAME,
ReplicationParam.class);
Long blockSize = params.get(BlockSizeParam.NAME,
BlockSizeParam.class);
FSOperations.FSCreate command =
new FSOperations.FSCreate(is, path, permission, override,
replication, blockSize);
fsExecute(user, command);
AUDIT_LOG.info(
"[{}] permission [{}] override [{}] replication [{}] blockSize [{}]",
new Object[]{path, permission, override, replication, blockSize});
response = Response.status(Response.Status.CREATED).build();
}
break;
}
case SETXATTR: {
String xattrName = params.get(XAttrNameParam.NAME,
XAttrNameParam.class);
String xattrValue = params.get(XAttrValueParam.NAME,
XAttrValueParam.class);
EnumSet<XAttrSetFlag> flag = params.get(XAttrSetFlagParam.NAME,
XAttrSetFlagParam.class);
FSOperations.FSSetXAttr command = new FSOperations.FSSetXAttr(
path, xattrName, xattrValue, flag);
fsExecute(user, command);
AUDIT_LOG.info("[{}] to xAttr [{}]", path, xattrName);
response = Response.ok().build();
break;
}
case REMOVEXATTR: {
String xattrName = params.get(XAttrNameParam.NAME, XAttrNameParam.class);
FSOperations.FSRemoveXAttr command = new FSOperations.FSRemoveXAttr(
path, xattrName);
fsExecute(user, command);
AUDIT_LOG.info("[{}] removed xAttr [{}]", path, xattrName);
response = Response.ok().build();
break;
}
case MKDIRS: {
Short permission = params.get(PermissionParam.NAME,
PermissionParam.class);
FSOperations.FSMkdirs command =
new FSOperations.FSMkdirs(path, permission);
JSONObject json = fsExecute(user, command);
AUDIT_LOG.info("[{}] permission [{}]", path, permission);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case RENAME: {
String toPath = params.get(DestinationParam.NAME, DestinationParam.class);
FSOperations.FSRename command =
new FSOperations.FSRename(path, toPath);
JSONObject json = fsExecute(user, command);
AUDIT_LOG.info("[{}] to [{}]", path, toPath);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case SETOWNER: {
String owner = params.get(OwnerParam.NAME, OwnerParam.class);
String group = params.get(GroupParam.NAME, GroupParam.class);
FSOperations.FSSetOwner command =
new FSOperations.FSSetOwner(path, owner, group);
fsExecute(user, command);
AUDIT_LOG.info("[{}] to (O/G)[{}]", path, owner + ":" + group);
response = Response.ok().build();
break;
}
case SETPERMISSION: {
Short permission = params.get(PermissionParam.NAME,
PermissionParam.class);
FSOperations.FSSetPermission command =
new FSOperations.FSSetPermission(path, permission);
fsExecute(user, command);
AUDIT_LOG.info("[{}] to [{}]", path, permission);
response = Response.ok().build();
break;
}
case SETREPLICATION: {
Short replication = params.get(ReplicationParam.NAME,
ReplicationParam.class);
FSOperations.FSSetReplication command =
new FSOperations.FSSetReplication(path, replication);
JSONObject json = fsExecute(user, command);
AUDIT_LOG.info("[{}] to [{}]", path, replication);
response = Response.ok(json).build();
break;
}
case SETTIMES: {
Long modifiedTime = params.get(ModifiedTimeParam.NAME,
ModifiedTimeParam.class);
Long accessTime = params.get(AccessTimeParam.NAME,
AccessTimeParam.class);
FSOperations.FSSetTimes command =
new FSOperations.FSSetTimes(path, modifiedTime, accessTime);
fsExecute(user, command);
AUDIT_LOG.info("[{}] to (M/A)[{}]", path,
modifiedTime + ":" + accessTime);
response = Response.ok().build();
break;
}
case SETACL: {
String aclSpec = params.get(AclPermissionParam.NAME,
AclPermissionParam.class);
FSOperations.FSSetAcl command =
new FSOperations.FSSetAcl(path, aclSpec);
fsExecute(user, command);
AUDIT_LOG.info("[{}] to acl [{}]", path, aclSpec);
response = Response.ok().build();
break;
}
case REMOVEACL: {
FSOperations.FSRemoveAcl command =
new FSOperations.FSRemoveAcl(path);
fsExecute(user, command);
AUDIT_LOG.info("[{}] removed acl", path);
response = Response.ok().build();
break;
}
case MODIFYACLENTRIES: {
String aclSpec = params.get(AclPermissionParam.NAME,
AclPermissionParam.class);
FSOperations.FSModifyAclEntries command =
new FSOperations.FSModifyAclEntries(path, aclSpec);
fsExecute(user, command);
AUDIT_LOG.info("[{}] modify acl entry with [{}]", path, aclSpec);
response = Response.ok().build();
break;
}
case REMOVEACLENTRIES: {
String aclSpec = params.get(AclPermissionParam.NAME,
AclPermissionParam.class);
FSOperations.FSRemoveAclEntries command =
new FSOperations.FSRemoveAclEntries(path, aclSpec);
fsExecute(user, command);
AUDIT_LOG.info("[{}] remove acl entry [{}]", path, aclSpec);
response = Response.ok().build();
break;
}
case REMOVEDEFAULTACL: {
FSOperations.FSRemoveDefaultAcl command =
new FSOperations.FSRemoveDefaultAcl(path);
fsExecute(user, command);
AUDIT_LOG.info("[{}] remove default acl", path);
response = Response.ok().build();
break;
}
default: {
throw new IOException(
MessageFormat.format("Invalid HTTP PUT operation [{0}]",
op.value()));
}
}
return response;
}
}
| 27,664 | 40.476762 | 107 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSExceptionProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.http.server;
import com.sun.jersey.api.container.ContainerException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.lib.service.FileSystemAccessException;
import org.apache.hadoop.lib.wsrs.ExceptionProvider;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.MDC;
import javax.ws.rs.core.Response;
import javax.ws.rs.ext.Provider;
import java.io.FileNotFoundException;
import java.io.IOException;
/**
* JAX-RS <code>ExceptionMapper</code> implementation that maps HttpFSServer's
* exceptions to HTTP status codes.
*/
@Provider
@InterfaceAudience.Private
public class HttpFSExceptionProvider extends ExceptionProvider {
private static Logger AUDIT_LOG = LoggerFactory.getLogger("httpfsaudit");
private static Logger LOG = LoggerFactory.getLogger(HttpFSExceptionProvider.class);
/**
* Maps different exceptions thrown by HttpFSServer to HTTP status codes.
* <ul>
* <li>SecurityException : HTTP UNAUTHORIZED</li>
* <li>FileNotFoundException : HTTP NOT_FOUND</li>
   * <li>IOException : HTTP INTERNAL_SERVER_ERROR</li>
   * <li>UnsupportedOperationException : HTTP BAD_REQUEST</li>
   * <li>IllegalArgumentException : HTTP BAD_REQUEST</li>
   * <li>all other exceptions : HTTP INTERNAL_SERVER_ERROR</li>
* </ul>
*
* @param throwable exception thrown.
*
* @return mapped HTTP status code
*/
@Override
public Response toResponse(Throwable throwable) {
Response.Status status;
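    // Unwrap wrapper exceptions so the status mapping below is applied to the
    // underlying cause.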
if (throwable instanceof FileSystemAccessException) {
throwable = throwable.getCause();
}
if (throwable instanceof ContainerException) {
throwable = throwable.getCause();
}
if (throwable instanceof SecurityException) {
status = Response.Status.UNAUTHORIZED;
} else if (throwable instanceof FileNotFoundException) {
status = Response.Status.NOT_FOUND;
} else if (throwable instanceof IOException) {
status = Response.Status.INTERNAL_SERVER_ERROR;
} else if (throwable instanceof UnsupportedOperationException) {
status = Response.Status.BAD_REQUEST;
} else if (throwable instanceof IllegalArgumentException) {
status = Response.Status.BAD_REQUEST;
} else {
status = Response.Status.INTERNAL_SERVER_ERROR;
}
return createResponse(status, throwable);
}
/**
* Logs the HTTP status code and exception in HttpFSServer's log.
*
* @param status HTTP status code.
* @param throwable exception thrown.
*/
@Override
protected void log(Response.Status status, Throwable throwable) {
String method = MDC.get("method");
String path = MDC.get("path");
String message = getOneLineMessage(throwable);
AUDIT_LOG.warn("FAILED [{}:{}] response [{}] {}", new Object[]{method, path, status, message});
LOG.warn("[{}:{}] response [{}] {}", new Object[]{method, path, status, message}, throwable);
}
}
| 3,681 | 36.191919 | 99 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/CheckUploadContentTypeFilter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.http.server;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
import org.apache.hadoop.util.StringUtils;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.util.HashSet;
import java.util.Set;
/**
 * Filter that enforces the content-type to be application/octet-stream for
* POST and PUT requests.
*/
@InterfaceAudience.Private
public class CheckUploadContentTypeFilter implements Filter {
private static final Set<String> UPLOAD_OPERATIONS = new HashSet<String>();
static {
UPLOAD_OPERATIONS.add(HttpFSFileSystem.Operation.APPEND.toString());
UPLOAD_OPERATIONS.add(HttpFSFileSystem.Operation.CREATE.toString());
}
/**
* Initializes the filter.
* <p>
* This implementation is a NOP.
*
* @param config filter configuration.
*
* @throws ServletException thrown if the filter could not be initialized.
*/
@Override
public void init(FilterConfig config) throws ServletException {
}
/**
* Enforces the content-type to be application/octet-stream for
* POST and PUT requests.
*
* @param request servlet request.
* @param response servlet response.
* @param chain filter chain.
*
* @throws IOException thrown if an IO error occurs.
* @throws ServletException thrown if a servlet error occurs.
*/
@Override
public void doFilter(ServletRequest request, ServletResponse response,
FilterChain chain)
throws IOException, ServletException {
boolean contentTypeOK = true;
HttpServletRequest httpReq = (HttpServletRequest) request;
HttpServletResponse httpRes = (HttpServletResponse) response;
String method = httpReq.getMethod();
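    // Only PUT/POST requests for upload operations (APPEND, CREATE) that carry
    // inline data (data=true) must declare the octet-stream content type.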
if (method.equals("PUT") || method.equals("POST")) {
String op = httpReq.getParameter(HttpFSFileSystem.OP_PARAM);
if (op != null && UPLOAD_OPERATIONS.contains(
StringUtils.toUpperCase(op))) {
if ("true".equalsIgnoreCase(httpReq.getParameter(HttpFSParametersProvider.DataParam.NAME))) {
String contentType = httpReq.getContentType();
contentTypeOK =
HttpFSFileSystem.UPLOAD_CONTENT_TYPE.equalsIgnoreCase(contentType);
}
}
}
if (contentTypeOK) {
chain.doFilter(httpReq, httpRes);
}
else {
httpRes.sendError(HttpServletResponse.SC_BAD_REQUEST,
"Data upload requests must have content-type set to '" +
HttpFSFileSystem.UPLOAD_CONTENT_TYPE + "'");
}
}
/**
* Destroys the filter.
* <p>
* This implementation is a NOP.
*/
@Override
public void destroy() {
}
}
| 3,787 | 31.655172 | 101 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.http.server;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.XAttrCodec;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
import org.apache.hadoop.fs.http.client.HttpFSFileSystem.Operation;
import org.apache.hadoop.lib.wsrs.BooleanParam;
import org.apache.hadoop.lib.wsrs.EnumParam;
import org.apache.hadoop.lib.wsrs.EnumSetParam;
import org.apache.hadoop.lib.wsrs.LongParam;
import org.apache.hadoop.lib.wsrs.Param;
import org.apache.hadoop.lib.wsrs.ParametersProvider;
import org.apache.hadoop.lib.wsrs.ShortParam;
import org.apache.hadoop.lib.wsrs.StringParam;
import org.apache.hadoop.util.StringUtils;
import javax.ws.rs.ext.Provider;
import java.util.HashMap;
import java.util.Map;
import java.util.regex.Pattern;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT;
/**
* HttpFS ParametersProvider.
*/
@Provider
@InterfaceAudience.Private
@SuppressWarnings("unchecked")
public class HttpFSParametersProvider extends ParametersProvider {
private static final Map<Enum, Class<Param<?>>[]> PARAMS_DEF =
new HashMap<Enum, Class<Param<?>>[]>();
static {
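    // Query parameters accepted by each HttpFS operation; the
    // ParametersProvider base class uses this map to parse and validate
    // incoming requests.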
PARAMS_DEF.put(Operation.OPEN,
new Class[]{OffsetParam.class, LenParam.class});
PARAMS_DEF.put(Operation.GETFILESTATUS, new Class[]{});
PARAMS_DEF.put(Operation.LISTSTATUS, new Class[]{FilterParam.class});
PARAMS_DEF.put(Operation.GETHOMEDIRECTORY, new Class[]{});
PARAMS_DEF.put(Operation.GETCONTENTSUMMARY, new Class[]{});
PARAMS_DEF.put(Operation.GETFILECHECKSUM, new Class[]{});
PARAMS_DEF.put(Operation.GETFILEBLOCKLOCATIONS, new Class[]{});
PARAMS_DEF.put(Operation.GETACLSTATUS, new Class[]{});
PARAMS_DEF.put(Operation.INSTRUMENTATION, new Class[]{});
PARAMS_DEF.put(Operation.APPEND, new Class[]{DataParam.class});
PARAMS_DEF.put(Operation.CONCAT, new Class[]{SourcesParam.class});
PARAMS_DEF.put(Operation.TRUNCATE, new Class[]{NewLengthParam.class});
PARAMS_DEF.put(Operation.CREATE,
new Class[]{PermissionParam.class, OverwriteParam.class,
ReplicationParam.class, BlockSizeParam.class, DataParam.class});
PARAMS_DEF.put(Operation.MKDIRS, new Class[]{PermissionParam.class});
PARAMS_DEF.put(Operation.RENAME, new Class[]{DestinationParam.class});
PARAMS_DEF.put(Operation.SETOWNER,
new Class[]{OwnerParam.class, GroupParam.class});
PARAMS_DEF.put(Operation.SETPERMISSION, new Class[]{PermissionParam.class});
PARAMS_DEF.put(Operation.SETREPLICATION,
new Class[]{ReplicationParam.class});
PARAMS_DEF.put(Operation.SETTIMES,
new Class[]{ModifiedTimeParam.class, AccessTimeParam.class});
PARAMS_DEF.put(Operation.DELETE, new Class[]{RecursiveParam.class});
PARAMS_DEF.put(Operation.SETACL, new Class[]{AclPermissionParam.class});
PARAMS_DEF.put(Operation.REMOVEACL, new Class[]{});
PARAMS_DEF.put(Operation.MODIFYACLENTRIES,
new Class[]{AclPermissionParam.class});
PARAMS_DEF.put(Operation.REMOVEACLENTRIES,
new Class[]{AclPermissionParam.class});
PARAMS_DEF.put(Operation.REMOVEDEFAULTACL, new Class[]{});
PARAMS_DEF.put(Operation.SETXATTR,
new Class[]{XAttrNameParam.class, XAttrValueParam.class,
XAttrSetFlagParam.class});
PARAMS_DEF.put(Operation.REMOVEXATTR, new Class[]{XAttrNameParam.class});
PARAMS_DEF.put(Operation.GETXATTRS,
new Class[]{XAttrNameParam.class, XAttrEncodingParam.class});
PARAMS_DEF.put(Operation.LISTXATTRS, new Class[]{});
}
public HttpFSParametersProvider() {
super(HttpFSFileSystem.OP_PARAM, HttpFSFileSystem.Operation.class,
PARAMS_DEF);
}
/**
* Class for access-time parameter.
*/
@InterfaceAudience.Private
public static class AccessTimeParam extends LongParam {
/**
* Parameter name.
*/
public static final String NAME = HttpFSFileSystem.ACCESS_TIME_PARAM;
/**
* Constructor.
*/
public AccessTimeParam() {
      super(NAME, -1L);
}
}
/**
* Class for block-size parameter.
*/
@InterfaceAudience.Private
public static class BlockSizeParam extends LongParam {
/**
* Parameter name.
*/
public static final String NAME = HttpFSFileSystem.BLOCKSIZE_PARAM;
/**
* Constructor.
*/
public BlockSizeParam() {
      super(NAME, -1L);
}
}
/**
* Class for data parameter.
*/
@InterfaceAudience.Private
public static class DataParam extends BooleanParam {
/**
* Parameter name.
*/
public static final String NAME = "data";
/**
* Constructor.
*/
public DataParam() {
super(NAME, false);
}
}
/**
* Class for operation parameter.
*/
@InterfaceAudience.Private
public static class OperationParam extends EnumParam<HttpFSFileSystem.Operation> {
/**
* Parameter name.
*/
public static final String NAME = HttpFSFileSystem.OP_PARAM;
/**
* Constructor.
*/
public OperationParam(String operation) {
super(NAME, HttpFSFileSystem.Operation.class,
HttpFSFileSystem.Operation.valueOf(
StringUtils.toUpperCase(operation)));
}
}
/**
* Class for delete's recursive parameter.
*/
@InterfaceAudience.Private
public static class RecursiveParam extends BooleanParam {
/**
* Parameter name.
*/
public static final String NAME = HttpFSFileSystem.RECURSIVE_PARAM;
/**
* Constructor.
*/
public RecursiveParam() {
super(NAME, false);
}
}
/**
* Class for filter parameter.
*/
@InterfaceAudience.Private
public static class FilterParam extends StringParam {
/**
* Parameter name.
*/
public static final String NAME = "filter";
/**
* Constructor.
*/
public FilterParam() {
super(NAME, null);
}
}
/**
* Class for group parameter.
*/
@InterfaceAudience.Private
public static class GroupParam extends StringParam {
/**
* Parameter name.
*/
public static final String NAME = HttpFSFileSystem.GROUP_PARAM;
/**
* Constructor.
*/
public GroupParam() {
super(NAME, null);
}
}
/**
* Class for len parameter.
*/
@InterfaceAudience.Private
public static class LenParam extends LongParam {
/**
* Parameter name.
*/
public static final String NAME = "length";
/**
* Constructor.
*/
public LenParam() {
      super(NAME, -1L);
}
}
/**
* Class for modified-time parameter.
*/
@InterfaceAudience.Private
public static class ModifiedTimeParam extends LongParam {
/**
* Parameter name.
*/
public static final String NAME = HttpFSFileSystem.MODIFICATION_TIME_PARAM;
/**
* Constructor.
*/
public ModifiedTimeParam() {
      super(NAME, -1L);
}
}
/**
* Class for offset parameter.
*/
@InterfaceAudience.Private
public static class OffsetParam extends LongParam {
/**
* Parameter name.
*/
public static final String NAME = "offset";
/**
* Constructor.
*/
public OffsetParam() {
      super(NAME, 0L);
}
}
/**
* Class for newlength parameter.
*/
@InterfaceAudience.Private
public static class NewLengthParam extends LongParam {
/**
* Parameter name.
*/
public static final String NAME = HttpFSFileSystem.NEW_LENGTH_PARAM;
/**
* Constructor.
*/
public NewLengthParam() {
      super(NAME, 0L);
}
}
/**
* Class for overwrite parameter.
*/
@InterfaceAudience.Private
public static class OverwriteParam extends BooleanParam {
/**
* Parameter name.
*/
public static final String NAME = HttpFSFileSystem.OVERWRITE_PARAM;
/**
* Constructor.
*/
public OverwriteParam() {
super(NAME, true);
}
}
/**
* Class for owner parameter.
*/
@InterfaceAudience.Private
public static class OwnerParam extends StringParam {
/**
* Parameter name.
*/
public static final String NAME = HttpFSFileSystem.OWNER_PARAM;
/**
* Constructor.
*/
public OwnerParam() {
super(NAME, null);
}
}
/**
* Class for permission parameter.
*/
@InterfaceAudience.Private
public static class PermissionParam extends ShortParam {
/**
* Parameter name.
*/
public static final String NAME = HttpFSFileSystem.PERMISSION_PARAM;
/**
* Constructor.
*/
public PermissionParam() {
super(NAME, HttpFSFileSystem.DEFAULT_PERMISSION, 8);
}
}
/**
* Class for AclPermission parameter.
*/
@InterfaceAudience.Private
public static class AclPermissionParam extends StringParam {
/**
* Parameter name.
*/
public static final String NAME = HttpFSFileSystem.ACLSPEC_PARAM;
/**
* Constructor.
*/
public AclPermissionParam() {
super(NAME, HttpFSFileSystem.ACLSPEC_DEFAULT,
Pattern.compile(DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT));
}
}
/**
* Class for replication parameter.
*/
@InterfaceAudience.Private
public static class ReplicationParam extends ShortParam {
/**
* Parameter name.
*/
public static final String NAME = HttpFSFileSystem.REPLICATION_PARAM;
/**
* Constructor.
*/
public ReplicationParam() {
super(NAME, (short) -1);
}
}
/**
* Class for concat sources parameter.
*/
@InterfaceAudience.Private
public static class SourcesParam extends StringParam {
/**
* Parameter name.
*/
public static final String NAME = HttpFSFileSystem.SOURCES_PARAM;
/**
* Constructor.
*/
public SourcesParam() {
super(NAME, null);
}
}
/**
* Class for to-path parameter.
*/
@InterfaceAudience.Private
public static class DestinationParam extends StringParam {
/**
* Parameter name.
*/
public static final String NAME = HttpFSFileSystem.DESTINATION_PARAM;
/**
* Constructor.
*/
public DestinationParam() {
super(NAME, null);
}
}
/**
   * Class for xattr name parameter.
*/
@InterfaceAudience.Private
public static class XAttrNameParam extends StringParam {
public static final String XATTR_NAME_REGX =
"^(user\\.|trusted\\.|system\\.|security\\.).+";
/**
* Parameter name.
*/
public static final String NAME = HttpFSFileSystem.XATTR_NAME_PARAM;
private static final Pattern pattern = Pattern.compile(XATTR_NAME_REGX);
/**
* Constructor.
*/
public XAttrNameParam() {
super(NAME, null, pattern);
}
}
/**
   * Class for xattr value parameter.
*/
@InterfaceAudience.Private
public static class XAttrValueParam extends StringParam {
/**
* Parameter name.
*/
public static final String NAME = HttpFSFileSystem.XATTR_VALUE_PARAM;
/**
* Constructor.
*/
public XAttrValueParam() {
super(NAME, null);
}
}
/**
   * Class for xattr set-flag parameter.
*/
@InterfaceAudience.Private
public static class XAttrSetFlagParam extends EnumSetParam<XAttrSetFlag> {
/**
* Parameter name.
*/
public static final String NAME = HttpFSFileSystem.XATTR_SET_FLAG_PARAM;
/**
* Constructor.
*/
public XAttrSetFlagParam() {
super(NAME, XAttrSetFlag.class, null);
}
}
/**
   * Class for xattr encoding parameter.
*/
@InterfaceAudience.Private
public static class XAttrEncodingParam extends EnumParam<XAttrCodec> {
/**
* Parameter name.
*/
public static final String NAME = HttpFSFileSystem.XATTR_ENCODING_PARAM;
/**
* Constructor.
*/
public XAttrEncodingParam() {
super(NAME, XAttrCodec.class, null);
}
}
}
| 12,726 | 23.288168 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebApp.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.http.server;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.lib.server.ServerException;
import org.apache.hadoop.lib.service.FileSystemAccess;
import org.apache.hadoop.lib.servlet.ServerWebApp;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
/**
* Bootstrap class that manages the initialization and destruction of the
 * HttpFSServer server. It is a <code>javax.servlet.ServletContextListener
* </code> implementation that is wired in HttpFSServer's WAR
* <code>WEB-INF/web.xml</code>.
* <p>
 * It provides access to the server context via the singleton {@link #get}.
* <p>
* All the configuration is loaded from configuration properties prefixed
* with <code>httpfs.</code>.
*/
@InterfaceAudience.Private
public class HttpFSServerWebApp extends ServerWebApp {
private static final Logger LOG =
LoggerFactory.getLogger(HttpFSServerWebApp.class);
/**
* Server name and prefix for all configuration properties.
*/
public static final String NAME = "httpfs";
/**
* Configuration property that defines HttpFSServer admin group.
*/
public static final String CONF_ADMIN_GROUP = "admin.group";
private static HttpFSServerWebApp SERVER;
private String adminGroup;
/**
* Default constructor.
*
* @throws IOException thrown if the home/conf/log/temp directory paths
* could not be resolved.
*/
public HttpFSServerWebApp() throws IOException {
super(NAME);
}
/**
* Constructor used for testing purposes.
*/
public HttpFSServerWebApp(String homeDir, String configDir, String logDir,
String tempDir, Configuration config) {
super(NAME, homeDir, configDir, logDir, tempDir, config);
}
/**
* Constructor used for testing purposes.
*/
public HttpFSServerWebApp(String homeDir, Configuration config) {
super(NAME, homeDir, config);
}
/**
* Initializes the HttpFSServer server, loads configuration and required
* services.
*
* @throws ServerException thrown if HttpFSServer server could not be
* initialized.
*/
@Override
public void init() throws ServerException {
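    // Enforce the singleton: only one HttpFSServer web application may be
    // active at a time.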
if (SERVER != null) {
throw new RuntimeException("HttpFSServer server already initialized");
}
SERVER = this;
super.init();
adminGroup = getConfig().get(getPrefixedName(CONF_ADMIN_GROUP), "admin");
LOG.info("Connects to Namenode [{}]",
get().get(FileSystemAccess.class).getFileSystemConfiguration().
get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY));
}
/**
   * Shuts down all running services.
*/
@Override
public void destroy() {
SERVER = null;
super.destroy();
}
/**
   * Returns the HttpFSServer server singleton; configuration and services
   * are accessible through it.
*
* @return the HttpFSServer server singleton.
*/
public static HttpFSServerWebApp get() {
return SERVER;
}
/**
* Returns HttpFSServer admin group.
*
* @return httpfs admin group.
*/
public String getAdminGroup() {
return adminGroup;
}
}
| 4,065 | 28.897059 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSAuthenticationFilter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.http.server;
import org.apache.commons.io.Charsets;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.web.WebHdfsConstants;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticationFilter;
import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticationHandler;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.Reader;
import java.util.Map;
import java.util.Properties;
/**
* Subclass of hadoop-auth <code>AuthenticationFilter</code> that obtains its configuration
* from HttpFSServer's server configuration.
*/
@InterfaceAudience.Private
public class HttpFSAuthenticationFilter
extends DelegationTokenAuthenticationFilter {
private static final String CONF_PREFIX = "httpfs.authentication.";
private static final String SIGNATURE_SECRET_FILE = SIGNATURE_SECRET + ".file";
/**
* Returns the hadoop-auth configuration from HttpFSServer's configuration.
* <p>
* It returns all HttpFSServer's configuration properties prefixed with
* <code>httpfs.authentication</code>. The <code>httpfs.authentication</code>
* prefix is removed from the returned property names.
*
* @param configPrefix parameter not used.
* @param filterConfig parameter not used.
*
* @return hadoop-auth configuration read from HttpFSServer's configuration.
*/
@Override
protected Properties getConfiguration(String configPrefix,
FilterConfig filterConfig) throws ServletException{
Properties props = new Properties();
Configuration conf = HttpFSServerWebApp.get().getConfig();
props.setProperty(AuthenticationFilter.COOKIE_PATH, "/");
for (Map.Entry<String, String> entry : conf) {
String name = entry.getKey();
if (name.startsWith(CONF_PREFIX)) {
String value = conf.get(name);
name = name.substring(CONF_PREFIX.length());
props.setProperty(name, value);
}
}
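    // The secret used to sign authentication cookies must be provided in a
    // file; fail fast if the property is missing.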
String signatureSecretFile = props.getProperty(SIGNATURE_SECRET_FILE, null);
if (signatureSecretFile == null) {
throw new RuntimeException("Undefined property: " + SIGNATURE_SECRET_FILE);
}
try {
StringBuilder secret = new StringBuilder();
Reader reader = new InputStreamReader(new FileInputStream(
signatureSecretFile), Charsets.UTF_8);
int c = reader.read();
while (c > -1) {
secret.append((char)c);
c = reader.read();
}
reader.close();
props.setProperty(AuthenticationFilter.SIGNATURE_SECRET, secret.toString());
} catch (IOException ex) {
throw new RuntimeException("Could not read HttpFS signature secret file: " + signatureSecretFile);
}
setAuthHandlerClass(props);
props.setProperty(KerberosDelegationTokenAuthenticationHandler.TOKEN_KIND,
WebHdfsConstants.WEBHDFS_TOKEN_KIND.toString());
return props;
}
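  /**
   * Returns the proxyuser configuration read from HttpFSServer's
   * configuration; the <code>httpfs.</code> prefix is stripped from the
   * returned property names.
   *
   * @param filterConfig parameter not used.
   *
   * @return proxyuser configuration.
   */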
protected Configuration getProxyuserConfiguration(FilterConfig filterConfig) {
Map<String, String> proxyuserConf = HttpFSServerWebApp.get().getConfig().
getValByRegex("httpfs\\.proxyuser\\.");
Configuration conf = new Configuration(false);
for (Map.Entry<String, String> entry : proxyuserConf.entrySet()) {
conf.set(entry.getKey().substring("httpfs.".length()), entry.getValue());
}
return conf;
}
}
| 4,420 | 37.780702 | 104 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/lang/XException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.lang;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.lib.util.Check;
import java.text.MessageFormat;
/**
 * Generic exception that requires error codes and uses a message
 * template from the error code.
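 * <p>
 * A minimal usage sketch (the <code>AppError</code> enum, <code>path</code>
 * and <code>ioException</code> below are hypothetical names used only for
 * illustration, not part of this module):
 * <pre>
 *   // AppError, path and ioException are hypothetical.
 *   enum AppError implements XException.ERROR {
 *     FS01("Could not open file [{0}]");
 *     private final String template;
 *     AppError(String template) { this.template = template; }
 *     public String getTemplate() { return template; }
 *   }
 *
 *   throw new XException(AppError.FS01, path, ioException);
 * </pre>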
*/
@InterfaceAudience.Private
public class XException extends Exception {
/**
* Interface to define error codes.
*/
public static interface ERROR {
/**
* Returns the template for the error.
*
* @return the template for the error, the template must be in JDK
* <code>MessageFormat</code> syntax (using {#} positional parameters).
*/
public String getTemplate();
}
private ERROR error;
/**
* Private constructor used by the public constructors.
*
* @param error error code.
* @param message error message.
* @param cause exception cause if any.
*/
private XException(ERROR error, String message, Throwable cause) {
super(message, cause);
this.error = error;
}
/**
* Creates an XException using another XException as cause.
* <p>
* The error code and error message are extracted from the cause.
*
* @param cause exception cause.
*/
public XException(XException cause) {
this(cause.getError(), cause.getMessage(), cause);
}
/**
* Creates an XException using the specified error code. The exception
* message is resolved using the error code template and the passed
* parameters.
*
* @param error error code for the XException.
* @param params parameters to use when creating the error message
* with the error code template.
*/
@SuppressWarnings({"ThrowableResultOfMethodCallIgnored"})
public XException(ERROR error, Object... params) {
this(Check.notNull(error, "error"), format(error, params), getCause(params));
}
/**
* Returns the error code of the exception.
*
* @return the error code of the exception.
*/
public ERROR getError() {
return error;
}
/**
   * Creates a message using an error message template and arguments.
* <p>
* The template must be in JDK <code>MessageFormat</code> syntax
* (using {#} positional parameters).
*
* @param error error code, to get the template from.
* @param args arguments to use for creating the message.
*
* @return the resolved error message.
*/
private static String format(ERROR error, Object... args) {
String template = error.getTemplate();
if (template == null) {
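      // No template defined for the error code: build a default one that
      // simply concatenates all arguments as positional placeholders.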
StringBuilder sb = new StringBuilder();
for (int i = 0; i < args.length; i++) {
sb.append(" {").append(i).append("}");
}
template = sb.deleteCharAt(0).toString();
}
return error + ": " + MessageFormat.format(template, args);
}
/**
   * Returns the last parameter if it is an instance of
   * <code>Throwable</code>, otherwise returns NULL.
   *
   * @param params parameters to look for a cause.
   *
   * @return the last parameter if it is an instance of
   * <code>Throwable</code>, otherwise NULL.
*/
private static Throwable getCause(Object... params) {
Throwable throwable = null;
if (params != null && params.length > 0 && params[params.length - 1] instanceof Throwable) {
throwable = (Throwable) params[params.length - 1];
}
return throwable;
}
}
| 4,178 | 29.50365 | 96 |
java
|