
Commit f4c523b (1 parent: 663eba0)

Revert "HDFS-8499. Refactor BlockInfo class hierarchy with static helper class. Contributed by Zhe Zhang."

This reverts commit c17439c.

31 files changed: 234 additions, 419 deletions

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

Lines changed: 0 additions & 3 deletions
@@ -635,9 +635,6 @@ Release 2.8.0 - UNRELEASED
     HDFS-7923. The DataNodes should rate-limit their full block reports by
     asking the NN on heartbeat messages (cmccabe)
 
-    HDFS-8499. Refactor BlockInfo class hierarchy with static helper class.
-    (Zhe Zhang via wang)
-
     HDFS-8540. Mover should exit with NO_MOVE_BLOCK if no block can be moved.
     (surendra singh lilhore via szetszwo)
 

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java

Lines changed: 1 addition & 1 deletion
@@ -79,7 +79,7 @@ public interface BlockCollection {
    * Convert the last block of the collection to an under-construction block
    * and set the locations.
    */
-  public BlockInfoUnderConstruction setLastBlock(BlockInfo lastBlock,
+  public BlockInfoContiguousUnderConstruction setLastBlock(BlockInfo lastBlock,
       DatanodeStorageInfo[] targets) throws IOException;
 
   /**

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java

Lines changed: 11 additions & 13 deletions
@@ -51,7 +51,7 @@ public abstract class BlockInfo extends Block
    * per replica is 42 bytes (LinkedList#Entry object per replica) versus 16
    * bytes using the triplets.
    */
-  Object[] triplets;
+  protected Object[] triplets;
 
   /**
    * Construct an entry for blocksmap
@@ -295,7 +295,7 @@ public BlockInfo moveBlockToHead(BlockInfo head,
   /**
    * BlockInfo represents a block that is not being constructed.
    * In order to start modifying the block, the BlockInfo should be converted
-   * to {@link BlockInfoUnderConstruction}.
+   * to {@link BlockInfoContiguousUnderConstruction}.
    * @return {@link BlockUCState#COMPLETE}
    */
   public BlockUCState getBlockUCState() {
@@ -312,29 +312,27 @@ public boolean isComplete() {
   }
 
   /**
-   * Convert a block to an under construction block.
+   * Convert a complete block to an under construction block.
    * @return BlockInfoUnderConstruction - an under construction block.
    */
-  public BlockInfoUnderConstruction convertToBlockUnderConstruction(
+  public BlockInfoContiguousUnderConstruction convertToBlockUnderConstruction(
       BlockUCState s, DatanodeStorageInfo[] targets) {
     if(isComplete()) {
-      return convertCompleteBlockToUC(s, targets);
+      BlockInfoContiguousUnderConstruction ucBlock =
+          new BlockInfoContiguousUnderConstruction(this,
+          getBlockCollection().getPreferredBlockReplication(), s, targets);
+      ucBlock.setBlockCollection(getBlockCollection());
+      return ucBlock;
     }
     // the block is already under construction
-    BlockInfoUnderConstruction ucBlock =
-        (BlockInfoUnderConstruction)this;
+    BlockInfoContiguousUnderConstruction ucBlock =
+        (BlockInfoContiguousUnderConstruction)this;
     ucBlock.setBlockUCState(s);
     ucBlock.setExpectedLocations(targets);
     ucBlock.setBlockCollection(getBlockCollection());
     return ucBlock;
   }
 
-  /**
-   * Convert a complete block to an under construction block.
-   */
-  abstract BlockInfoUnderConstruction convertCompleteBlockToUC(
-      BlockUCState s, DatanodeStorageInfo[] targets);
-
   @Override
   public int hashCode() {
     // Super implementation is sufficient
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java

Lines changed: 62 additions & 15 deletions
@@ -19,13 +19,13 @@
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 
 /**
  * Subclass of {@link BlockInfo}, used for a block with replication scheme.
  */
 @InterfaceAudience.Private
 public class BlockInfoContiguous extends BlockInfo {
+  public static final BlockInfoContiguous[] EMPTY_ARRAY = {};
 
   public BlockInfoContiguous(short size) {
     super(size);
@@ -40,37 +40,84 @@ public BlockInfoContiguous(Block blk, short size) {
    * This is used to convert BlockReplicationInfoUnderConstruction
    * @param from BlockReplicationInfo to copy from.
    */
-  protected BlockInfoContiguous(BlockInfo from) {
+  protected BlockInfoContiguous(BlockInfoContiguous from) {
     super(from);
   }
 
+  /**
+   * Ensure that there is enough space to include num more triplets.
+   * @return first free triplet index.
+   */
+  private int ensureCapacity(int num) {
+    assert this.triplets != null : "BlockInfo is not initialized";
+    int last = numNodes();
+    if (triplets.length >= (last+num)*3) {
+      return last;
+    }
+    /* Not enough space left. Create a new array. Should normally
+     * happen only when replication is manually increased by the user. */
+    Object[] old = triplets;
+    triplets = new Object[(last+num)*3];
+    System.arraycopy(old, 0, triplets, 0, last * 3);
+    return last;
+  }
+
   @Override
   boolean addStorage(DatanodeStorageInfo storage) {
-    return ContiguousBlockStorageOp.addStorage(this, storage);
+    // find the last null node
+    int lastNode = ensureCapacity(1);
+    setStorageInfo(lastNode, storage);
+    setNext(lastNode, null);
+    setPrevious(lastNode, null);
+    return true;
   }
 
   @Override
   boolean removeStorage(DatanodeStorageInfo storage) {
-    return ContiguousBlockStorageOp.removeStorage(this, storage);
+    int dnIndex = findStorageInfo(storage);
+    if (dnIndex < 0) { // the node is not found
+      return false;
+    }
+    assert getPrevious(dnIndex) == null && getNext(dnIndex) == null :
+        "Block is still in the list and must be removed first.";
+    // find the last not null node
+    int lastNode = numNodes()-1;
+    // replace current node triplet by the lastNode one
+    setStorageInfo(dnIndex, getStorageInfo(lastNode));
+    setNext(dnIndex, getNext(lastNode));
+    setPrevious(dnIndex, getPrevious(lastNode));
+    // set the last triplet to null
+    setStorageInfo(lastNode, null);
+    setNext(lastNode, null);
+    setPrevious(lastNode, null);
+    return true;
   }
 
   @Override
   public int numNodes() {
-    return ContiguousBlockStorageOp.numNodes(this);
+    assert this.triplets != null : "BlockInfo is not initialized";
+    assert triplets.length % 3 == 0 : "Malformed BlockInfo";
+
+    for (int idx = getCapacity()-1; idx >= 0; idx--) {
+      if (getDatanode(idx) != null) {
+        return idx + 1;
+      }
+    }
+    return 0;
   }
 
   @Override
   void replaceBlock(BlockInfo newBlock) {
-    ContiguousBlockStorageOp.replaceBlock(this, newBlock);
-  }
+    assert newBlock instanceof BlockInfoContiguous;
+    for (int i = this.numNodes() - 1; i >= 0; i--) {
+      final DatanodeStorageInfo storage = this.getStorageInfo(i);
+      final boolean removed = storage.removeBlock(this);
+      assert removed : "currentBlock not found.";
 
-  @Override
-  BlockInfoUnderConstruction convertCompleteBlockToUC(
-      HdfsServerConstants.BlockUCState s, DatanodeStorageInfo[] targets) {
-    BlockInfoUnderConstructionContiguous ucBlock =
-        new BlockInfoUnderConstructionContiguous(this,
-        getBlockCollection().getPreferredBlockReplication(), s, targets);
-    ucBlock.setBlockCollection(getBlockCollection());
-    return ucBlock;
+      final DatanodeStorageInfo.AddBlockResult result = storage.addBlock(
+          newBlock);
+      assert result == DatanodeStorageInfo.AddBlockResult.ADDED :
+          "newBlock already exists.";
+    }
   }
 }
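The restored removeStorage() above compacts the array with a swap-with-last step so occupied triplets stay contiguous at the front, which keeps numNodes() a simple backwards scan. A small standalone illustration of that pattern, with made-up names and data rather than HDFS types, is:

import java.util.Arrays;

// Standalone illustration of the swap-with-last step removeStorage() performs:
// copy the last occupied slot over the removed one, then clear the last slot,
// so the occupied region stays contiguous.
public class SwapWithLastDemo {
  public static void main(String[] args) {
    Object[] storages = {"s0", "s1", "s2", "s3", null, null};
    int removeIdx = 1;   // replica being removed
    int lastIdx = 3;     // last non-null slot
    storages[removeIdx] = storages[lastIdx];
    storages[lastIdx] = null;
    System.out.println(Arrays.toString(storages)); // [s0, s3, s2, null, null, null]
  }
}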
