Use of org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten in project hadoop by apache.
The class FsDatasetImplTestUtils, method createRBW.
@Override
public Replica createRBW(FsVolumeSpi volume, ExtendedBlock eb)
    throws IOException {
  FsVolumeImpl vol = (FsVolumeImpl) volume;
  final String bpid = eb.getBlockPoolId();
  final Block block = eb.getLocalBlock();
  ReplicaBeingWritten rbw = new ReplicaBeingWritten(
      eb.getLocalBlock(), volume,
      vol.createRbwFile(bpid, block).getParentFile(), null);
  rbw.getBlockFile().createNewFile();
  rbw.getMetaFile().createNewFile();
  dataset.volumeMap.add(bpid, rbw);
  return rbw;
}
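A minimal usage sketch, not part of the aggregated source: creating an RBW replica for a fresh block through the test-utils facade. MiniDFSCluster#getFsDatasetTestUtils and the one-argument createRBW(ExtendedBlock) overload are assumptions based on the surrounding Hadoop test framework, not verbatim calls from this snippet.

// Hedged sketch; getFsDatasetTestUtils and the one-arg createRBW overload
// are assumptions about the test framework.
MiniDFSCluster cluster =
    new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
try {
  cluster.waitActive();
  String bpid = cluster.getNamesystem().getBlockPoolId();
  // Use a block id that does not exist yet, so createRbwFile succeeds.
  ExtendedBlock eb = new ExtendedBlock(bpid, 1000L, 0L, 1001L);
  Replica rbw = cluster.getFsDatasetTestUtils(0).createRBW(eb);
  // The replica is now registered in the volume map in RBW state, with
  // empty block and meta files on disk (see the implementation above).
  assertEquals(ReplicaState.RBW, rbw.getState());
} finally {
  cluster.shutdown();
}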
Use of org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten in project hadoop by apache.
The class TestFileAppend, method testConcurrentAppendRead.
@Test(timeout = 10000)
public void testConcurrentAppendRead()
    throws IOException, TimeoutException, InterruptedException {
  // Create a finalized replica and append to it.
  // Read block data and checksum; verify the checksum.
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
  conf.setInt("dfs.min.replication", 1);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    cluster.waitActive();
    DataNode dn = cluster.getDataNodes().get(0);
    FsDatasetSpi<?> dataSet = DataNodeTestUtils.getFSDataset(dn);
    // Create a file with 1 byte of data.
    long initialFileLength = 1;
    DistributedFileSystem fs = cluster.getFileSystem();
    Path fileName = new Path("/appendCorruptBlock");
    DFSTestUtil.createFile(fs, fileName, initialFileLength, (short) 1, 0);
    DFSTestUtil.waitReplication(fs, fileName, (short) 1);
    Assert.assertTrue("File not created", fs.exists(fileName));
    // Call FsDatasetImpl#append to append the block file,
    // which converts it to an rbw replica.
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
    long newGS = block.getGenerationStamp() + 1;
    ReplicaHandler replicaHandler =
        dataSet.append(block, newGS, initialFileLength);
    // Write data to the block file.
    ReplicaBeingWritten rbw =
        (ReplicaBeingWritten) replicaHandler.getReplica();
    ReplicaOutputStreams outputStreams =
        rbw.createStreams(false, DEFAULT_CHECKSUM);
    OutputStream dataOutput = outputStreams.getDataOut();
    byte[] appendBytes = new byte[1];
    dataOutput.write(appendBytes, 0, 1);
    dataOutput.flush();
    dataOutput.close();
    // Update the checksum file.
    final int smallBufferSize = DFSUtilClient.getSmallBufferSize(conf);
    FsDatasetUtil.computeChecksum(rbw.getMetaFile(), rbw.getMetaFile(),
        rbw.getBlockFile(), smallBufferSize, conf);
    // Read the block. The DataNode BlockSender should read from the rbw
    // replica's in-memory checksum, rather than the on-disk checksum;
    // otherwise it will see a checksum mismatch error.
    final byte[] readBlock = DFSTestUtil.readFileBuffer(fs, fileName);
    assertEquals("should have read only one byte!", 1, readBlock.length);
  } finally {
    cluster.shutdown();
  }
}
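For context, DEFAULT_CHECKSUM is a fixture of TestFileAppend rather than something defined in this snippet. A definition along the following lines (the checksum type and bytes-per-checksum are assumptions) makes the createStreams call above self-contained:

// Assumed test fixture; CRC32C and 512 bytes-per-checksum are illustrative.
private static final DataChecksum DEFAULT_CHECKSUM =
    DataChecksum.newDataChecksum(DataChecksum.Type.CRC32C, 512);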
Use of org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten in project hadoop by apache.
The class TestBlockListAsLongs, method testUc.
@Test
public void testUc() {
  BlockListAsLongs blocks =
      checkReport(new ReplicaBeingWritten(b1, null, null, null));
  assertArrayEquals(
      new long[] { 0, 1, -1, -1, -1, 1, 11, 111,
          ReplicaState.RBW.getValue() },
      blocks.getBlockListAsLongs());
}
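The expected array follows the legacy BlockListAsLongs long-array encoding. Decoded, it reads as follows (the field names are descriptive, not API names):

// { 0, 1 }            -> 0 finalized replicas, 1 under-construction replica
// { -1, -1, -1 }      -> delimiter between the finalized and uc sections
// { 1, 11, 111 }      -> b1's blockId, numBytes, and generation stamp
// { RBW.getValue() }  -> replica state of the under-construction entry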
Use of org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten in project hadoop by apache.
The class TestBlockListAsLongs, method testFuzz.
@Test
public void testFuzz() throws InterruptedException {
  Replica[] replicas = new Replica[100000];
  Random rand = new Random(0); // fixed seed, so failures are reproducible
  for (int i = 0; i < replicas.length; i++) {
    Block b = new Block(rand.nextLong(), i, i << 4);
    // The bound must be 3, not 2: with nextInt(2) the case 2 branch below
    // is unreachable and ReplicaWaitingToBeRecovered is never exercised.
    switch (rand.nextInt(3)) {
      case 0:
        replicas[i] = new FinalizedReplica(b, null, null);
        break;
      case 1:
        replicas[i] = new ReplicaBeingWritten(b, null, null, null);
        break;
      case 2:
        replicas[i] = new ReplicaWaitingToBeRecovered(b, null, null);
        break;
    }
  }
  checkReport(replicas);
}
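checkReport is a private helper of TestBlockListAsLongs; roughly, it encodes the replicas into a report and verifies that decoding yields them back. A hedged sketch of that round trip, using only the builder API that also appears elsewhere on this page; the map-based matching is illustrative, not the helper's actual body:

// Hedged round-trip sketch of what checkReport is expected to verify.
Map<Long, Replica> expected = new HashMap<>();
for (Replica r : replicas) {
  expected.put(r.getBlockId(), r);
}
BlockListAsLongs.Builder builder = BlockListAsLongs.builder();
for (Replica r : replicas) {
  builder.add(r);
}
for (BlockListAsLongs.BlockReportReplica decoded : builder.build()) {
  Replica original = expected.get(decoded.getBlockId());
  assertEquals(original.getNumBytes(), decoded.getNumBytes());
  assertEquals(original.getGenerationStamp(), decoded.getGenerationStamp());
  assertEquals(original.getState(), decoded.getState());
}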
Use of org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten in project hadoop by apache.
The class TestBlockManager, method testSafeModeIBRBeforeFirstFullBR.
/**
 * Test that when the NN starts in safe mode and first receives an
 * incremental block report, then the first full block report, both are
 * processed correctly.
 */
@Test
public void testSafeModeIBRBeforeFirstFullBR() throws Exception {
  // Pretend to be in safe mode.
  doReturn(true).when(fsn).isInStartupSafeMode();
  DatanodeDescriptor node = nodes.get(0);
  DatanodeStorageInfo ds = node.getStorageInfos()[0];
  node.setAlive(true);
  DatanodeRegistration nodeReg =
      new DatanodeRegistration(node, null, null, "");
  // Register the new node.
  bm.getDatanodeManager().registerDatanode(nodeReg);
  bm.getDatanodeManager().addDatanode(node);
  assertEquals(node, bm.getDatanodeManager().getDatanode(node));
  assertEquals(0, ds.getBlockReportCount());
  // Build an incremental report.
  List<ReceivedDeletedBlockInfo> rdbiList = new ArrayList<>();
  // Build a full report.
  BlockListAsLongs.Builder builder = BlockListAsLongs.builder();
  // blk_42 is finalized; the id is arbitrary.
  long receivedBlockId = 42;
  BlockInfo receivedBlock = addBlockToBM(receivedBlockId);
  rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivedBlock),
      ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null));
  builder.add(new FinalizedReplica(receivedBlock, null, null));
  // blk_43 is under construction.
  long receivingBlockId = 43;
  BlockInfo receivingBlock = addUcBlockToBM(receivingBlockId);
  rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivingBlock),
      ReceivedDeletedBlockInfo.BlockStatus.RECEIVING_BLOCK, null));
  builder.add(new ReplicaBeingWritten(receivingBlock, null, null, null));
  // blk_44 has 2 records in the IBR and is finalized, so the full BR has
  // 1 record for it.
  long receivingReceivedBlockId = 44;
  BlockInfo receivingReceivedBlock = addBlockToBM(receivingReceivedBlockId);
  rdbiList.add(new ReceivedDeletedBlockInfo(
      new Block(receivingReceivedBlock),
      ReceivedDeletedBlockInfo.BlockStatus.RECEIVING_BLOCK, null));
  rdbiList.add(new ReceivedDeletedBlockInfo(
      new Block(receivingReceivedBlock),
      ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null));
  builder.add(new FinalizedReplica(receivingReceivedBlock, null, null));
  // blk_45 is not in the full BR, because it has been deleted.
  long receivedDeletedBlockId = 45;
  rdbiList.add(new ReceivedDeletedBlockInfo(
      new Block(receivedDeletedBlockId),
      ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null));
  rdbiList.add(new ReceivedDeletedBlockInfo(
      new Block(receivedDeletedBlockId),
      ReceivedDeletedBlockInfo.BlockStatus.DELETED_BLOCK, null));
  // blk_46 has existed on the DN for a long time, so it is in the full BR
  // but not in the IBR.
  long existedBlockId = 46;
  BlockInfo existedBlock = addBlockToBM(existedBlockId);
  builder.add(new FinalizedReplica(existedBlock, null, null));
  // Process the IBR and the full BR.
  StorageReceivedDeletedBlocks srdb = new StorageReceivedDeletedBlocks(
      new DatanodeStorage(ds.getStorageID()),
      rdbiList.toArray(new ReceivedDeletedBlockInfo[rdbiList.size()]));
  bm.processIncrementalBlockReport(node, srdb);
  // Make sure this is the first full report.
  assertEquals(0, ds.getBlockReportCount());
  bm.processReport(node, new DatanodeStorage(ds.getStorageID()),
      builder.build(),
      new BlockReportContext(1, 0, System.nanoTime(), 0, true));
  assertEquals(1, ds.getBlockReportCount());
  // Verify the storage info is correct.
  assertTrue(bm.getStoredBlock(new Block(receivedBlockId))
      .findStorageInfo(ds) >= 0);
  assertTrue(bm.getStoredBlock(new Block(receivingBlockId))
      .getUnderConstructionFeature().getNumExpectedLocations() > 0);
  assertTrue(bm.getStoredBlock(new Block(receivingReceivedBlockId))
      .findStorageInfo(ds) >= 0);
  assertNull(bm.getStoredBlock(new Block(receivedDeletedBlockId)));
  assertTrue(bm.getStoredBlock(new Block(existedBlock))
      .findStorageInfo(ds) >= 0);
}
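The test depends on two private TestBlockManager helpers, addBlockToBM and addUcBlockToBM, which register a finalized or under-construction block in the BlockManager's blocksMap so the reports above have something to match against. A rough sketch of the finalized variant; the BlockCollection mock and the direct blocksMap access are assumptions about the test class's internals, not verbatim Hadoop code:

// Hedged sketch of addBlockToBM; the mock wiring and package-private
// blocksMap access are assumptions.
private BlockInfo addBlockToBM(long blockId) {
  BlockInfo blockInfo =
      new BlockInfoContiguous(new Block(blockId), (short) 3);
  BlockCollection bc = Mockito.mock(BlockCollection.class);
  // TestBlockManager lives in the blockmanagement package, so it can reach
  // into bm.blocksMap directly to register the block.
  bm.blocksMap.addBlockCollection(blockInfo, bc);
  return blockInfo;
}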