Use of org.apache.hadoop.ozone.container.common.helpers.BlockData in project ozone by apache.
The class DeleteBlocksCommandHandler, method markBlocksForDeletionSchemaV1.
private void markBlocksForDeletionSchemaV1(
    KeyValueContainerData containerData, DeletedBlocksTransaction delTX)
    throws IOException {
  long containerId = delTX.getContainerID();
  if (!isTxnIdValid(containerId, containerData, delTX)) {
    return;
  }
  int newDeletionBlocks = 0;
  try (ReferenceCountedDB containerDB = BlockUtils.getDB(containerData, conf)) {
    Table<String, BlockData> blockDataTable =
        containerDB.getStore().getBlockDataTable();
    Table<String, ChunkInfoList> deletedBlocksTable =
        containerDB.getStore().getDeletedBlocksTable();
    try (BatchOperation batch =
        containerDB.getStore().getBatchHandler().initBatchOperation()) {
      for (Long blkLong : delTX.getLocalIDList()) {
        String blk = blkLong.toString();
        BlockData blkInfo = blockDataTable.get(blk);
        if (blkInfo != null) {
          String deletingKey = OzoneConsts.DELETING_KEY_PREFIX + blk;
          if (blockDataTable.get(deletingKey) != null
              || deletedBlocksTable.get(blk) != null) {
            if (LOG.isDebugEnabled()) {
              LOG.debug(String.format(
                  "Ignoring delete for block %s in container %d."
                      + " Entry already added.", blk, containerId));
            }
            continue;
          }
          // Found the block in container db,
          // use an atomic update to change its state to deleting.
          blockDataTable.putWithBatch(batch, deletingKey, blkInfo);
          blockDataTable.deleteWithBatch(batch, blk);
          newDeletionBlocks++;
          if (LOG.isDebugEnabled()) {
            LOG.debug("Transited Block {} to DELETING state in container {}",
                blk, containerId);
          }
        } else {
          if (LOG.isDebugEnabled()) {
            LOG.debug("Block {} not found or already under deletion in"
                + " container {}, skip deleting it.", blk, containerId);
          }
        }
      }
      updateMetaData(containerData, delTX, newDeletionBlocks, containerDB,
          batch);
      containerDB.getStore().getBatchHandler().commitBatchOperation(batch);
    } catch (IOException e) {
      // If marking the blocks fails, fail this transaction without acking it
      // to SCM; SCM will resend the transaction with a certain number of
      // retries.
      throw new IOException(
          "Failed to delete blocks for TXID = " + delTX.getTxID(), e);
    }
  }
}
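For orientation, here is a minimal test-side sketch of the schema V1 convention the handler relies on: a block marked for deletion is re-keyed under OzoneConsts.DELETING_KEY_PREFIX and its plain key disappears from the block data table. The helper name countDeletingEntries and the ConfigurationSource parameter are illustrative assumptions, not part of DeleteBlocksCommandHandler.

// Hypothetical helper (assumed, for illustration only): counts the entries
// that markBlocksForDeletionSchemaV1 has moved under the deleting prefix.
private long countDeletingEntries(KeyValueContainerData containerData,
    ConfigurationSource conf) throws IOException {
  try (ReferenceCountedDB db = BlockUtils.getDB(containerData, conf)) {
    KeyPrefixFilter filter =
        new KeyPrefixFilter().addFilter(OzoneConsts.DELETING_KEY_PREFIX);
    return db.getStore().getBlockDataTable()
        .getRangeKVs(null, Integer.MAX_VALUE, filter).size();
  }
}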
Use of org.apache.hadoop.ozone.container.common.helpers.BlockData in project ozone by apache.
The class ContainerTestHelper, method newPutBlockRequestBuilder.
public static Builder newPutBlockRequestBuilder(Pipeline pipeline,
    ContainerProtos.WriteChunkRequestProtoOrBuilder writeRequest)
    throws IOException {
  LOG.trace("putBlock: {} to pipeline={}", writeRequest.getBlockID(), pipeline);
  ContainerProtos.PutBlockRequestProto.Builder putRequest =
      ContainerProtos.PutBlockRequestProto.newBuilder();
  BlockData blockData =
      new BlockData(BlockID.getFromProtobuf(writeRequest.getBlockID()));
  List<ContainerProtos.ChunkInfo> newList = new LinkedList<>();
  newList.add(writeRequest.getChunkData());
  blockData.setChunks(newList);
  blockData.setBlockCommitSequenceId(0);
  putRequest.setBlockData(blockData.getProtoBufMessage());
  Builder request = ContainerCommandRequestProto.newBuilder();
  request.setCmdType(ContainerProtos.Type.PutBlock);
  request.setContainerID(blockData.getContainerID());
  request.setPutBlock(putRequest);
  request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
  return request;
}
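A hedged usage sketch of the builder: pairing it with an earlier WriteChunk request and sending it through an XceiverClientSpi. The writeChunkRequest and client variables are assumptions standing in for whatever the surrounding test sets up.

// Hypothetical test usage (writeChunkRequest and client are assumed to exist):
ContainerCommandRequestProto putBlockRequest =
    ContainerTestHelper.newPutBlockRequestBuilder(
        pipeline, writeChunkRequest.getWriteChunk()).build();
ContainerCommandResponseProto response = client.sendCommand(putBlockRequest);
Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());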
Use of org.apache.hadoop.ozone.container.common.helpers.BlockData in project ozone by apache.
The class BlockManagerImpl, method getBlockByID.
private BlockData getBlockByID(ReferenceCountedDB db, BlockID blockID)
    throws IOException {
  String blockKey = Long.toString(blockID.getLocalID());
  BlockData blockData = db.getStore().getBlockDataTable().get(blockKey);
  if (blockData == null) {
    throw new StorageContainerException(NO_SUCH_BLOCK_ERR_MSG, NO_SUCH_BLOCK);
  }
  return blockData;
}
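As a rough sketch, a caller inside BlockManagerImpl could look roughly like the following; the getBlock shape and the config field are assumptions here, not the exact production method.

// Assumed caller shape (not the exact production code): open the container's
// DB with reference counting, then resolve the block by its local ID.
public BlockData getBlock(Container container, BlockID blockID)
    throws IOException {
  KeyValueContainerData containerData =
      (KeyValueContainerData) container.getContainerData();
  try (ReferenceCountedDB db = BlockUtils.getDB(containerData, config)) {
    return getBlockByID(db, blockID);
  }
}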
Use of org.apache.hadoop.ozone.container.common.helpers.BlockData in project ozone by apache.
The class TestStorageContainerManagerHelper, method getPendingDeletionBlocks.
public List<String> getPendingDeletionBlocks(Long containerID) throws IOException {
  List<String> pendingDeletionBlocks = Lists.newArrayList();
  ReferenceCountedDB meta = getContainerMetadata(containerID);
  KeyPrefixFilter filter =
      new KeyPrefixFilter().addFilter(OzoneConsts.DELETING_KEY_PREFIX);
  List<? extends Table.KeyValue<String, BlockData>> kvs =
      meta.getStore().getBlockDataTable()
          .getRangeKVs(null, Integer.MAX_VALUE, filter);
  for (Table.KeyValue<String, BlockData> entry : kvs) {
    pendingDeletionBlocks.add(
        entry.getKey().replace(OzoneConsts.DELETING_KEY_PREFIX, ""));
  }
  meta.close();
  return pendingDeletionBlocks;
}
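A small usage sketch, assuming a helper instance and expected values supplied by the surrounding test (helper, expectedPendingBlocks, and blockID are assumptions):

// Hypothetical assertions against the helper's result:
List<String> pending = helper.getPendingDeletionBlocks(containerID);
Assert.assertEquals(expectedPendingBlocks, pending.size());
// Keys come back with DELETING_KEY_PREFIX already stripped, so they can be
// compared against the blocks' local IDs directly.
Assert.assertTrue(pending.contains(Long.toString(blockID.getLocalID())));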
Use of org.apache.hadoop.ozone.container.common.helpers.BlockData in project ozone by apache.
The class TestFailureHandlingByClient, method testBlockCountOnFailures.
/**
 * Test whether blockData and container metadata (block count and used
 * bytes) are updated correctly when there is a write failure.
 * We can combine this test with {@link #testBlockWritesWithDnFailures()}
 * as that test also simulates a write failure and the client writes the
 * failed chunks to a new block.
 */
private void testBlockCountOnFailures(OmKeyInfo omKeyInfo) throws Exception {
  // testBlockWritesWithDnFailures writes chunkSize * 2.5 bytes of data into
  // the KeyOutputStream. But before the output stream is closed, 2 of the DNs
  // in the pipeline being written to are shut down. So the key will be
  // written to 2 blocks, as at least the last 0.5 chunk would not be
  // committed to the first block before the stream is closed.
  /**
   * There are 3 possible scenarios:
   * 1. Block1 has 2 chunks and OMKeyInfo also has 2 chunks against this block
   *    => Block2 should have 1 chunk
   *    (2 chunks were written to Block1, committed and acknowledged by
   *    CommitWatcher)
   * 2. Block1 has 1 chunk and OMKeyInfo has 1 chunk against this block
   *    => Block2 should have 2 chunks
   *    (Possibly 2 chunks were written but only 1 was committed to the
   *    block)
   * 3. Block1 has 2 chunks but OMKeyInfo has only 1 chunk against this block
   *    => Block2 should have 2 chunks
   *    (This happens when the 2nd chunk has been committed to Block1 but
   *    not acknowledged by CommitWatcher before pipeline shutdown)
   */
  // Get information about the first and second block (in different pipelines).
  List<OmKeyLocationInfo> locationList =
      omKeyInfo.getLatestVersionLocations().getLocationList();
  long containerId1 = locationList.get(0).getContainerID();
  List<DatanodeDetails> block1DNs = locationList.get(0).getPipeline().getNodes();
  long containerId2 = locationList.get(1).getContainerID();
  List<DatanodeDetails> block2DNs = locationList.get(1).getPipeline().getNodes();
  int block2ExpectedChunkCount;
  if (locationList.get(0).getLength() == 2 * chunkSize) {
    // Scenario 1
    block2ExpectedChunkCount = 1;
  } else {
    // Scenario 2 or 3
    block2ExpectedChunkCount = 2;
  }
  // For the first block, the first 2 DNs in the pipeline are shut down (to
  // simulate a failure). It should have 1 or 2 chunks (depending on whether
  // the DN CommitWatcher successfully acknowledged the 2nd chunk write or
  // not). The 3rd chunk would not exist on the first pipeline as the pipeline
  // would be closed before the last 0.5 chunk was committed to the block.
  KeyValueContainerData containerData1 =
      ((KeyValueContainer) cluster.getHddsDatanode(block1DNs.get(2))
          .getDatanodeStateMachine().getContainer().getContainerSet()
          .getContainer(containerId1)).getContainerData();
  try (ReferenceCountedDB containerDb1 = BlockUtils.getDB(containerData1, conf)) {
    BlockData blockData1 = containerDb1.getStore().getBlockDataTable()
        .get(Long.toString(locationList.get(0).getBlockID().getLocalID()));
    // The first block could hold 1 or 2 chunks' worth of data.
    int block1NumChunks = blockData1.getChunks().size();
    Assert.assertTrue(block1NumChunks >= 1);
    Assert.assertEquals(chunkSize * block1NumChunks, blockData1.getSize());
    Assert.assertEquals(1, containerData1.getBlockCount());
    Assert.assertEquals(chunkSize * block1NumChunks, containerData1.getBytesUsed());
  }
  // Verify that the second block holds the remaining data.
  KeyValueContainerData containerData2 =
      ((KeyValueContainer) cluster.getHddsDatanode(block2DNs.get(0))
          .getDatanodeStateMachine().getContainer().getContainerSet()
          .getContainer(containerId2)).getContainerData();
  try (ReferenceCountedDB containerDb2 = BlockUtils.getDB(containerData2, conf)) {
    BlockData blockData2 = containerDb2.getStore().getBlockDataTable()
        .get(Long.toString(locationList.get(1).getBlockID().getLocalID()));
    // The second block should have the expected number of chunks.
    Assert.assertEquals(block2ExpectedChunkCount, blockData2.getChunks().size());
    Assert.assertEquals(1, containerData2.getBlockCount());
    int expectedBlockSize;
    if (block2ExpectedChunkCount == 1) {
      expectedBlockSize = chunkSize / 2;
    } else {
      expectedBlockSize = chunkSize + chunkSize / 2;
    }
    Assert.assertEquals(expectedBlockSize, blockData2.getSize());
    Assert.assertEquals(expectedBlockSize, containerData2.getBytesUsed());
  }
}
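The size assertions above follow from simple bookkeeping: roughly 2.5 * chunkSize bytes are written in total, so whatever was not committed to the first block is re-written to the second. A small sketch of that arithmetic (the helper below is illustrative, not part of the test class):

// Assumed helper, for illustration of the expected-size arithmetic only.
static long expectedBlock2Size(long chunkSize, int block2ExpectedChunkCount) {
  long totalWritten = 2 * chunkSize + chunkSize / 2;  // 2.5 chunks in total
  long block1Size = (block2ExpectedChunkCount == 1)
      ? 2 * chunkSize   // scenario 1: both chunks committed to block 1
      : chunkSize;      // scenarios 2 and 3: only one chunk committed
  return totalWritten - block1Size;  // 0.5 or 1.5 chunks for block 2
}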