Use of org.apache.hadoop.ozone.container.common.helpers.ChunkInfo in project ozone by apache.
The class KeyValueHandler, method handleGetSmallFile.
/**
* Handle Get Small File operation. Gets a data stream using a key. This
* helps in reducing the RPC overhead for small files. Calls BlockManager and
* ChunkManager to process the request.
*/
ContainerCommandResponseProto handleGetSmallFile(
    ContainerCommandRequestProto request, KeyValueContainer kvContainer) {
  if (!request.hasGetSmallFile()) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Malformed Get Small File request. trace ID: {}",
          request.getTraceID());
    }
    return malformedRequest(request);
  }
  GetSmallFileRequestProto getSmallFileReq = request.getGetSmallFile();
  try {
    BlockID blockID = BlockID.getFromProtobuf(
        getSmallFileReq.getBlock().getBlockID());
    checkContainerIsHealthy(kvContainer, blockID, Type.GetSmallFile);
    BlockData responseData = blockManager.getBlock(kvContainer, blockID);
    ContainerProtos.ChunkInfo chunkInfoProto = null;
    List<ByteString> dataBuffers = new ArrayList<>();
    DispatcherContext dispatcherContext =
        new DispatcherContext.Builder().build();
    for (ContainerProtos.ChunkInfo chunk : responseData.getChunks()) {
      // if the block is committed, all chunks must have been committed.
      // Tmp chunk files won't exist here.
      ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(chunk);
      boolean isReadChunkV0 = getReadChunkVersion(request.getGetSmallFile())
          .equals(ContainerProtos.ReadChunkVersion.V0);
      if (isReadChunkV0) {
        // For older clients, set ReadDataIntoSingleBuffer to true so that
        // all the data read from chunk file is returned as a single
        // ByteString. Older clients cannot process data returned as a list
        // of ByteStrings.
        chunkInfo.setReadDataIntoSingleBuffer(true);
      }
      ChunkBuffer data = chunkManager.readChunk(kvContainer, blockID,
          chunkInfo, dispatcherContext);
      dataBuffers.addAll(data.toByteStringList(byteBufferToByteString));
      chunkInfoProto = chunk;
    }
    metrics.incContainerBytesStats(Type.GetSmallFile,
        BufferUtils.getBuffersLen(dataBuffers));
    return getGetSmallFileResponseSuccess(request, dataBuffers,
        chunkInfoProto);
  } catch (StorageContainerException e) {
    return ContainerUtils.logAndReturnError(LOG, e, request);
  } catch (IOException ex) {
    return ContainerUtils.logAndReturnError(LOG,
        new StorageContainerException("Get Small File failed", ex,
            GET_SMALL_FILE_ERROR), request);
  }
}
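The V0 compatibility branch above exists because older clients expect the small file's data as one contiguous ByteString rather than a list of buffers. As a rough illustration (not the handler's own code), a list of per-chunk ByteStrings can be flattened into a single one with the standard protobuf API:

import com.google.protobuf.ByteString;
import java.util.List;

final class SmallFileBuffers {
  // Illustrative sketch: flatten per-chunk buffers into the single
  // ByteString shape that a ReadChunkVersion.V0 client expects.
  static ByteString flatten(List<ByteString> dataBuffers) {
    ByteString result = ByteString.EMPTY;
    for (ByteString buffer : dataBuffers) {
      result = result.concat(buffer);
    }
    return result;
  }
}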
Use of org.apache.hadoop.ozone.container.common.helpers.ChunkInfo in project ozone by apache.
The class FilePerChunkStrategy, method readChunk.
/**
* Reads the data defined by a chunk.
*
* @param container - Container for the chunk
* @param blockID - ID of the block.
* @param info - ChunkInfo.
* @param dispatcherContext dispatcher context info.
* @return a ChunkBuffer containing the chunk data
* @throws StorageContainerException
* TODO: Right now we do not support partial reads and writes of chunks.
* TODO: Explore if we need to do that for ozone.
*/
@Override
public ChunkBuffer readChunk(Container container, BlockID blockID,
    ChunkInfo info, DispatcherContext dispatcherContext)
    throws StorageContainerException {
  checkLayoutVersion(container);
  limitReadSize(info.getLen());
  KeyValueContainer kvContainer = (KeyValueContainer) container;
  KeyValueContainerData containerData = kvContainer.getContainerData();
  HddsVolume volume = containerData.getVolume();
  // In version1, we verify checksum if it is available and return data
  // of the chunk file.
  File finalChunkFile = getChunkFile(kvContainer, blockID, info);
  List<File> possibleFiles = new ArrayList<>();
  possibleFiles.add(finalChunkFile);
  if (dispatcherContext != null && dispatcherContext.isReadFromTmpFile()) {
    possibleFiles.add(getTmpChunkFile(finalChunkFile, dispatcherContext));
    // HDDS-2372. Read finalChunkFile after tmpChunkFile to solve race
    // condition between read and commit.
    possibleFiles.add(finalChunkFile);
  }
  long len = info.getLen();
  long bufferCapacity = ChunkManager.getBufferCapacityForChunkRead(info,
      defaultReadBufferCapacity);
  ByteBuffer[] dataBuffers = BufferUtils.assignByteBuffers(len, bufferCapacity);
  long chunkFileOffset = 0;
  if (info.getOffset() != 0) {
    try {
      BlockData blockData = blockManager.getBlock(kvContainer, blockID);
      List<ContainerProtos.ChunkInfo> chunks = blockData.getChunks();
      String chunkName = info.getChunkName();
      boolean found = false;
      for (ContainerProtos.ChunkInfo chunk : chunks) {
        if (chunk.getChunkName().equals(chunkName)) {
          chunkFileOffset = chunk.getOffset();
          found = true;
          break;
        }
      }
      if (!found) {
        throw new StorageContainerException("Cannot find chunk " + chunkName
            + " in block " + blockID.toString(), UNABLE_TO_FIND_CHUNK);
      }
    } catch (IOException e) {
      throw new StorageContainerException("Cannot find block "
          + blockID.toString() + " for chunk " + info.getChunkName(),
          UNABLE_TO_FIND_CHUNK);
    }
  }
  for (File file : possibleFiles) {
    try {
      if (file.exists()) {
        long offset = info.getOffset() - chunkFileOffset;
        Preconditions.checkState(offset >= 0);
        ChunkUtils.readData(file, dataBuffers, offset, len, volume);
        return ChunkBuffer.wrap(Lists.newArrayList(dataBuffers));
      }
    } catch (StorageContainerException ex) {
      // next possible location
      if (ex.getResult() != UNABLE_TO_FIND_CHUNK) {
        throw ex;
      }
      BufferUtils.clearBuffers(dataBuffers);
    }
  }
  throw new StorageContainerException("Chunk file can't be found "
      + possibleFiles.toString(), UNABLE_TO_FIND_CHUNK);
}
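Because the FILE_PER_CHUNK layout stores each chunk in its own file, a block-relative offset carried in the ChunkInfo has to be translated into a file-relative offset by subtracting the chunk's starting offset within the block, which is what the info.getOffset() - chunkFileOffset line above does. Below is a small sketch of that arithmetic, with the buffer splitting assumed to mirror what BufferUtils.assignByteBuffers does; the helper names here are illustrative, not Ozone API.

import java.nio.ByteBuffer;

final class ChunkReadMath {
  // Translate a block-relative offset into a position inside the chunk's
  // own file: the chunk file starts at chunkFileOffset within the block.
  static long fileOffset(long blockRelativeOffset, long chunkFileOffset) {
    long offset = blockRelativeOffset - chunkFileOffset;
    if (offset < 0) {
      throw new IllegalStateException("offset precedes the chunk's start");
    }
    return offset;
  }

  // Assumed behaviour of BufferUtils.assignByteBuffers: split a read of
  // len bytes into buffers of at most capacity bytes each.
  static ByteBuffer[] assignBuffers(long len, long capacity) {
    int count = (int) ((len + capacity - 1) / capacity);
    ByteBuffer[] buffers = new ByteBuffer[count];
    for (int i = 0; i < count; i++) {
      long remaining = len - (long) i * capacity;
      buffers[i] = ByteBuffer.allocate((int) Math.min(capacity, remaining));
    }
    return buffers;
  }
}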
Use of org.apache.hadoop.ozone.container.common.helpers.ChunkInfo in project ozone by apache.
The class FilePerChunkStrategy, method deleteChunks.
@Override
public void deleteChunks(Container container, BlockData blockData)
    throws StorageContainerException {
  for (ContainerProtos.ChunkInfo chunk : blockData.getChunks()) {
    try {
      ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(chunk);
      deleteChunk(container, blockData.getBlockID(), chunkInfo);
    } catch (IOException e) {
      throw new StorageContainerException(e,
          ContainerProtos.Result.INVALID_ARGUMENT);
    }
  }
}
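deleteChunks, like readChunk above, converts each protobuf ContainerProtos.ChunkInfo into the helper ChunkInfo before acting on it. A minimal sketch of that round trip, using only the constructor and conversion methods visible in these snippets (the import paths are assumptions):

import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;

final class ChunkInfoRoundTrip {
  static void demo() throws Exception {
    // Helper ChunkInfo: chunk name, offset within the block, length.
    ChunkInfo info = new ChunkInfo("1.data.0", 0, 4096);
    // Serialize to the protobuf form stored in BlockData...
    ContainerProtos.ChunkInfo proto = info.getProtoBufMessage();
    // ...and back, as deleteChunks does for every chunk of the block.
    ChunkInfo restored = ChunkInfo.getFromProtoBuf(proto);
    assert restored.getLen() == info.getLen();
  }
}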
Use of org.apache.hadoop.ozone.container.common.helpers.ChunkInfo in project ozone by apache.
The class TestFilePerChunkStrategy, method testWriteChunkStageWriteAndCommit.
@Test
public void testWriteChunkStageWriteAndCommit() throws Exception {
  ChunkManager chunkManager = createTestSubject();
  checkChunkFileCount(0);
  // As no chunks are written to the volume, writeBytes should be 0.
  checkWriteIOStats(0, 0);
  KeyValueContainer container = getKeyValueContainer();
  BlockID blockID = getBlockID();
  ChunkInfo chunkInfo = getChunkInfo();
  chunkManager.writeChunk(container, blockID, chunkInfo, getData(),
      new DispatcherContext.Builder()
          .setStage(DispatcherContext.WriteChunkStage.WRITE_DATA).build());
  // Now a chunk file is being written with stage WRITE_DATA, so it should
  // create a temporary chunk file.
  checkChunkFileCount(1);
  long term = 0;
  long index = 0;
  File chunkFile = ContainerLayoutVersion.FILE_PER_CHUNK.getChunkFile(
      container.getContainerData(), blockID, chunkInfo);
  File tempChunkFile = new File(chunkFile.getParent(),
      chunkFile.getName()
          + OzoneConsts.CONTAINER_CHUNK_NAME_DELIMITER
          + OzoneConsts.CONTAINER_TEMPORARY_CHUNK_PREFIX
          + OzoneConsts.CONTAINER_CHUNK_NAME_DELIMITER + term
          + OzoneConsts.CONTAINER_CHUNK_NAME_DELIMITER + index);
  // As the chunk write stage is WRITE_DATA, a temp chunk file will be created.
  assertTrue(tempChunkFile.exists());
  checkWriteIOStats(chunkInfo.getLen(), 1);
  chunkManager.writeChunk(container, blockID, chunkInfo, getData(),
      new DispatcherContext.Builder()
          .setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA).build());
  checkWriteIOStats(chunkInfo.getLen(), 1);
  // The old temp file should have been renamed to the chunk file.
  checkChunkFileCount(1);
  // As the commit happened, the chunk file should exist.
  assertTrue(chunkFile.exists());
  assertFalse(tempChunkFile.exists());
}
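The expected temporary file name in this test is derived purely from the chunk file name plus the chunk-name delimiter, the temporary prefix, and the write term and index. A hedged sketch of that derivation, assuming the OzoneConsts delimiter is "." and the temporary prefix is "tmp":

import java.io.File;

final class TmpChunkNames {
  // Mirror of the concatenation in the test; "." and "tmp" stand in for
  // OzoneConsts.CONTAINER_CHUNK_NAME_DELIMITER and
  // OzoneConsts.CONTAINER_TEMPORARY_CHUNK_PREFIX (assumed values).
  static File tmpChunkFile(File chunkFile, long term, long index) {
    String name = chunkFile.getName()
        + "." + "tmp"
        + "." + term
        + "." + index;
    return new File(chunkFile.getParent(), name);
  }
}

Under those assumptions, a chunk file named 1.data.0 with term 0 and index 0 maps to 1.data.0.tmp.0.0, which is what the WRITE_DATA stage creates and the COMMIT_DATA stage renames to the final chunk file.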
Use of org.apache.hadoop.ozone.container.common.helpers.ChunkInfo in project ozone by apache.
The class TestContainerReader, method addBlocks.
private List<Long> addBlocks(KeyValueContainer keyValueContainer,
    boolean setMetaData) throws Exception {
  long containerId = keyValueContainer.getContainerData().getContainerID();
  List<Long> blkNames = new ArrayList<>();
  try (ReferenceCountedDB metadataStore =
      BlockUtils.getDB(keyValueContainer.getContainerData(), conf)) {
    for (int i = 0; i < blockCount; i++) {
      // Creating BlockData
      BlockID blockID = new BlockID(containerId, i);
      BlockData blockData = new BlockData(blockID);
      blockData.addMetadata(OzoneConsts.VOLUME, OzoneConsts.OZONE);
      blockData.addMetadata(OzoneConsts.OWNER,
          OzoneConsts.OZONE_SIMPLE_HDFS_USER);
      List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
      long localBlockID = blockID.getLocalID();
      ChunkInfo info = new ChunkInfo(
          String.format("%d.data.%d", localBlockID, 0), 0, blockLen);
      chunkList.add(info.getProtoBufMessage());
      blockData.setChunks(chunkList);
      blkNames.add(localBlockID);
      metadataStore.getStore().getBlockDataTable()
          .put(Long.toString(localBlockID), blockData);
    }
    if (setMetaData) {
      metadataStore.getStore().getMetadataTable()
          .put(OzoneConsts.BLOCK_COUNT, (long) blockCount);
      metadataStore.getStore().getMetadataTable()
          .put(OzoneConsts.CONTAINER_BYTES_USED, blockCount * blockLen);
    }
  }
  return blkNames;
}
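addBlocks gives every block a single chunk named with the <localID>.data.<index> pattern. Under the same pattern, a block spanning several chunks would advance each chunk's offset by one chunk length; a small illustrative sketch (not part of the test):

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;

final class MultiChunkBlock {
  static List<ContainerProtos.ChunkInfo> buildChunks(
      long localBlockID, int chunkCount, long chunkLen) throws Exception {
    List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
    for (int i = 0; i < chunkCount; i++) {
      // Same "<localID>.data.<index>" naming as addBlocks, with the
      // offset advancing by one chunk length per chunk.
      ChunkInfo info = new ChunkInfo(
          String.format("%d.data.%d", localBlockID, i),
          (long) i * chunkLen, chunkLen);
      chunkList.add(info.getProtoBufMessage());
    }
    return chunkList;
  }
}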