Use of org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo in project Ozone by Apache.
The class BlockOutputStream, method writeChunkToContainer.
/**
 * Writes buffered data as a new chunk to the container and saves the chunk
 * information to be used later in the putKey call.
*
* @throws IOException if there is an I/O error while performing the call
* @throws OzoneChecksumException if there is an error while computing
* checksum
*/
private void writeChunkToContainer(ChunkBuffer chunk) throws IOException {
  int effectiveChunkSize = chunk.remaining();
  final long offset = chunkOffset.getAndAdd(effectiveChunkSize);
  final ByteString data =
      chunk.toByteString(bufferPool.byteStringConversion());
  ChecksumData checksumData = checksum.computeChecksum(chunk);
  ChunkInfo chunkInfo = ChunkInfo.newBuilder()
      .setChunkName(blockID.get().getLocalID() + "_chunk_" + ++chunkIndex)
      .setOffset(offset)
      .setLen(effectiveChunkSize)
      .setChecksumData(checksumData.getProtoBufMessage())
      .build();
  if (LOG.isDebugEnabled()) {
    LOG.debug("Writing chunk {} length {} at offset {}",
        chunkInfo.getChunkName(), effectiveChunkSize, offset);
  }
  try {
    XceiverClientReply asyncReply =
        writeChunkAsync(xceiverClient, chunkInfo, blockID.get(), data, token);
    CompletableFuture<ContainerProtos.ContainerCommandResponseProto> future =
        asyncReply.getResponse();
    future.thenApplyAsync(e -> {
      try {
        // Check the datanode response before treating the write as successful.
        validateResponse(e);
      } catch (IOException sce) {
        future.completeExceptionally(sce);
      }
      return e;
    }, responseExecutor).exceptionally(e -> {
      String msg = "Failed to write chunk " + chunkInfo.getChunkName()
          + " into block " + blockID;
      LOG.debug("{}, exception: {}", msg, e.getLocalizedMessage());
      CompletionException ce = new CompletionException(msg, e);
      setIoException(ce);
      throw ce;
    });
  } catch (IOException | ExecutionException e) {
    throw new IOException(EXCEPTION_MSG + e.toString(), e);
  } catch (InterruptedException ex) {
    Thread.currentThread().interrupt();
    handleInterruptedException(ex, false);
  }
  // Record the chunk in the block metadata sent later with putBlock.
  containerBlockData.addChunks(chunkInfo);
}
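To show the control flow in isolation, here is a minimal, self-contained sketch of the same asynchronous write-and-validate pattern. FakeResponse, submitWrite and validate are placeholders standing in for the Ozone response proto, writeChunkAsync(...).getResponse() and validateResponse(); they are not Ozone APIs.

import java.io.IOException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class AsyncValidateSketch {

  static final class FakeResponse {
    final boolean success;

    FakeResponse(boolean success) {
      this.success = success;
    }
  }

  // Stands in for writeChunkAsync(...).getResponse().
  static CompletableFuture<FakeResponse> submitWrite() {
    return CompletableFuture.supplyAsync(() -> new FakeResponse(true));
  }

  // Stands in for validateResponse(e).
  static void validate(FakeResponse response) throws IOException {
    if (!response.success) {
      throw new IOException("datanode reported a failed chunk write");
    }
  }

  public static void main(String[] args) {
    ExecutorService responseExecutor = Executors.newSingleThreadExecutor();
    CompletableFuture<FakeResponse> future = submitWrite();
    CompletableFuture<FakeResponse> validated = future.thenApplyAsync(r -> {
      try {
        validate(r);                       // check the datanode response
      } catch (IOException e) {
        future.completeExceptionally(e);   // mirror the pattern above: mark the write future as failed
      }
      return r;
    }, responseExecutor).exceptionally(e -> {
      // Wrap and rethrow so later stages (flush/putBlock in the real code)
      // observe the failure.
      throw new CompletionException("chunk write failed", e);
    });
    validated.join();
    responseExecutor.shutdown();
  }
}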
Use of org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo in project Ozone by Apache.
The class ChunkInputStream, method readChunkFromContainer.
/**
 * Reads a full or partial chunk from the DN container, based on the current
 * position of the ChunkInputStream, the number of bytes of data to read,
 * and the checksum boundaries.
 * If successful, the read data is saved in the buffers so that
 * subsequent read calls can utilize it.
* @param len number of bytes of data to be read
* @throws IOException if there is an I/O error while performing the call
* to Datanode
*/
private synchronized void readChunkFromContainer(int len) throws IOException {
  // Index of the first byte to be read from the chunk.
  long startByteIndex;
  if (chunkPosition >= 0) {
    // If a seek operation was called to advance the buffer position, the
    // chunk should be read from that position onwards.
    startByteIndex = chunkPosition;
  } else {
    // Otherwise continue from the end of the data already read into the
    // buffers.
    startByteIndex = bufferOffsetWrtChunkData + buffersSize;
  }
  // bufferOffsetWrtChunkData and buffersSize are updated after the data
  // is read from the container and put into the buffers, but if the read
  // fails and is retried, we need the previous position. The position is
  // reset after a successful read in adjustBufferPosition().
  storePosition();
  long adjustedBuffersOffset, adjustedBuffersLen;
  if (verifyChecksum) {
    // Adjust the chunk offset and length to include the required checksum
    // boundaries.
    Pair<Long, Long> adjustedOffsetAndLength =
        computeChecksumBoundaries(startByteIndex, len);
    adjustedBuffersOffset = adjustedOffsetAndLength.getLeft();
    adjustedBuffersLen = adjustedOffsetAndLength.getRight();
  } else {
    // Read from startByteIndex without any boundary adjustment.
    adjustedBuffersOffset = startByteIndex;
    adjustedBuffersLen = len;
  }
  // Adjust the chunkInfo so that only the required bytes are read from
  // the chunk.
  final ChunkInfo adjustedChunkInfo = ChunkInfo.newBuilder(chunkInfo)
      .setOffset(chunkInfo.getOffset() + adjustedBuffersOffset)
      .setLen(adjustedBuffersLen)
      .build();
  readChunkDataIntoBuffers(adjustedChunkInfo);
  bufferOffsetWrtChunkData = adjustedBuffersOffset;
  // The buffer position might need to be adjusted for the following
  // scenarios, because reads happen at checksum boundaries:
  // 1. The stream was seeked to a position before the chunk was read.
  // 2. The chunk was read from an index smaller than the current position
  //    to account for checksum boundaries.
  adjustBufferPosition(startByteIndex - bufferOffsetWrtChunkData);
}
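The checksum boundary adjustment is easiest to see with concrete numbers. The sketch below is an assumption about what computeChecksumBoundaries does (round the start offset down and the end offset up to the bytesPerChecksum block size, capped at the chunk length); it is not the actual Ozone implementation.

final class ChecksumBoundarySketch {

  static long[] align(long startByteIndex, long len,
      long bytesPerChecksum, long chunkLen) {
    // Round the start down to the nearest checksum boundary.
    long alignedStart = (startByteIndex / bytesPerChecksum) * bytesPerChecksum;
    // Round the end up to the next checksum boundary, capped at the chunk end.
    long end = startByteIndex + len;
    long alignedEnd = Math.min(
        ((end + bytesPerChecksum - 1) / bytesPerChecksum) * bytesPerChecksum,
        chunkLen);
    return new long[] {alignedStart, alignedEnd - alignedStart};
  }

  public static void main(String[] args) {
    // Reading 100 bytes at offset 300 with 256-byte checksums from a 1 MB
    // chunk expands to offset 256, length 256 (covers bytes 256..511).
    long[] r = align(300, 100, 256, 1 << 20);
    System.out.println(r[0] + " " + r[1]); // prints "256 256"
  }
}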
Use of org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo in project Ozone by Apache.
The class TestBlockInputStream, method testReadNotRetriedOnOtherException.
@Test
public void testReadNotRetriedOnOtherException() throws Exception {
  // GIVEN
  BlockID blockID = new BlockID(new ContainerBlockID(1, 1));
  Pipeline pipeline = MockPipeline.createSingleNodePipeline();
  final int len = 200;
  final ChunkInputStream stream = mock(ChunkInputStream.class);
  when(stream.read(any(), anyInt(), anyInt()))
      .thenThrow(new OzoneChecksumException("checksum missing"));
  when(stream.getRemaining()).thenReturn((long) len);
  BlockInputStream subject = new DummyBlockInputStream(blockID, blockSize,
      pipeline, null, false, null, refreshPipeline, chunks, null) {
    @Override
    protected ChunkInputStream createChunkInputStream(ChunkInfo chunkInfo) {
      return stream;
    }
  };
  try {
    subject.initialize();

    // WHEN
    byte[] b = new byte[len];
    LambdaTestUtils.intercept(OzoneChecksumException.class,
        () -> subject.read(b, 0, len));

    // THEN
    verify(refreshPipeline, never()).apply(blockID);
  } finally {
    subject.close();
  }
}
Use of org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo in project Ozone by Apache.
The class TestBlockInputStream, method createChunkList.
/**
 * Creates a mock list of chunks. The first n-1 chunks have length CHUNK_SIZE
 * and the last chunk has length CHUNK_SIZE / 2.
*/
private void createChunkList(int numChunks) throws Exception {
  chunks = new ArrayList<>(numChunks);
  chunkDataMap = new HashMap<>();
  blockData = new byte[0];
  int i, chunkLen;
  byte[] byteData;
  String chunkName;
  for (i = 0; i < numChunks; i++) {
    chunkName = "chunk-" + i;
    chunkLen = CHUNK_SIZE;
    if (i == numChunks - 1) {
      chunkLen = CHUNK_SIZE / 2;
    }
    byteData = generateRandomData(chunkLen);
    ChunkInfo chunkInfo = ChunkInfo.newBuilder()
        .setChunkName(chunkName)
        .setOffset(0)
        .setLen(chunkLen)
        .setChecksumData(checksum.computeChecksum(
            byteData, 0, chunkLen).getProtoBufMessage())
        .build();
    chunkDataMap.put(chunkName, byteData);
    chunks.add(chunkInfo);
    blockSize += chunkLen;
    blockData = Bytes.concat(blockData, byteData);
  }
}
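As a hypothetical usage (the call below is not part of the actual test class), requesting five chunks yields four full chunks plus one half-size chunk:

// Assuming blockSize starts at 0 (the helper accumulates with +=):
createChunkList(5);
assertEquals(5, chunks.size());
assertEquals(4 * CHUNK_SIZE + CHUNK_SIZE / 2, blockSize);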
Use of org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo in project Ozone by Apache.
The class TestBlockInputStream, method testRefreshOnReadFailureAfterUnbuffer.
@Test
public void testRefreshOnReadFailureAfterUnbuffer() throws Exception {
  // GIVEN
  BlockID blockID = new BlockID(new ContainerBlockID(1, 1));
  Pipeline pipeline = MockPipeline.createSingleNodePipeline();
  Pipeline newPipeline = MockPipeline.createSingleNodePipeline();
  XceiverClientFactory clientFactory = mock(XceiverClientFactory.class);
  XceiverClientSpi client = mock(XceiverClientSpi.class);
  when(clientFactory.acquireClientForReadData(pipeline)).thenReturn(client);

  final int len = 200;
  final ChunkInputStream stream = mock(ChunkInputStream.class);
  when(stream.read(any(), anyInt(), anyInt()))
      .thenThrow(new StorageContainerException("test", CONTAINER_NOT_FOUND))
      .thenReturn(len);
  when(stream.getRemaining()).thenReturn((long) len);
  when(refreshPipeline.apply(blockID)).thenReturn(newPipeline);

  BlockInputStream subject = new BlockInputStream(blockID, blockSize,
      pipeline, null, false, clientFactory, refreshPipeline) {
    @Override
    protected List<ChunkInfo> getChunkInfos() throws IOException {
      acquireClient();
      return chunks;
    }

    @Override
    protected ChunkInputStream createChunkInputStream(ChunkInfo chunkInfo) {
      return stream;
    }
  };

  try {
    subject.initialize();
    subject.unbuffer();

    // WHEN
    byte[] b = new byte[len];
    int bytesRead = subject.read(b, 0, len);

    // THEN
    Assert.assertEquals(len, bytesRead);
    verify(refreshPipeline).apply(blockID);
    verify(clientFactory).acquireClientForReadData(pipeline);
    verify(clientFactory).releaseClient(client, false);
  } finally {
    subject.close();
  }
}
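For a stand-alone view of the behaviour this test pins down, here is a minimal sketch of the refresh-and-retry flow. Source, ContainerMovedException and RefreshRetrySketch are placeholder names, not the real BlockInputStream internals.

import java.io.IOException;
import java.util.function.Function;

public class RefreshRetrySketch {

  /** Stands in for the chunk stream backed by one datanode pipeline. */
  interface Source {
    int read(byte[] b, int off, int len) throws IOException;
  }

  /** Stands in for StorageContainerException(CONTAINER_NOT_FOUND). */
  static final class ContainerMovedException extends IOException {
    ContainerMovedException(String msg) {
      super(msg);
    }
  }

  private final Function<Long, Source> refreshAndReconnect;
  private Source source;

  RefreshRetrySketch(Source source, Function<Long, Source> refreshAndReconnect) {
    this.source = source;
    this.refreshAndReconnect = refreshAndReconnect;
  }

  int read(byte[] b, int off, int len, long blockId) throws IOException {
    try {
      return source.read(b, off, len);
    } catch (ContainerMovedException e) {
      // The container is no longer on the cached pipeline: fetch the new
      // block location, reconnect, and retry the read exactly once.
      source = refreshAndReconnect.apply(blockId);
      return source.read(b, off, len);
    }
  }

  public static void main(String[] args) throws IOException {
    Source broken = (b, off, len) -> {
      throw new ContainerMovedException("container not found");
    };
    Source healthy = (b, off, len) -> len;
    RefreshRetrySketch sketch = new RefreshRetrySketch(broken, id -> healthy);
    byte[] buf = new byte[200];
    System.out.println(sketch.read(buf, 0, buf.length, 1L)); // prints 200
  }
}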