Use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.
The class BufferPool, method allocateBuffer.
/**
 * If currentBufferIndex is less than bufferList.size() - 1, the next
 * buffer in the list has already been freed up for rewriting; reuse
 * that buffer instead of allocating a new one.
 * <p>
 * If currentBufferIndex == bufferList.size() and the pool is still
 * smaller than its capacity, allocate a new buffer of chunk size.
 */
public ChunkBuffer allocateBuffer(int increment) {
  currentBufferIndex++;
  Preconditions.checkArgument(currentBufferIndex <= capacity - 1);
  if (currentBufferIndex < bufferList.size()) {
    return getBuffer(currentBufferIndex);
  } else {
    final ChunkBuffer newBuffer = ChunkBuffer.allocate(bufferSize, increment);
    bufferList.add(newBuffer);
    Preconditions.checkArgument(bufferList.size() <= capacity);
    return newBuffer;
  }
}
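
The reuse pattern above can be illustrated with a minimal, self-contained pool over plain ByteBuffers. This is only a sketch of the allocation strategy; SimpleBufferPool and its members are hypothetical names, not part of Ozone:

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;

// Minimal sketch: reuse a previously freed buffer when one exists at
// the next index, otherwise grow the list up to a fixed capacity.
class SimpleBufferPool {
  private final List<ByteBuffer> bufferList = new ArrayList<>();
  private final int capacity;    // max number of buffers in the pool
  private final int bufferSize;  // size of each buffer (chunk size)
  private int currentBufferIndex = -1;

  SimpleBufferPool(int capacity, int bufferSize) {
    this.capacity = capacity;
    this.bufferSize = bufferSize;
  }

  ByteBuffer allocateBuffer() {
    currentBufferIndex++;
    if (currentBufferIndex >= capacity) {
      throw new IllegalStateException("pool exhausted");
    }
    if (currentBufferIndex < bufferList.size()) {
      // The buffer at this index was released earlier; reuse it.
      ByteBuffer reused = bufferList.get(currentBufferIndex);
      reused.clear();
      return reused;
    }
    // Pool has not reached capacity yet; allocate a fresh buffer.
    ByteBuffer fresh = ByteBuffer.allocate(bufferSize);
    bufferList.add(fresh);
    return fresh;
  }
}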
Use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.
The class BlockOutputStream, method writeChunkToContainer.
/**
 * Writes buffered data as a new chunk to the container and saves the chunk
 * information to be used later in the putKey call.
 *
 * @throws IOException if there is an I/O error while performing the call
 * @throws OzoneChecksumException if there is an error while computing the
 * checksum
 */
private void writeChunkToContainer(ChunkBuffer chunk) throws IOException {
  int effectiveChunkSize = chunk.remaining();
  final long offset = chunkOffset.getAndAdd(effectiveChunkSize);
  final ByteString data =
      chunk.toByteString(bufferPool.byteStringConversion());
  ChecksumData checksumData = checksum.computeChecksum(chunk);
  ChunkInfo chunkInfo = ChunkInfo.newBuilder()
      .setChunkName(blockID.get().getLocalID() + "_chunk_" + ++chunkIndex)
      .setOffset(offset)
      .setLen(effectiveChunkSize)
      .setChecksumData(checksumData.getProtoBufMessage())
      .build();
  if (LOG.isDebugEnabled()) {
    LOG.debug("Writing chunk {} length {} at offset {}",
        chunkInfo.getChunkName(), effectiveChunkSize, offset);
  }
  try {
    XceiverClientReply asyncReply =
        writeChunkAsync(xceiverClient, chunkInfo, blockID.get(), data, token);
    CompletableFuture<ContainerProtos.ContainerCommandResponseProto> future =
        asyncReply.getResponse();
    future.thenApplyAsync(e -> {
      try {
        validateResponse(e);
      } catch (IOException sce) {
        future.completeExceptionally(sce);
      }
      return e;
    }, responseExecutor).exceptionally(e -> {
      String msg = "Failed to write chunk " + chunkInfo.getChunkName()
          + " into block " + blockID;
      LOG.debug("{}, exception: {}", msg, e.getLocalizedMessage());
      CompletionException ce = new CompletionException(msg, e);
      setIoException(ce);
      throw ce;
    });
  } catch (IOException | ExecutionException e) {
    throw new IOException(EXCEPTION_MSG + e.toString(), e);
  } catch (InterruptedException ex) {
    Thread.currentThread().interrupt();
    handleInterruptedException(ex, false);
  }
  containerBlockData.addChunks(chunkInfo);
}
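
The future chaining above follows a common pattern: validate the response on a dedicated executor and convert a validation failure into an exceptional completion that downstream stages observe. A standalone sketch of that pattern, with a placeholder response type and validate method instead of the Ozone APIs:

import java.io.IOException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class AsyncValidateSketch {
  // Placeholder for response validation; throws on a bad response.
  static void validate(String response) throws IOException {
    if (!"OK".equals(response)) {
      throw new IOException("unexpected response: " + response);
    }
  }

  public static void main(String[] args) {
    ExecutorService responseExecutor = Executors.newSingleThreadExecutor();
    CompletableFuture<String> future =
        CompletableFuture.supplyAsync(() -> "OK"); // stands in for writeChunkAsync

    future.thenApplyAsync(resp -> {
      try {
        validate(resp);
      } catch (IOException e) {
        // Surface the validation failure to downstream stages.
        throw new CompletionException("chunk write failed", e);
      }
      return resp;
    }, responseExecutor).exceptionally(e -> {
      // Record the failure; later writes would observe it and abort.
      System.err.println("write failed: " + e.getMessage());
      return null;
    }).join();

    responseExecutor.shutdown();
  }
}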
Use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.
The class BlockOutputStream, method writeOnRetry.
/**
 * Called on the retry path in case of a closed-container exception or a
 * timeout exception.
 * @param len length of data to rewrite
 * @throws IOException if an error occurs
 */
// In this case, the data to rewrite is already cached in the buffer pool.
public void writeOnRetry(long len) throws IOException {
  if (len == 0) {
    return;
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Retrying write length {} for blockID {}", len, blockID);
  }
  Preconditions.checkArgument(len <= config.getStreamBufferMaxSize());
  int count = 0;
  while (len > 0) {
    ChunkBuffer buffer = bufferPool.getBuffer(count);
    long writeLen = Math.min(buffer.position(), len);
    if (!buffer.hasRemaining()) {
      writeChunk(buffer);
    }
    len -= writeLen;
    count++;
    writtenDataLength += writeLen;
    // Handle the full-buffer/flush-buffer condition.
    if (writtenDataLength % config.getStreamBufferFlushSize() == 0) {
      // A flush-size boundary has been reached; record the flush length
      // and issue a putBlock for the data rewritten so far.
      updateFlushLength();
      executePutBlock(false, false);
    }
    if (writtenDataLength == config.getStreamBufferMaxSize()) {
      handleFullBuffer();
    }
  }
}
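
The retry logic simply replays data still held in the pool's buffers, in order. A condensed, self-contained sketch of the same replay loop over plain ByteBuffers; all names here are illustrative, and the sketch assumes the cached buffers hold at least len bytes:

import java.nio.ByteBuffer;
import java.util.List;

// Illustrative replay loop: walk the cached buffers in order and
// rewrite up to 'len' bytes, counting how much has been replayed.
final class RetryReplaySketch {
  static long replay(List<ByteBuffer> cachedBuffers, long len) {
    long written = 0;
    int count = 0;
    while (len > 0) {
      ByteBuffer buffer = cachedBuffers.get(count);
      // position() is the amount of data previously written into
      // this buffer; never replay more than the remaining length.
      long writeLen = Math.min(buffer.position(), len);
      // A real implementation would resend a full buffer as a chunk
      // here (see writeChunk in the method above).
      len -= writeLen;
      written += writeLen;
      count++;
    }
    return written;
  }
}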
Use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.
The class BlockOutputStream, method executePutBlock.
/**
 * @param close whether putBlock is happening as part of closing the stream
 * @param force true if no data was written since the most recent putBlock
 * and the stream is being closed
 */
private CompletableFuture<ContainerProtos.ContainerCommandResponseProto>
    executePutBlock(boolean close, boolean force) throws IOException {
  checkOpen();
  long flushPos = totalDataFlushedLength;
  final List<ChunkBuffer> byteBufferList;
  if (!force) {
    Preconditions.checkNotNull(bufferList);
    byteBufferList = bufferList;
    bufferList = null;
    Preconditions.checkNotNull(byteBufferList);
  } else {
    byteBufferList = null;
  }
  CompletableFuture<ContainerProtos.ContainerCommandResponseProto> flushFuture = null;
  try {
    BlockData blockData = containerBlockData.build();
    XceiverClientReply asyncReply =
        putBlockAsync(xceiverClient, blockData, close, token);
    CompletableFuture<ContainerProtos.ContainerCommandResponseProto> future =
        asyncReply.getResponse();
    flushFuture = future.thenApplyAsync(e -> {
      try {
        validateResponse(e);
      } catch (IOException sce) {
        throw new CompletionException(sce);
      }
      // If the ioException is not set, putBlock was successful.
      if (getIoException() == null && !force) {
        BlockID responseBlockID = BlockID.getFromProtobuf(
            e.getPutBlock().getCommittedBlockLength().getBlockID());
        Preconditions.checkState(blockID.get().getContainerBlockID()
            .equals(responseBlockID.getContainerBlockID()));
        // Update the bcsId of the block.
        blockID.set(responseBlockID);
        if (LOG.isDebugEnabled()) {
          LOG.debug("Adding index " + asyncReply.getLogIndex()
              + " flushLength " + flushPos
              + " numBuffers " + byteBufferList.size()
              + " blockID " + blockID
              + " bufferPool size " + bufferPool.getSize()
              + " currentBufferIndex " + bufferPool.getCurrentBufferIndex());
        }
        // For the standalone protocol, logIndex will always be 0.
        updateCommitInfo(asyncReply, byteBufferList);
      }
      return e;
    }, responseExecutor).exceptionally(e -> {
      if (LOG.isDebugEnabled()) {
        LOG.debug("putBlock failed for blockID {} with exception {}",
            blockID, e.getLocalizedMessage());
      }
      CompletionException ce = new CompletionException(e);
      setIoException(ce);
      throw ce;
    });
  } catch (IOException | ExecutionException e) {
    throw new IOException(EXCEPTION_MSG + e.toString(), e);
  } catch (InterruptedException ex) {
    Thread.currentThread().interrupt();
    handleInterruptedException(ex, false);
  }
  putFlushFuture(flushPos, flushFuture);
  return flushFuture;
}
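
executePutBlock records each in-flight putBlock future against its flush position via putFlushFuture, so a caller can later wait for all data up to a given offset to be acknowledged. A minimal sketch of that bookkeeping; the class and method names below are hypothetical, not Ozone's:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentSkipListMap;

// Illustrative bookkeeping: map each flush position to the future of
// the putBlock that covers it.
final class FlushFutureTracker<T> {
  private final ConcurrentSkipListMap<Long, CompletableFuture<T>> futures =
      new ConcurrentSkipListMap<>();

  void putFlushFuture(long flushPos, CompletableFuture<T> future) {
    futures.put(flushPos, future);
  }

  // Block until every putBlock whose flush position is <= pos completes.
  void waitForFlush(long pos) {
    CompletableFuture.allOf(
        futures.headMap(pos, true).values()
            .toArray(new CompletableFuture[0])).join();
  }
}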
Use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.
The class CommitWatcher, method releaseBuffers.
/**
 * Releases the buffers for the given commit indexes and updates
 * totalAckDataLength. In case of failure, data will be re-read
 * starting from totalAckDataLength.
 */
private long releaseBuffers(List<Long> indexes) {
  Preconditions.checkArgument(!commitIndex2flushedDataMap.isEmpty());
  for (long index : indexes) {
    Preconditions.checkState(commitIndex2flushedDataMap.containsKey(index));
    final List<ChunkBuffer> buffers = commitIndex2flushedDataMap.remove(index);
    long length = buffers.stream().mapToLong(ChunkBuffer::position).sum();
    totalAckDataLength += length;
    // Clear the future object from the future map.
    final CompletableFuture<ContainerCommandResponseProto> remove =
        futureMap.remove(totalAckDataLength);
    if (remove == null) {
      LOG.error("Couldn't find required future for " + totalAckDataLength);
      for (Long key : futureMap.keySet()) {
        LOG.error("Existing acknowledged data: " + key);
      }
    }
    Preconditions.checkNotNull(remove);
    for (ChunkBuffer byteBuffer : buffers) {
      bufferPool.releaseBuffer(byteBuffer);
    }
  }
  return totalAckDataLength;
}
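
The release path maintains a map from commit index to the buffers flushed at that index; once an index is acknowledged, its buffers are returned to the pool and the acknowledged length advances. A simplified, self-contained sketch of that accounting over plain ByteBuffers; all names are illustrative only:

import java.nio.ByteBuffer;
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

// Illustrative commit accounting: when a commit index is acknowledged,
// sum the data covered by its buffers, advance the acked length, and
// return the buffers to a free list for reuse.
final class CommitAccountingSketch {
  private final Map<Long, List<ByteBuffer>> commitIndexToBuffers = new TreeMap<>();
  private final Deque<ByteBuffer> freeBuffers = new ArrayDeque<>();
  private long totalAckDataLength;

  void registerFlush(long commitIndex, List<ByteBuffer> buffers) {
    commitIndexToBuffers.put(commitIndex, buffers);
  }

  long release(List<Long> ackedIndexes) {
    for (long index : ackedIndexes) {
      List<ByteBuffer> buffers = commitIndexToBuffers.remove(index);
      // position() is how many bytes were written into each buffer.
      long length = buffers.stream().mapToLong(ByteBuffer::position).sum();
      totalAckDataLength += length;
      buffers.forEach(freeBuffers::add); // buffers may now be rewritten
    }
    return totalAckDataLength;
  }
}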