
Example 26 with ChunkBuffer

use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.

the class BufferPool method allocateBuffer.

/**
 * If currentBufferIndex (after the increment below) is still less than
 * bufferList.size(), the buffer at that index has already been freed up
 * for rewriting; reuse it.
 * <p>
 * If currentBufferIndex == bufferList.size() and the list is still
 * smaller than the pool capacity, allocate a new buffer of chunk size.
 */
public ChunkBuffer allocateBuffer(int increment) {
    currentBufferIndex++;
    Preconditions.checkArgument(currentBufferIndex <= capacity - 1);
    if (currentBufferIndex < bufferList.size()) {
        return getBuffer(currentBufferIndex);
    } else {
        final ChunkBuffer newBuffer = ChunkBuffer.allocate(bufferSize, increment);
        bufferList.add(newBuffer);
        Preconditions.checkArgument(bufferList.size() <= capacity);
        return newBuffer;
    }
}
Also used : ChunkBuffer(org.apache.hadoop.ozone.common.ChunkBuffer)
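
To make the reuse invariant above easier to see in isolation, here is a minimal, self-contained sketch using plain java.nio.ByteBuffer in place of ChunkBuffer. The class name SimpleBufferPool and its release policy are assumptions for illustration, not part of the Ozone API.

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;

final class SimpleBufferPool {
    private final int capacity;   // maximum number of buffers in the pool
    private final int bufferSize; // size of each buffer (the "chunk size")
    private final List<ByteBuffer> bufferList = new ArrayList<>();
    private int currentBufferIndex = -1;

    SimpleBufferPool(int capacity, int bufferSize) {
        this.capacity = capacity;
        this.bufferSize = bufferSize;
    }

    ByteBuffer allocateBuffer() {
        currentBufferIndex++;
        if (currentBufferIndex > capacity - 1) {
            throw new IllegalStateException("buffer pool exhausted");
        }
        if (currentBufferIndex < bufferList.size()) {
            // A previously released buffer exists at this index: reuse it.
            return bufferList.get(currentBufferIndex);
        }
        // The list is still below capacity: grow it by one chunk-sized buffer.
        ByteBuffer newBuffer = ByteBuffer.allocate(bufferSize);
        bufferList.add(newBuffer);
        return newBuffer;
    }

    void releaseBuffer(ByteBuffer buffer) {
        // Simplified LIFO release policy (an assumption of this sketch):
        // clear so the buffer can be rewritten, and step the index back.
        buffer.clear();
        currentBufferIndex--;
    }
}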

Example 27 with ChunkBuffer

use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.

the class BlockOutputStream method writeChunkToContainer.

/**
 * Writes buffered data as a new chunk to the container and saves chunk
 * information to be used later in putKey call.
 *
 * @throws IOException if there is an I/O error while performing the call
 * @throws OzoneChecksumException if there is an error while computing
 * checksum
 */
private void writeChunkToContainer(ChunkBuffer chunk) throws IOException {
    int effectiveChunkSize = chunk.remaining();
    final long offset = chunkOffset.getAndAdd(effectiveChunkSize);
    final ByteString data = chunk.toByteString(bufferPool.byteStringConversion());
    ChecksumData checksumData = checksum.computeChecksum(chunk);
    ChunkInfo chunkInfo = ChunkInfo.newBuilder()
        .setChunkName(blockID.get().getLocalID() + "_chunk_" + ++chunkIndex)
        .setOffset(offset)
        .setLen(effectiveChunkSize)
        .setChecksumData(checksumData.getProtoBufMessage())
        .build();
    if (LOG.isDebugEnabled()) {
        LOG.debug("Writing chunk {} length {} at offset {}", chunkInfo.getChunkName(), effectiveChunkSize, offset);
    }
    try {
        XceiverClientReply asyncReply = writeChunkAsync(xceiverClient, chunkInfo, blockID.get(), data, token);
        CompletableFuture<ContainerProtos.ContainerCommandResponseProto> future = asyncReply.getResponse();
        future.thenApplyAsync(e -> {
            try {
                validateResponse(e);
            } catch (IOException sce) {
                future.completeExceptionally(sce);
            }
            return e;
        }, responseExecutor).exceptionally(e -> {
            String msg = "Failed to write chunk " + chunkInfo.getChunkName() + " into block " + blockID;
            LOG.debug("{}, exception: {}", msg, e.getLocalizedMessage());
            CompletionException ce = new CompletionException(msg, e);
            setIoException(ce);
            throw ce;
        });
    } catch (IOException | ExecutionException e) {
        throw new IOException(EXCEPTION_MSG + e.toString(), e);
    } catch (InterruptedException ex) {
        Thread.currentThread().interrupt();
        handleInterruptedException(ex, false);
    }
    containerBlockData.addChunks(chunkInfo);
}
Also used : BlockID(org.apache.hadoop.hdds.client.BlockID) OzoneChecksumException(org.apache.hadoop.ozone.common.OzoneChecksumException) ContainerCommandResponseProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto) ContainerProtocolCalls.writeChunkAsync(org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls.writeChunkAsync) LoggerFactory(org.slf4j.LoggerFactory) CompletableFuture(java.util.concurrent.CompletableFuture) KeyValue(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue) ChecksumData(org.apache.hadoop.ozone.common.ChecksumData) ContainerProtos(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos) AtomicReference(java.util.concurrent.atomic.AtomicReference) OzoneClientConfig(org.apache.hadoop.hdds.scm.OzoneClientConfig) ArrayList(java.util.ArrayList) TokenIdentifier(org.apache.hadoop.security.token.TokenIdentifier) ByteString(org.apache.ratis.thirdparty.com.google.protobuf.ByteString) Checksum(org.apache.hadoop.ozone.common.Checksum) ExecutorService(java.util.concurrent.ExecutorService) OutputStream(java.io.OutputStream) XceiverClientFactory(org.apache.hadoop.hdds.scm.XceiverClientFactory) BlockData(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData) Logger(org.slf4j.Logger) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) IOException(java.io.IOException) CompletionException(java.util.concurrent.CompletionException) ChunkInfo(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo) Pipeline(org.apache.hadoop.hdds.scm.pipeline.Pipeline) Token(org.apache.hadoop.security.token.Token) ContainerProtocolCalls.putBlockAsync(org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls.putBlockAsync) Executors(java.util.concurrent.Executors) XceiverClientReply(org.apache.hadoop.hdds.scm.XceiverClientReply) ExecutionException(java.util.concurrent.ExecutionException) AtomicLong(java.util.concurrent.atomic.AtomicLong) List(java.util.List) XceiverClientSpi(org.apache.hadoop.hdds.scm.XceiverClientSpi) StorageContainerException(org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException) ChunkBuffer(org.apache.hadoop.ozone.common.ChunkBuffer) Preconditions(com.google.common.base.Preconditions) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
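
The control flow worth noting above is the pairing of thenApplyAsync (validate the datanode response on a dedicated executor) with exceptionally (record the failure, then rethrow so the pipeline stays failed). The sketch below shows just that pattern in plain Java; Response, validate and handleReply are hypothetical stand-ins for the Ozone types, not the actual client API.

import java.io.IOException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicReference;

final class AsyncWriteValidation {
    record Response(boolean ok) {}

    private static final ExecutorService responseExecutor =
        Executors.newSingleThreadExecutor();
    private static final AtomicReference<Throwable> ioException =
        new AtomicReference<>();

    static void validate(Response r) throws IOException {
        if (!r.ok()) {
            throw new IOException("datanode reported a failed write");
        }
    }

    static CompletableFuture<Response> handleReply(CompletableFuture<Response> reply) {
        return reply.thenApplyAsync(r -> {
            try {
                validate(r);
            } catch (IOException e) {
                // Failing the stage with CompletionException propagates the
                // error to every dependent stage.
                throw new CompletionException(e);
            }
            return r;
        }, responseExecutor).exceptionally(e -> {
            // Record the first failure so later writes can fail fast, then
            // rethrow to keep this future completed exceptionally.
            ioException.compareAndSet(null, e);
            throw new CompletionException("chunk write failed", e);
        });
    }
}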

Example 28 with ChunkBuffer

use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.

the class BlockOutputStream method writeOnRetry.

/**
 * Called on the retry path after a closed-container or timeout
 * exception. The data is already cached in the buffer pool, so it is
 * replayed from there rather than taken from the caller.
 * @param len length of data to rewrite
 * @throws IOException if the rewrite fails
 */
public void writeOnRetry(long len) throws IOException {
    if (len == 0) {
        return;
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("Retrying write length {} for blockID {}", len, blockID);
    }
    Preconditions.checkArgument(len <= config.getStreamBufferMaxSize());
    int count = 0;
    while (len > 0) {
        ChunkBuffer buffer = bufferPool.getBuffer(count);
        long writeLen = Math.min(buffer.position(), len);
        if (!buffer.hasRemaining()) {
            writeChunk(buffer);
        }
        len -= writeLen;
        count++;
        writtenDataLength += writeLen;
        // call for handling full buffer/flush buffer condition.
        if (writtenDataLength % config.getStreamBufferFlushSize() == 0) {
            // reset the position to zero as now we will be reading the
            // next buffer in the list
            updateFlushLength();
            executePutBlock(false, false);
        }
        if (writtenDataLength == config.getStreamBufferMaxSize()) {
            handleFullBuffer();
        }
    }
}
Also used : ChunkBuffer(org.apache.hadoop.ozone.common.ChunkBuffer)
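
Since the retry path reads back from the pool, the essence of the loop above is: walk the buffers in order, count buffer.position() bytes as replayed, and re-send only the buffers that are completely full. Below is a minimal sketch under those assumptions, with plain ByteBuffer and a hypothetical replayChunk() standing in for writeChunk().

import java.nio.ByteBuffer;
import java.util.List;

final class RetryReplay {
    static long replay(List<ByteBuffer> buffers, long len, long flushSize) {
        long written = 0;
        int count = 0;
        while (len > 0) {
            ByteBuffer buffer = buffers.get(count++);
            // position() is how much data the buffer holds; replay at most len.
            long writeLen = Math.min(buffer.position(), len);
            if (!buffer.hasRemaining()) {
                // Only completely full buffers are re-sent as whole chunks.
                replayChunk(buffer);
            }
            len -= writeLen;
            written += writeLen;
            if (written % flushSize == 0) {
                // In BlockOutputStream this is the point where
                // executePutBlock(false, false) commits the flushed buffers.
                System.out.println("flush boundary at " + written);
            }
        }
        return written;
    }

    static void replayChunk(ByteBuffer buffer) {
        System.out.println("re-sending " + buffer.position() + " bytes");
    }
}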

Example 29 with ChunkBuffer

use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.

the class BlockOutputStream method executePutBlock.

/**
 * @param close whether putBlock is happening as part of closing the stream
 * @param force true if no data was written since most recent putBlock and
 *            stream is being closed
 */
private CompletableFuture<ContainerProtos.ContainerCommandResponseProto> executePutBlock(boolean close, boolean force) throws IOException {
    checkOpen();
    long flushPos = totalDataFlushedLength;
    final List<ChunkBuffer> byteBufferList;
    if (!force) {
        Preconditions.checkNotNull(bufferList);
        byteBufferList = bufferList;
        bufferList = null;
        Preconditions.checkNotNull(byteBufferList);
    } else {
        byteBufferList = null;
    }
    CompletableFuture<ContainerProtos.ContainerCommandResponseProto> flushFuture = null;
    try {
        BlockData blockData = containerBlockData.build();
        XceiverClientReply asyncReply = putBlockAsync(xceiverClient, blockData, close, token);
        CompletableFuture<ContainerProtos.ContainerCommandResponseProto> future = asyncReply.getResponse();
        flushFuture = future.thenApplyAsync(e -> {
            try {
                validateResponse(e);
            } catch (IOException sce) {
                throw new CompletionException(sce);
            }
            // if the ioException is not set, putBlock is successful
            if (getIoException() == null && !force) {
                BlockID responseBlockID = BlockID.getFromProtobuf(e.getPutBlock().getCommittedBlockLength().getBlockID());
                Preconditions.checkState(blockID.get().getContainerBlockID().equals(responseBlockID.getContainerBlockID()));
                // updates the bcsId of the block
                blockID.set(responseBlockID);
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Adding index " + asyncReply.getLogIndex() + " flushLength " + flushPos + " numBuffers " + byteBufferList.size() + " blockID " + blockID + " bufferPool size" + bufferPool.getSize() + " currentBufferIndex " + bufferPool.getCurrentBufferIndex());
                }
                // for standalone protocol, logIndex will always be 0.
                updateCommitInfo(asyncReply, byteBufferList);
            }
            return e;
        }, responseExecutor).exceptionally(e -> {
            if (LOG.isDebugEnabled()) {
                LOG.debug("putBlock failed for blockID {} with exception {}", blockID, e.getLocalizedMessage());
            }
            CompletionException ce = new CompletionException(e);
            setIoException(ce);
            throw ce;
        });
    } catch (IOException | ExecutionException e) {
        throw new IOException(EXCEPTION_MSG + e.toString(), e);
    } catch (InterruptedException ex) {
        Thread.currentThread().interrupt();
        handleInterruptedException(ex, false);
    }
    putFlushFuture(flushPos, flushFuture);
    return flushFuture;
}
Also used : BlockID(org.apache.hadoop.hdds.client.BlockID) OzoneChecksumException(org.apache.hadoop.ozone.common.OzoneChecksumException) ContainerCommandResponseProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto) ContainerProtocolCalls.writeChunkAsync(org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls.writeChunkAsync) LoggerFactory(org.slf4j.LoggerFactory) CompletableFuture(java.util.concurrent.CompletableFuture) KeyValue(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue) ChecksumData(org.apache.hadoop.ozone.common.ChecksumData) ContainerProtos(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos) AtomicReference(java.util.concurrent.atomic.AtomicReference) OzoneClientConfig(org.apache.hadoop.hdds.scm.OzoneClientConfig) ArrayList(java.util.ArrayList) TokenIdentifier(org.apache.hadoop.security.token.TokenIdentifier) ByteString(org.apache.ratis.thirdparty.com.google.protobuf.ByteString) Checksum(org.apache.hadoop.ozone.common.Checksum) ExecutorService(java.util.concurrent.ExecutorService) OutputStream(java.io.OutputStream) XceiverClientFactory(org.apache.hadoop.hdds.scm.XceiverClientFactory) BlockData(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData) Logger(org.slf4j.Logger) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) IOException(java.io.IOException) CompletionException(java.util.concurrent.CompletionException) ChunkInfo(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo) Pipeline(org.apache.hadoop.hdds.scm.pipeline.Pipeline) Token(org.apache.hadoop.security.token.Token) ContainerProtocolCalls.putBlockAsync(org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls.putBlockAsync) Executors(java.util.concurrent.Executors) XceiverClientReply(org.apache.hadoop.hdds.scm.XceiverClientReply) ExecutionException(java.util.concurrent.ExecutionException) AtomicLong(java.util.concurrent.atomic.AtomicLong) List(java.util.List) XceiverClientSpi(org.apache.hadoop.hdds.scm.XceiverClientSpi) StorageContainerException(org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException) ChunkBuffer(org.apache.hadoop.ozone.common.ChunkBuffer) Preconditions(com.google.common.base.Preconditions) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
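
executePutBlock remembers each flush under the data length it covers (putFlushFuture(flushPos, flushFuture)), which lets a later flush or close wait for exactly the acknowledgements up to a given position. Here is a minimal sketch of that bookkeeping; the class name FlushTracker and the waitForFlush method are hypothetical, not the BlockOutputStream internals.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.ExecutionException;

final class FlushTracker {
    // flushed-data length -> outstanding putBlock acknowledgement
    private final ConcurrentSkipListMap<Long, CompletableFuture<Void>> flushFutures =
        new ConcurrentSkipListMap<>();

    void putFlushFuture(long flushPos, CompletableFuture<Void> future) {
        flushFutures.put(flushPos, future);
    }

    // Wait for every putBlock whose flush position is <= pos, then drop
    // the completed entries from the map.
    void waitForFlush(long pos) throws ExecutionException, InterruptedException {
        for (CompletableFuture<Void> f : flushFutures.headMap(pos, true).values()) {
            f.get();
        }
        flushFutures.headMap(pos, true).clear();
    }
}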

Example 30 with ChunkBuffer

use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.

the class CommitWatcher method releaseBuffers.

/**
 * Releases the buffers flushed under the given commit indexes and
 * updates totalAckDataLength. In case of failure, the data is re-read
 * starting from totalAckDataLength.
 */
private long releaseBuffers(List<Long> indexes) {
    Preconditions.checkArgument(!commitIndex2flushedDataMap.isEmpty());
    for (long index : indexes) {
        Preconditions.checkState(commitIndex2flushedDataMap.containsKey(index));
        final List<ChunkBuffer> buffers = commitIndex2flushedDataMap.remove(index);
        long length = buffers.stream().mapToLong(ChunkBuffer::position).sum();
        totalAckDataLength += length;
        // clear the future object from the future Map
        final CompletableFuture<ContainerCommandResponseProto> remove = futureMap.remove(totalAckDataLength);
        if (remove == null) {
            LOG.error("Couldn't find required future for " + totalAckDataLength);
            for (Long key : futureMap.keySet()) {
                LOG.error("Existing acknowledged data: " + key);
            }
        }
        Preconditions.checkNotNull(remove);
        for (ChunkBuffer byteBuffer : buffers) {
            bufferPool.releaseBuffer(byteBuffer);
        }
    }
    return totalAckDataLength;
}
Also used : ChunkBuffer(org.apache.hadoop.ozone.common.ChunkBuffer) ContainerCommandResponseProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto)
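
The accounting above pairs with a watch step that maps each Raft log index to the buffers it flushed; once an index is acknowledged, those buffers are released and totalAckDataLength advances by the bytes they held. A simplified stand-in follows, using ByteBuffer for ChunkBuffer; the class name CommitTracker is hypothetical.

import java.nio.ByteBuffer;
import java.util.List;
import java.util.concurrent.ConcurrentSkipListMap;

final class CommitTracker {
    // Raft log index -> buffers committed by that index
    private final ConcurrentSkipListMap<Long, List<ByteBuffer>> commitIndexMap =
        new ConcurrentSkipListMap<>();
    private long totalAckDataLength;

    void watchForCommit(long logIndex, List<ByteBuffer> flushedBuffers) {
        commitIndexMap.put(logIndex, flushedBuffers);
    }

    long releaseBuffers(List<Long> ackedIndexes) {
        for (long index : ackedIndexes) {
            List<ByteBuffer> buffers = commitIndexMap.remove(index);
            // position() is the number of bytes each buffer carried.
            totalAckDataLength +=
                buffers.stream().mapToLong(ByteBuffer::position).sum();
            buffers.forEach(ByteBuffer::clear); // hand back to the pool
        }
        return totalAckDataLength;
    }
}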

Aggregations

ChunkBuffer (org.apache.hadoop.ozone.common.ChunkBuffer): 30
BlockID (org.apache.hadoop.hdds.client.BlockID): 14
ChunkInfo (org.apache.hadoop.ozone.container.common.helpers.ChunkInfo): 14
Test (org.junit.Test): 13
IOException (java.io.IOException): 10
ContainerProtos (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos): 9
StorageContainerException (org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException): 8
KeyValueContainer (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer): 8
ArrayList (java.util.ArrayList): 6
BlockData (org.apache.hadoop.ozone.container.common.helpers.BlockData): 5
CompletableFuture (java.util.concurrent.CompletableFuture): 4
ContainerCommandResponseProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto): 4
XceiverClientReply (org.apache.hadoop.hdds.scm.XceiverClientReply): 4
XceiverClientSpi (org.apache.hadoop.hdds.scm.XceiverClientSpi): 4
Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline): 4
Container (org.apache.hadoop.ozone.container.common.interfaces.Container): 4
ChunkManager (org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager): 4
ByteBuffer (java.nio.ByteBuffer): 3
Path (java.nio.file.Path): 3
LinkedList (java.util.LinkedList): 3