Example 6 with ByteBuf

use of org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf in project hbase by apache.

From class FanOutOneBlockAsyncDFSOutput, method endBlock:

private void endBlock() throws IOException {
    Preconditions.checkState(waitingAckQueue.isEmpty(), "should call flush first before calling close");
    if (state != State.STREAMING) {
        throw new IOException("stream already broken");
    }
    state = State.CLOSING;
    long finalizedLength = ackedBlockLength;
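    // The final packet carries no data: lastPacketInBlock = true tells each datanode to finalize the block.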
    PacketHeader header = new PacketHeader(4, finalizedLength, nextPacketSeqno, true, 0, false);
    buf.release();
    buf = null;
    int headerLen = header.getSerializedSize();
    ByteBuf headerBuf = alloc.directBuffer(headerLen);
    header.putInBuffer(headerBuf.nioBuffer(0, headerLen));
    headerBuf.writerIndex(headerLen);
    CompletableFuture<Long> future = new CompletableFuture<>();
    waitingAckQueue.add(new Callback(future, finalizedLength, datanodeInfoMap.keySet(), 0));
    datanodeInfoMap.keySet().forEach(ch -> ch.writeAndFlush(headerBuf.retainedDuplicate()));
    headerBuf.release();
    try {
        future.get();
    } catch (InterruptedException e) {
        throw (IOException) new InterruptedIOException().initCause(e);
    } catch (ExecutionException e) {
        Throwable cause = e.getCause();
        Throwables.propagateIfPossible(cause, IOException.class);
        throw new IOException(cause);
    }
}
Also used : InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) ByteBuf(org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf) CompletableFuture(java.util.concurrent.CompletableFuture) PacketHeader(org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader) ExecutionException(java.util.concurrent.ExecutionException)
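
The fan-out in endBlock leans on Netty reference counting: retainedDuplicate() hands each datanode channel its own retained view of the header buffer, and the trailing release() drops the sender's reference, so the memory is reclaimed once every channel has flushed. Below is a minimal sketch of that idiom, using unshaded Netty (the shaded org.apache.hbase.thirdparty.* packages expose the same API) and a hypothetical channel list in place of datanodeInfoMap.keySet().

import io.netty.buffer.ByteBuf;
import io.netty.buffer.PooledByteBufAllocator;
import io.netty.channel.Channel;
import java.util.List;

static void fanOut(List<Channel> channels, byte[] payload) {
    ByteBuf buf = PooledByteBufAllocator.DEFAULT.directBuffer(payload.length);
    buf.writeBytes(payload);
    // retainedDuplicate() bumps the refCnt once per channel; each write owns,
    // and the pipeline eventually releases, its own view of the same bytes.
    channels.forEach(ch -> ch.writeAndFlush(buf.retainedDuplicate()));
    // Drop the sender's reference; the buffer is freed once every duplicate
    // has been written and released.
    buf.release();
}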

Example 7 with ByteBuf

use of org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf in project hbase by apache.

From class FanOutOneBlockAsyncDFSOutput, method flushBuffer:

private void flushBuffer(CompletableFuture<Long> future, ByteBuf dataBuf, long nextPacketOffsetInBlock, boolean syncBlock) {
    int dataLen = dataBuf.readableBytes();
    int chunkLen = summer.getBytesPerChecksum();
    int trailingPartialChunkLen = dataLen % chunkLen;
    int numChecks = dataLen / chunkLen + (trailingPartialChunkLen != 0 ? 1 : 0);
    int checksumLen = numChecks * summer.getChecksumSize();
    ByteBuf checksumBuf = alloc.directBuffer(checksumLen);
    summer.calculateChunkedSums(dataBuf.nioBuffer(), checksumBuf.nioBuffer(0, checksumLen));
    checksumBuf.writerIndex(checksumLen);
    PacketHeader header = new PacketHeader(4 + checksumLen + dataLen, nextPacketOffsetInBlock, nextPacketSeqno, false, dataLen, syncBlock);
    int headerLen = header.getSerializedSize();
    ByteBuf headerBuf = alloc.buffer(headerLen);
    header.putInBuffer(headerBuf.nioBuffer(0, headerLen));
    headerBuf.writerIndex(headerLen);
    Callback c = new Callback(future, nextPacketOffsetInBlock + dataLen, datanodeInfoMap.keySet(), dataLen);
    waitingAckQueue.addLast(c);
    // recheck again after we pushed the callback to queue
    if (state != State.STREAMING && waitingAckQueue.peekFirst() == c) {
        future.completeExceptionally(new IOException("stream already broken"));
        // it's the one we have just pushed or just a no-op
        waitingAckQueue.removeFirst();
        return;
    }
    // TODO: we should perhaps measure time taken per DN here;
    // we could collect statistics per DN, and/or exclude bad nodes in createOutput.
    datanodeInfoMap.keySet().forEach(ch -> {
        ch.write(headerBuf.retainedDuplicate());
        ch.write(checksumBuf.retainedDuplicate());
        ch.writeAndFlush(dataBuf.retainedDuplicate());
    });
    checksumBuf.release();
    headerBuf.release();
    dataBuf.release();
    nextPacketSeqno++;
}
Also used : PacketHeader(org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) ByteBuf(org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf)
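
flushBuffer fills both the checksum and header buffers through nioBuffer(index, length), which exposes the ByteBuf's memory as a java.nio.ByteBuffer with independent indexes; writes through that view do not advance the ByteBuf's writerIndex, which is why each fill is followed by an explicit writerIndex(...) call. A minimal sketch of the idiom, assuming unshaded Netty:

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import java.nio.ByteBuffer;

static ByteBuf encodeInt(int value) {
    int len = Integer.BYTES;
    ByteBuf buf = Unpooled.directBuffer(len);
    // The NIO view shares content with buf but keeps its own position and limit.
    ByteBuffer nio = buf.nioBuffer(0, len);
    nio.putInt(value);
    // Writing through the view did not advance buf's writerIndex, so set it
    // explicitly or readers of the ByteBuf will see an empty buffer.
    buf.writerIndex(len);
    return buf;
}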

Example 8 with ByteBuf

use of org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf in project hbase by apache.

From class FanOutOneBlockAsyncDFSOutput, method flush0:

private void flush0(CompletableFuture<Long> future, boolean syncBlock) {
    if (state != State.STREAMING) {
        future.completeExceptionally(new IOException("stream already broken"));
        return;
    }
    int dataLen = buf.readableBytes();
    if (dataLen == trailingPartialChunkLength) {
        // no new data
        long lengthAfterFlush = nextPacketOffsetInBlock + dataLen;
        Callback lastFlush = waitingAckQueue.peekLast();
        if (lastFlush != null) {
            Callback c = new Callback(future, lengthAfterFlush, Collections.emptySet(), dataLen);
            waitingAckQueue.addLast(c);
            // recheck here if we have already removed the previous callback from the queue
            if (waitingAckQueue.peekFirst() == c) {
                // call below may be a no-op.
                if (state != State.STREAMING) {
                    future.completeExceptionally(new IOException("stream already broken"));
                } else {
                    future.complete(lengthAfterFlush);
                }
                // it's the one we have just pushed or just a no-op
                waitingAckQueue.removeFirst();
            }
        } else {
            // we must have acked all the data, so the ackedBlockLength must be the same as
            // lengthAfterFlush
            future.complete(lengthAfterFlush);
        }
        return;
    }
    if (encryptor != null) {
        ByteBuf encryptBuf = alloc.directBuffer(dataLen);
        buf.readBytes(encryptBuf, trailingPartialChunkLength);
        int toEncryptLength = dataLen - trailingPartialChunkLength;
        try {
            encryptor.encrypt(buf.nioBuffer(trailingPartialChunkLength, toEncryptLength), encryptBuf.nioBuffer(trailingPartialChunkLength, toEncryptLength));
        } catch (IOException e) {
            encryptBuf.release();
            future.completeExceptionally(e);
            return;
        }
        encryptBuf.writerIndex(dataLen);
        buf.release();
        buf = encryptBuf;
    }
    if (dataLen > maxDataLen) {
        // We need to write out the data by multiple packets as the max packet allowed is 16M.
        long nextSubPacketOffsetInBlock = nextPacketOffsetInBlock;
        for (int remaining = dataLen; ; ) {
            if (remaining < maxDataLen) {
                flushBuffer(future, buf.readRetainedSlice(remaining), nextSubPacketOffsetInBlock, syncBlock);
                break;
            } else {
                flushBuffer(new CompletableFuture<>(), buf.readRetainedSlice(maxDataLen), nextSubPacketOffsetInBlock, syncBlock);
                remaining -= maxDataLen;
                nextSubPacketOffsetInBlock += maxDataLen;
            }
        }
    } else {
        flushBuffer(future, buf.retain(), nextPacketOffsetInBlock, syncBlock);
    }
    trailingPartialChunkLength = dataLen % summer.getBytesPerChecksum();
    ByteBuf newBuf = alloc.directBuffer(sendBufSizePRedictor.guess(dataLen)).ensureWritable(trailingPartialChunkLength);
    if (trailingPartialChunkLength != 0) {
        buf.readerIndex(dataLen - trailingPartialChunkLength).readBytes(newBuf, trailingPartialChunkLength);
    }
    buf.release();
    this.buf = newBuf;
    nextPacketOffsetInBlock += dataLen - trailingPartialChunkLength;
}
Also used : InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) ByteBuf(org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf)
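
The multi-packet branch of flush0 relies on readRetainedSlice(length), which advances the parent buffer's readerIndex and returns a slice holding its own reference, so each packet can be flushed, acked, and released independently of the parent. A standalone sketch of that loop shape, with maxLen as a stand-in for maxDataLen:

import io.netty.buffer.ByteBuf;
import java.util.ArrayList;
import java.util.List;

static List<ByteBuf> chunk(ByteBuf buf, int maxLen) {
    List<ByteBuf> packets = new ArrayList<>();
    for (int remaining = buf.readableBytes();;) {
        if (remaining <= maxLen) {
            // The last (or only) packet takes whatever is left.
            packets.add(buf.readRetainedSlice(remaining));
            break;
        }
        packets.add(buf.readRetainedSlice(maxLen));
        remaining -= maxLen;
    }
    // Each slice holds its own reference, so the caller may release buf
    // without invalidating the returned packets.
    return packets;
}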

Example 9 with ByteBuf

use of org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf in project hbase by apache.

From class BlockingRpcConnection, method writeRequest:

/**
 * Initiates a call by sending the parameter to the remote server. Note: this is not called from
 * the Connection thread, but by other threads.
 * @see #readResponse()
 */
private void writeRequest(Call call) throws IOException {
    ByteBuf cellBlock = null;
    try {
        cellBlock = this.rpcClient.cellBlockBuilder.buildCellBlock(this.codec, this.compressor, call.cells, PooledByteBufAllocator.DEFAULT);
        CellBlockMeta cellBlockMeta;
        if (cellBlock != null) {
            cellBlockMeta = CellBlockMeta.newBuilder().setLength(cellBlock.readableBytes()).build();
        } else {
            cellBlockMeta = null;
        }
        RequestHeader requestHeader = buildRequestHeader(call, cellBlockMeta);
        setupIOstreams();
        // If the thread was interrupted during stream setup we do not know where we stand,
        // so we have to close the connection.
        if (Thread.interrupted()) {
            throw new InterruptedIOException();
        }
        // We put first as we don't want the connection to become idle.
        calls.put(call.id, call);
        // From here on we do not throw back to the caller: the call is already tracked in
        // the pending calls map, so a failed write is handled by closing the connection.
        try {
            call.callStats.setRequestSizeBytes(write(this.out, requestHeader, call.param, cellBlock));
        } catch (Throwable t) {
            if (LOG.isTraceEnabled()) {
                LOG.trace("Error while writing {}", call.toShortString());
            }
            IOException e = IPCUtil.toIOE(t);
            closeConn(e);
            return;
        }
    } finally {
        if (cellBlock != null) {
            cellBlock.release();
        }
    }
    notifyAll();
}
Also used : InterruptedIOException(java.io.InterruptedIOException) CellBlockMeta(org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.CellBlockMeta) RequestHeader(org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader) IPCUtil.buildRequestHeader(org.apache.hadoop.hbase.ipc.IPCUtil.buildRequestHeader) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) ByteBuf(org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf)
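
The structural point in writeRequest is the allocate-maybe, use, release-in-finally shape: buildCellBlock may return null when there are no cells to encode, and whatever pooled buffer it did produce is released on every exit path. A minimal sketch of that shape; buildBlock here is a hypothetical stand-in for cellBlockBuilder.buildCellBlock.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import java.io.IOException;
import java.io.OutputStream;

// Hypothetical stand-in for cellBlockBuilder.buildCellBlock: may return null.
static ByteBuf buildBlock(byte[] data) {
    return data.length == 0 ? null : Unpooled.wrappedBuffer(data);
}

static void send(OutputStream out, byte[] data) throws IOException {
    ByteBuf block = null;
    try {
        block = buildBlock(data);
        if (block != null) {
            block.readBytes(out, block.readableBytes());
        }
    } finally {
        if (block != null) {
            block.release(); // release on every path, even if the write threw
        }
    }
}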

Example 10 with ByteBuf

use of org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf in project hbase by apache.

From class CryptoAESWrapHandler, method flush:

@Override
public void flush(ChannelHandlerContext ctx) throws Exception {
    if (queue.isEmpty()) {
        return;
    }
    ByteBuf buf = null;
    try {
        ChannelPromise promise = ctx.newPromise();
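        // queue (a Netty CoalescingBufferQueue in the HBase source) holds the buffers
        // written before this flush; remove(...) coalesces them into one buffer and
        // chains their individual write promises onto 'promise'.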
        int readableBytes = queue.readableBytes();
        buf = queue.remove(readableBytes, promise);
        byte[] bytes = new byte[readableBytes];
        buf.readBytes(bytes);
        byte[] wrapperBytes = cryptoAES.wrap(bytes, 0, bytes.length);
        ChannelPromise lenPromise = ctx.newPromise();
        ctx.write(ctx.alloc().buffer(4).writeInt(wrapperBytes.length), lenPromise);
        ChannelPromise contentPromise = ctx.newPromise();
        ctx.write(Unpooled.wrappedBuffer(wrapperBytes), contentPromise);
        PromiseCombiner combiner = new PromiseCombiner();
        combiner.addAll(lenPromise, contentPromise);
        combiner.finish(promise);
        ctx.flush();
    } finally {
        if (buf != null) {
            ReferenceCountUtil.safeRelease(buf);
        }
    }
}
Also used : PromiseCombiner(org.apache.hbase.thirdparty.io.netty.util.concurrent.PromiseCombiner) ChannelPromise(org.apache.hbase.thirdparty.io.netty.channel.ChannelPromise) ByteBuf(org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf)
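
The framing at the end of flush is the part worth isolating: two writes (a 4-byte length prefix, then the wrapped payload) are folded into one aggregate promise with PromiseCombiner, so downstream observers see a single success or failure for the whole frame. A minimal sketch using unshaded Netty; it uses the executor-taking PromiseCombiner constructor, since the no-arg one in the example above is deprecated in recent Netty 4.1 releases.

import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelPromise;
import io.netty.util.concurrent.PromiseCombiner;

static void writeFramed(ChannelHandlerContext ctx, byte[] body, ChannelPromise aggregate) {
    // First write: a 4-byte big-endian length prefix.
    ChannelPromise lenPromise = ctx.newPromise();
    ctx.write(ctx.alloc().buffer(4).writeInt(body.length), lenPromise);
    // Second write: the payload itself, zero-copy wrapped.
    ChannelPromise bodyPromise = ctx.newPromise();
    ctx.write(Unpooled.wrappedBuffer(body), bodyPromise);
    // The aggregate promise completes only when both writes have completed,
    // and fails if either of them fails.
    PromiseCombiner combiner = new PromiseCombiner(ctx.executor());
    combiner.addAll(lenPromise, bodyPromise);
    combiner.finish(aggregate);
    ctx.flush();
}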

Aggregations

ByteBuf (org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf) 12
IOException (java.io.IOException) 5
InterruptedIOException (java.io.InterruptedIOException) 4
ChannelPromise (org.apache.hbase.thirdparty.io.netty.channel.ChannelPromise) 3
PromiseCombiner (org.apache.hbase.thirdparty.io.netty.util.concurrent.PromiseCombiner) 3
ExecutionException (java.util.concurrent.ExecutionException) 2
CellBlockMeta (org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.CellBlockMeta) 2
RequestHeader (org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader) 2
PacketHeader (org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader) 2
ByteBufOutputStream (org.apache.hbase.thirdparty.io.netty.buffer.ByteBufOutputStream) 2
ArrayList (java.util.ArrayList) 1
Map (java.util.Map) 1
CompletableFuture (java.util.concurrent.CompletableFuture) 1
CyclicBarrier (java.util.concurrent.CyclicBarrier) 1
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException) 1
IPCUtil.buildRequestHeader (org.apache.hadoop.hbase.ipc.IPCUtil.buildRequestHeader) 1
DataNodeProperties (org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties) 1
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo) 1
OpWriteBlockProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto) 1
Channel (org.apache.hbase.thirdparty.io.netty.channel.Channel) 1