Use of org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader in project hbase by apache.
The class FanOutOneBlockAsyncDFSOutput, method flushBuffer.
private Promise<Void> flushBuffer(ByteBuf dataBuf, long nextPacketOffsetInBlock, boolean syncBlock) {
  int dataLen = dataBuf.readableBytes();
  int chunkLen = summer.getBytesPerChecksum();
  int trailingPartialChunkLen = dataLen % chunkLen;
  int numChecks = dataLen / chunkLen + (trailingPartialChunkLen != 0 ? 1 : 0);
  int checksumLen = numChecks * summer.getChecksumSize();
  ByteBuf checksumBuf = alloc.directBuffer(checksumLen);
  summer.calculateChunkedSums(dataBuf.nioBuffer(), checksumBuf.nioBuffer(0, checksumLen));
  checksumBuf.writerIndex(checksumLen);
  PacketHeader header = new PacketHeader(4 + checksumLen + dataLen, nextPacketOffsetInBlock,
      nextPacketSeqno, false, dataLen, syncBlock);
  int headerLen = header.getSerializedSize();
  ByteBuf headerBuf = alloc.buffer(headerLen);
  header.putInBuffer(headerBuf.nioBuffer(0, headerLen));
  headerBuf.writerIndex(headerLen);
  long ackedLength = nextPacketOffsetInBlock + dataLen;
  Promise<Void> promise = eventLoop.<Void> newPromise().addListener(future -> {
    if (future.isSuccess()) {
      locatedBlock.getBlock().setNumBytes(ackedLength);
    }
  });
  waitingAckQueue.addLast(new Callback(promise, ackedLength, datanodeList));
  for (Channel ch : datanodeList) {
    ch.write(headerBuf.duplicate().retain());
    ch.write(checksumBuf.duplicate().retain());
    ch.writeAndFlush(dataBuf.duplicate().retain());
  }
  checksumBuf.release();
  headerBuf.release();
  dataBuf.release();
  nextPacketSeqno++;
  return promise;
}
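The packet length passed to the PacketHeader constructor is 4 + checksumLen + dataLen: the 4 accounts for the four-byte payload-length integer that prefixes each packet on the wire, which is the same quantity DFSPacket.writeTo below computes as HdfsConstants.BYTES_IN_INTEGER + dataLen + checksumLen. A minimal sketch of the framing arithmetic; the class name and the buffer/checksum sizes are assumed example values, not part of the original code:

// Sketch: packet framing arithmetic as in flushBuffer, with assumed
// example values (512-byte chunks, 4-byte checksums).
public class PacketFramingSketch {
  public static void main(String[] args) {
    int dataLen = 1300;         // assumed payload size in bytes
    int bytesPerChecksum = 512; // stands in for summer.getBytesPerChecksum()
    int checksumSize = 4;       // stands in for summer.getChecksumSize()
    // One checksum per full chunk, plus one for a trailing partial chunk.
    int numChunks = dataLen / bytesPerChecksum + (dataLen % bytesPerChecksum != 0 ? 1 : 0);
    int checksumLen = numChunks * checksumSize;
    // The on-wire packet length covers the 4-byte length field itself,
    // the checksums, and the data, but not the serialized header.
    int pktLen = 4 + checksumLen + dataLen;
    System.out.println("chunks=" + numChunks + " checksumLen=" + checksumLen + " pktLen=" + pktLen);
    // prints: chunks=3 checksumLen=12 pktLen=1316
  }
}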
Use of org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader in project hbase by apache.
The class FanOutOneBlockAsyncDFSOutput, method endBlock.
private void endBlock(Promise<Void> promise, long size) {
  if (state != State.STREAMING) {
    promise.tryFailure(new IOException("stream already broken"));
    return;
  }
  if (!waitingAckQueue.isEmpty()) {
    promise.tryFailure(new IllegalStateException("should call flush first before calling close"));
    return;
  }
  state = State.CLOSING;
  PacketHeader header = new PacketHeader(4, size, nextPacketSeqno, true, 0, false);
  buf.release();
  buf = null;
  int headerLen = header.getSerializedSize();
  ByteBuf headerBuf = alloc.directBuffer(headerLen);
  header.putInBuffer(headerBuf.nioBuffer(0, headerLen));
  headerBuf.writerIndex(headerLen);
  waitingAckQueue.add(new Callback(promise, size, datanodeList));
  datanodeList.forEach(ch -> ch.writeAndFlush(headerBuf.duplicate().retain()));
  headerBuf.release();
}
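Both flushBuffer and endBlock fan the same ByteBuf out to every datanode channel via duplicate().retain(): each duplicate shares the underlying bytes but adds a reference that the channel releases once its write completes, and the trailing release() drops the writer's own reference. A minimal sketch of that reference-counting pattern with plain Netty buffers and no channels; the class name and replica count are illustrative:

// Sketch: the duplicate().retain() fan-out pattern from flushBuffer/endBlock,
// shown with standalone Netty buffers.
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

public class FanOutRefCountSketch {
  public static void main(String[] args) {
    ByteBuf headerBuf = Unpooled.buffer(16).writeInt(42);
    int replicas = 3; // assumed pipeline width
    ByteBuf[] perChannel = new ByteBuf[replicas];
    for (int i = 0; i < replicas; i++) {
      // Each duplicate shares the bytes but holds its own reference;
      // in the real code ch.write(...) releases it after the socket write.
      perChannel[i] = headerBuf.duplicate().retain();
    }
    System.out.println("refCnt after fan-out: " + headerBuf.refCnt()); // 4
    for (ByteBuf b : perChannel) {
      b.release(); // what the channel does once its write completes
    }
    headerBuf.release(); // the writer drops its own reference
    System.out.println("refCnt at end: " + headerBuf.refCnt()); // 0
  }
}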
Use of org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader in project hadoop by apache.
The class DFSPacket, method writeTo.
/**
 * Write the full packet, including the header, to the given output stream.
 *
 * @throws IOException
 */
public synchronized void writeTo(DataOutputStream stm) throws IOException {
  checkBuffer();
  final int dataLen = dataPos - dataStart;
  final int checksumLen = checksumPos - checksumStart;
  final int pktLen = HdfsConstants.BYTES_IN_INTEGER + dataLen + checksumLen;
  PacketHeader header = new PacketHeader(pktLen, offsetInBlock, seqno, lastPacketInBlock,
      dataLen, syncBlock);
  if (checksumPos != dataStart) {
    // Move the checksum to cover the gap. This can happen for the last
    // packet or during an hflush/hsync call.
    System.arraycopy(buf, checksumStart, buf, dataStart - checksumLen, checksumLen);
    checksumPos = dataStart;
    checksumStart = checksumPos - checksumLen;
  }
  final int headerStart = checksumStart - header.getSerializedSize();
  assert checksumStart + 1 >= header.getSerializedSize();
  assert headerStart >= 0;
  assert headerStart + header.getSerializedSize() == checksumStart;
  // Copy the header data into the buffer immediately preceding the checksum
  // data.
  System.arraycopy(header.getBytes(), 0, buf, headerStart, header.getSerializedSize());
  // corrupt the data for testing.
  if (DFSClientFaultInjector.get().corruptPacket()) {
    buf[headerStart + header.getSerializedSize() + checksumLen + dataLen - 1] ^= 0xff;
  }
  // Write the now contiguous full packet to the output stream.
  stm.write(buf, headerStart, header.getSerializedSize() + checksumLen + dataLen);
  // undo corruption.
  if (DFSClientFaultInjector.get().uncorruptPacket()) {
    buf[headerStart + header.getSerializedSize() + checksumLen + dataLen - 1] ^= 0xff;
  }
}
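The arraycopy in the middle slides the checksums right so they end exactly where the data begins; the serialized header is then copied immediately before them, leaving one contiguous [header][checksums][data] region that a single stm.write covers. A minimal sketch of the gap-closing step; the class name and all offsets are assumed example values:

// Sketch: closing the gap between checksums and data, as writeTo does
// when a packet is not full. Sizes are assumed example values.
public class GapCloseSketch {
  public static void main(String[] args) {
    byte[] buf = new byte[64];
    int checksumStart = 10, checksumLen = 4; // checksums written near the front
    int dataStart = 20;                      // data region begins here
    int checksumPos = checksumStart + checksumLen;
    if (checksumPos != dataStart) {
      // Slide the checksums right so they end exactly where the data begins.
      System.arraycopy(buf, checksumStart, buf, dataStart - checksumLen, checksumLen);
      checksumPos = dataStart;
      checksumStart = checksumPos - checksumLen;
    }
    // Now [header][checksums][data] can go out in one contiguous write.
    System.out.println("checksumStart=" + checksumStart + " checksumPos=" + checksumPos);
    // prints: checksumStart=16 checksumPos=20
  }
}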
Use of org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader in project hadoop by apache.
The class BlockReaderRemote, method readTrailingEmptyPacket.
private void readTrailingEmptyPacket() throws IOException {
  LOG.trace("Reading empty packet at end of read");
  packetReceiver.receiveNextPacket(in);
  PacketHeader trailer = packetReceiver.getHeader();
  if (!trailer.isLastPacketInBlock() || trailer.getDataLen() != 0) {
    throw new IOException("Expected empty end-of-read packet! Header: " + trailer);
  }
}
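This check is the receiving-side counterpart of the trailer endBlock builds above: the last packet of a block must report lastPacketInBlock = true and a data length of zero. A minimal sketch tying the two snippets together, using only the PacketHeader calls that appear in them; the class name and the argument values 1024 and 7 are arbitrary examples:

// Sketch: the trailer endBlock sends passes the check that
// readTrailingEmptyPacket applies.
import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;

public class TrailerRoundTripSketch {
  public static void main(String[] args) throws IOException {
    // As built by endBlock: pktLen 4, last packet, zero data, no sync.
    PacketHeader trailer = new PacketHeader(4, 1024L, 7L, true, 0, false);
    // As checked by readTrailingEmptyPacket:
    if (!trailer.isLastPacketInBlock() || trailer.getDataLen() != 0) {
      throw new IOException("Expected empty end-of-read packet! Header: " + trailer);
    }
    System.out.println("trailer accepted: " + trailer);
  }
}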
Use of org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader in project hadoop by apache.
The class BlockReaderRemote, method readNextPacket.
private void readNextPacket() throws IOException {
  // Read packet headers.
  packetReceiver.receiveNextPacket(in);
  PacketHeader curHeader = packetReceiver.getHeader();
  curDataSlice = packetReceiver.getDataSlice();
  assert curDataSlice.capacity() == curHeader.getDataLen();
  LOG.trace("DFSClient readNextPacket got header {}", curHeader);
  // Sanity check the lengths.
  if (!curHeader.sanityCheck(lastSeqNo)) {
    throw new IOException("BlockReader: error in packet header " + curHeader);
  }
  if (curHeader.getDataLen() > 0) {
    int chunks = 1 + (curHeader.getDataLen() - 1) / bytesPerChecksum;
    int checksumsLen = chunks * checksumSize;
    assert packetReceiver.getChecksumSlice().capacity() == checksumsLen
        : "checksum slice capacity=" + packetReceiver.getChecksumSlice().capacity()
            + " checksumsLen=" + checksumsLen;
    lastSeqNo = curHeader.getSeqno();
    if (verifyChecksum && curDataSlice.remaining() > 0) {
      // N.B.: the checksum error offset reported here is actually
      // relative to the start of the block, not the start of the file.
      // This is slightly misleading, but preserves the behavior from
      // the older BlockReader.
      checksum.verifyChunkedSums(curDataSlice, packetReceiver.getChecksumSlice(),
          filename, curHeader.getOffsetInBlock());
    }
    bytesNeededToFinish -= curHeader.getDataLen();
  }
  // First packet will include some data prior to the first byte
  // the user requested. Skip it.
  if (curHeader.getOffsetInBlock() < startOffset) {
    int newPos = (int) (startOffset - curHeader.getOffsetInBlock());
    curDataSlice.position(newPos);
  }
  // If we've now satisfied the whole client read, read one last packet
  // header, which should be empty.
  if (bytesNeededToFinish <= 0) {
    readTrailingEmptyPacket();
    if (verifyChecksum) {
      sendReadResult(Status.CHECKSUM_OK);
    } else {
      sendReadResult(Status.SUCCESS);
    }
  }
}
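The expression 1 + (dataLen - 1) / bytesPerChecksum is integer ceiling division: one checksum per full chunk, plus one for a trailing partial chunk. It agrees with the numChecks computation in flushBuffer above for every dataLen > 0. A quick check of the equivalence; the class name and the test values are assumed examples:

// Sketch: the two chunk-count formulas used in these snippets agree.
public class ChunkCountSketch {
  public static void main(String[] args) {
    int bytesPerChecksum = 512; // assumed chunk size
    for (int dataLen : new int[] { 1, 511, 512, 513, 1300 }) {
      // readNextPacket's form: ceiling division, valid for dataLen > 0.
      int a = 1 + (dataLen - 1) / bytesPerChecksum;
      // flushBuffer's form: full chunks plus one for a partial trailer.
      int b = dataLen / bytesPerChecksum + (dataLen % bytesPerChecksum != 0 ? 1 : 0);
      System.out.println("dataLen=" + dataLen + " -> " + a + " == " + b);
    }
  }
}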