Use of org.apache.hadoop.io.DataOutputBuffer in project hadoop by apache.
The class QuorumOutputStream, method flushAndSync:
@Override
protected void flushAndSync(boolean durable) throws IOException {
  int numReadyBytes = buf.countReadyBytes();
  if (numReadyBytes > 0) {
    int numReadyTxns = buf.countReadyTxns();
    long firstTxToFlush = buf.getFirstReadyTxId();
    assert numReadyTxns > 0;
    // Copy from our double-buffer into a new byte array. This is for
    // two reasons:
    // 1) The IPC code has no way of specifying to send only a slice of
    //    a larger array.
    // 2) because the calls to the underlying nodes are asynchronous, we
    //    need a defensive copy to avoid accidentally mutating the buffer
    //    before it is sent.
    DataOutputBuffer bufToSend = new DataOutputBuffer(numReadyBytes);
    buf.flushTo(bufToSend);
    assert bufToSend.getLength() == numReadyBytes;
    byte[] data = bufToSend.getData();
    assert data.length == bufToSend.getLength();

    QuorumCall<AsyncLogger, Void> qcall = loggers.sendEdits(
        segmentTxId, firstTxToFlush, numReadyTxns, data);
    loggers.waitForWriteQuorum(qcall, writeTimeoutMs, "sendEdits");

    // Since we successfully wrote this batch, let the loggers know. Any future
    // RPCs will thus let the loggers know of the most recent transaction, even
    // if a logger has fallen behind.
    loggers.setCommittedTxId(firstTxToFlush + numReadyTxns - 1);

    // If we don't have this dummy send, the committed TxId might be one batch
    // stale on the Journal Nodes
    if (updateCommittedTxId) {
      QuorumCall<AsyncLogger, Void> fakeCall = loggers.sendEdits(
          segmentTxId, firstTxToFlush, 0, new byte[0]);
      loggers.waitForWriteQuorum(fakeCall, writeTimeoutMs, "sendEdits");
    }
  }
}
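The defensive copy above works because bufToSend was sized to exactly numReadyBytes; in the general case, DataOutputBuffer.getData() returns the raw backing array, which can be longer than the count of valid bytes reported by getLength(). A minimal sketch of the usual trimming pattern (the values written here are illustrative, not part of the Hadoop source):

// Illustrative helper: serialize a few values and hand back only the valid bytes.
static byte[] toTrimmedBytes() throws IOException {
  DataOutputBuffer buf = new DataOutputBuffer();
  buf.writeLong(42L);                 // DataOutputBuffer extends DataOutputStream
  buf.writeUTF("hello");
  // getData() exposes the whole backing array, which grows in chunks and is
  // usually larger than the logical length, so trim before handing it off.
  return Arrays.copyOf(buf.getData(), buf.getLength());
}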
Use of org.apache.hadoop.io.DataOutputBuffer in project hbase by apache.
The class TestCompressionTest, method nativeCodecTest:
/**
 * Verify CompressionTest.testCompression() on a native codec.
 */
private void nativeCodecTest(String codecName, String libName, String codecClassName) {
  if (isCompressionAvailable(codecClassName)) {
    try {
      if (libName != null) {
        System.loadLibrary(libName);
      }
      try {
        Configuration conf = new Configuration();
        CompressionCodec codec = (CompressionCodec) ReflectionUtils.newInstance(
            conf.getClassByName(codecClassName), conf);
        DataOutputBuffer compressedDataBuffer = new DataOutputBuffer();
        CompressionOutputStream deflateFilter = codec.createOutputStream(compressedDataBuffer);
        byte[] data = new byte[1024];
        DataOutputStream deflateOut = new DataOutputStream(new BufferedOutputStream(deflateFilter));
        deflateOut.write(data, 0, data.length);
        deflateOut.flush();
        deflateFilter.finish();
        // Codec class, codec nativelib and Hadoop nativelib with codec JNIs are present
        assertTrue(CompressionTest.testCompression(codecName));
      } catch (UnsatisfiedLinkError e) {
        // Hadoop nativelib does not have codec JNIs.
        // cannot assert the codec here because the current logic of
        // CompressionTest checks only classloading, not the codec
        // usage.
        LOG.debug("No JNI for codec '" + codecName + "' " + e.getMessage());
      } catch (Exception e) {
        LOG.error(codecName, e);
      }
    } catch (UnsatisfiedLinkError e) {
      // nativelib is not available
      LOG.debug("Native lib not available: " + codecName);
      assertFalse(CompressionTest.testCompression(codecName));
    }
  } else {
    // Compression Codec class is not available
    LOG.debug("Codec class not available: " + codecName);
    assertFalse(CompressionTest.testCompression(codecName));
  }
}
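The assertion path above needs the codec's native library; with a codec that has a pure-Java fallback, the same DataOutputBuffer write path can be exercised and read back without any JNI. A minimal sketch using GzipCodec (illustrative only, not part of the test; assumes the usual Hadoop imports from org.apache.hadoop.conf, org.apache.hadoop.util, org.apache.hadoop.io and org.apache.hadoop.io.compress):

// GzipCodec falls back to java.util.zip when the native zlib is not loaded.
static byte[] gzipRoundTrip(byte[] original) throws IOException {
  Configuration conf = new Configuration();
  CompressionCodec codec = ReflectionUtils.newInstance(GzipCodec.class, conf);

  DataOutputBuffer compressed = new DataOutputBuffer();
  CompressionOutputStream out = codec.createOutputStream(compressed);
  out.write(original);
  out.finish();                                        // flush the compressed trailer

  DataInputBuffer in = new DataInputBuffer();
  in.reset(compressed.getData(), compressed.getLength());
  CompressionInputStream decompressed = codec.createInputStream(in);
  byte[] roundTrip = new byte[original.length];
  IOUtils.readFully(decompressed, roundTrip, 0, roundTrip.length);
  return roundTrip;                                    // equals original on success
}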
Use of org.apache.hadoop.io.DataOutputBuffer in project hadoop by apache.
The class TestBlockToken, method testCraftedProtobufBlockTokenIdentifier:
public void testCraftedProtobufBlockTokenIdentifier(BlockTokenIdentifier identifier,
    boolean expectIOE, boolean expectRTE) throws IOException {
  DataOutputBuffer dob = new DataOutputBuffer(4096);
  DataInputBuffer dib = new DataInputBuffer();

  identifier.writeProtobuf(dob);
  byte[] identBytes = Arrays.copyOf(dob.getData(), dob.getLength());

  BlockTokenIdentifier legacyToken = new BlockTokenIdentifier();
  BlockTokenIdentifier protobufToken = new BlockTokenIdentifier();
  BlockTokenIdentifier readToken = new BlockTokenIdentifier();

  boolean invalidLegacyMessage = false;
  try {
    dib.reset(identBytes, identBytes.length);
    legacyToken.readFieldsLegacy(dib);
  } catch (IOException e) {
    if (!expectIOE) {
      fail("Received IOException but it was not expected.");
    }
    invalidLegacyMessage = true;
  } catch (RuntimeException e) {
    if (!expectRTE) {
      fail("Received RuntimeException but it was not expected.");
    }
    invalidLegacyMessage = true;
  }
  assertTrue(invalidLegacyMessage);

  dib.reset(identBytes, identBytes.length);
  protobufToken.readFieldsProtobuf(dib);

  dib.reset(identBytes, identBytes.length);
  readToken.readFieldsProtobuf(dib);
  assertEquals(protobufToken, readToken);
}
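The write / reset / readFields sequence in this test is the general DataOutputBuffer and DataInputBuffer round-trip pattern for any Writable. A minimal sketch with org.apache.hadoop.io.Text (illustrative, not taken from the test):

// Serialize a Writable into memory, then deserialize a copy from the same bytes.
static Text roundTrip(Text original) throws IOException {
  DataOutputBuffer out = new DataOutputBuffer();
  original.write(out);                          // serialize into the buffer

  DataInputBuffer in = new DataInputBuffer();
  in.reset(out.getData(), out.getLength());     // expose only the valid prefix

  Text copy = new Text();
  copy.readFields(in);                          // deserialize from the buffered bytes
  return copy;
}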
Use of org.apache.hadoop.io.DataOutputBuffer in project hadoop by apache.
The class QJMTestUtil, method createTxnData:
public static byte[] createTxnData(int startTxn, int numTxns) throws Exception {
  DataOutputBuffer buf = new DataOutputBuffer();
  FSEditLogOp.Writer writer = new FSEditLogOp.Writer(buf);

  for (long txid = startTxn; txid < startTxn + numTxns; txid++) {
    FSEditLogOp op = NameNodeAdapter.createMkdirOp("tx " + txid);
    op.setTransactionId(txid);
    writer.writeOp(op);
  }
  return Arrays.copyOf(buf.getData(), buf.getLength());
}
Use of org.apache.hadoop.io.DataOutputBuffer in project hadoop by apache.
The class QJMTestUtil, method createGabageTxns:
/**
 * Generate byte array representing a set of GarbageMkdirOp
 */
public static byte[] createGabageTxns(long startTxId, int numTxns) throws IOException {
  DataOutputBuffer buf = new DataOutputBuffer();
  FSEditLogOp.Writer writer = new FSEditLogOp.Writer(buf);

  for (long txid = startTxId; txid < startTxId + numTxns; txid++) {
    FSEditLogOp op = new TestEditLog.GarbageMkdirOp();
    op.setTransactionId(txid);
    writer.writeOp(op);
  }
  return Arrays.copyOf(buf.getData(), buf.getLength());
}
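Both helpers allocate a fresh DataOutputBuffer per call. When many batches are built in a loop, the buffer can instead be reused through reset(), which rewinds the logical length while keeping the backing array. A small sketch of that reuse pattern (the per-batch payload and the sink list are hypothetical, not from QJMTestUtil):

// Illustrative buffer reuse: reset() rewinds the length, capacity is retained.
static void writeBatches(List<byte[]> sink, int numBatches) throws IOException {
  DataOutputBuffer buf = new DataOutputBuffer();
  for (int i = 0; i < numBatches; i++) {
    buf.reset();                     // logical length back to 0
    buf.writeInt(i);                 // hypothetical per-batch payload
    buf.writeUTF("batch-" + i);
    sink.add(Arrays.copyOf(buf.getData(), buf.getLength()));
  }
}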