Use of net.jpountz.lz4.LZ4Compressor in project kafka by apache.
The class KafkaLZ4BlockInputStream, method detectBrokenLz4Version.
/**
* Checks whether the version of lz4 on the classpath has the fix for reading from ByteBuffers with
* non-zero array offsets (see https://github.com/lz4/lz4-java/pull/65)
*/
static void detectBrokenLz4Version() {
byte[] source = new byte[] { 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3 };
final LZ4Compressor compressor = LZ4Factory.fastestInstance().fastCompressor();
final byte[] compressed = new byte[compressor.maxCompressedLength(source.length)];
final int compressedLength = compressor.compress(source, 0, source.length, compressed, 0, compressed.length);
// allocate an array-backed ByteBuffer with non-zero array-offset containing the compressed data
// a buggy decompressor will read the data from the beginning of the underlying array instead of
// the beginning of the ByteBuffer, failing to decompress the invalid data.
final byte[] zeroes = { 0, 0, 0, 0, 0 };
ByteBuffer nonZeroOffsetBuffer = ByteBuffer.allocate(zeroes.length + compressed.length) // allocates the backing array with extra space to offset the data
.put(zeroes) // prepend invalid bytes (zeros) before the compressed data in the array
.slice() // create a new ByteBuffer whose index 0 maps to a non-zero offset in the backing array
.put(compressed); // write the compressed data at the beginning of this new buffer
ByteBuffer dest = ByteBuffer.allocate(source.length);
try {
DECOMPRESSOR.decompress(nonZeroOffsetBuffer, 0, compressedLength, dest, 0, source.length);
} catch (Exception e) {
throw new RuntimeException("Kafka has detected a buggy lz4-java library (< 1.4.x) on the classpath."
+ " If you are using Kafka client libraries, make sure your application does not"
+ " accidentally override the version provided by Kafka or include multiple versions"
+ " of the library on the classpath. The lz4-java version on the classpath should"
+ " match the version the Kafka client libraries depend on. Adding -verbose:class"
+ " to your JVM arguments may help understand which lz4-java version is getting loaded.", e);
}
}
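For context, a fixed lz4-java (1.4.x or later) completes the same probe without throwing. The following stand-alone round trip is only an illustrative sketch, not Kafka code; the method and variable names are invented, and it relies on the same net.jpountz.lz4 calls used above.
import java.nio.ByteBuffer;
import net.jpountz.lz4.LZ4Compressor;
import net.jpountz.lz4.LZ4Factory;
import net.jpountz.lz4.LZ4SafeDecompressor;
static void roundTripWithNonZeroOffset() {
byte[] source = new byte[] { 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3 };
LZ4Factory factory = LZ4Factory.fastestInstance();
LZ4Compressor compressor = factory.fastCompressor();
byte[] compressed = new byte[compressor.maxCompressedLength(source.length)];
int compressedLength = compressor.compress(source, 0, source.length, compressed, 0, compressed.length);
// put five junk bytes first, then slice() so index 0 of the new buffer maps to array offset 5
ByteBuffer nonZeroOffset = ByteBuffer.allocate(5 + compressed.length).put(new byte[5]).slice().put(compressed);
ByteBuffer dest = ByteBuffer.allocate(source.length);
LZ4SafeDecompressor decompressor = factory.safeDecompressor();
// a fixed decompressor honours the buffer's array offset; a broken one reads the junk bytes
// at the start of the backing array and fails, which is exactly what the check above exploits
int restored = decompressor.decompress(nonZeroOffset, 0, compressedLength, dest, 0, source.length);
if (restored != source.length) throw new IllegalStateException("unexpected decompressed length " + restored);
}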
Use of net.jpountz.lz4.LZ4Compressor in project cassandra by apache.
The class OutboundTcpConnection, method connect.
@SuppressWarnings("resource")
private boolean connect() {
logger.debug("Attempting to connect to {}", poolReference.endPoint());
long start = System.nanoTime();
long timeout = TimeUnit.MILLISECONDS.toNanos(DatabaseDescriptor.getRpcTimeout());
while (System.nanoTime() - start < timeout) {
targetVersion = MessagingService.instance().getVersion(poolReference.endPoint());
try {
socket = poolReference.newSocket();
socket.setKeepAlive(true);
if (isLocalDC(poolReference.endPoint())) {
socket.setTcpNoDelay(INTRADC_TCP_NODELAY);
} else {
socket.setTcpNoDelay(DatabaseDescriptor.getInterDCTcpNoDelay());
}
if (DatabaseDescriptor.getInternodeSendBufferSize() > 0) {
try {
socket.setSendBufferSize(DatabaseDescriptor.getInternodeSendBufferSize());
} catch (SocketException se) {
logger.warn("Failed to set send buffer size on internode socket.", se);
}
}
// SocketChannel may be null when using SSL
WritableByteChannel ch = socket.getChannel();
out = new BufferedDataOutputStreamPlus(ch != null ? ch : Channels.newChannel(socket.getOutputStream()), BUFFER_SIZE);
out.writeInt(MessagingService.PROTOCOL_MAGIC);
writeHeader(out, targetVersion, shouldCompressConnection());
out.flush();
DataInputStream in = new DataInputStream(socket.getInputStream());
int maxTargetVersion = handshakeVersion(in);
if (maxTargetVersion == NO_VERSION) {
// no version is returned, so disconnect and try again
logger.trace("Target max version is {}; no version information yet, will retry", maxTargetVersion);
disconnect();
continue;
} else {
MessagingService.instance().setVersion(poolReference.endPoint(), maxTargetVersion);
}
if (targetVersion > maxTargetVersion) {
logger.trace("Target max version is {}; will reconnect with that version", maxTargetVersion);
try {
if (DatabaseDescriptor.getSeeds().contains(poolReference.endPoint()))
logger.warn("Seed gossip version is {}; will not connect with that version", maxTargetVersion);
} catch (Throwable e) {
// If invalid yaml has been added to the config since startup, getSeeds() will throw an AssertionError
// Additionally, third party seed providers may throw exceptions if network is flakey
// Regardless of what's thrown, we must catch it, disconnect, and try again
JVMStabilityInspector.inspectThrowable(e);
logger.warn("Configuration error prevented outbound connection: {}", e.getLocalizedMessage());
} finally {
disconnect();
return false;
}
}
if (targetVersion < maxTargetVersion && targetVersion < MessagingService.current_version) {
logger.trace("Detected higher max version {} (using {}); will reconnect when queued messages are done", maxTargetVersion, targetVersion);
softCloseSocket();
}
out.writeInt(MessagingService.current_version);
CompactEndpointSerializationHelper.serialize(FBUtilities.getBroadcastAddress(), out);
if (shouldCompressConnection()) {
out.flush();
logger.trace("Upgrading OutputStream to {} to be compressed", poolReference.endPoint());
// TODO: custom LZ4 OS that supports BB write methods
LZ4Compressor compressor = LZ4Factory.fastestInstance().fastCompressor();
Checksum checksum = XXHashFactory.fastestInstance().newStreamingHash32(LZ4_HASH_SEED).asChecksum();
out = new WrappedDataOutputStreamPlus(new LZ4BlockOutputStream(socket.getOutputStream(),
1 << 14, // 16k block size
compressor,
checksum,
true)); // no async flushing
}
logger.debug("Done connecting to {}", poolReference.endPoint());
return true;
} catch (SSLHandshakeException e) {
logger.error("SSL handshake error for outbound connection to " + socket, e);
socket = null;
// SSL errors won't be recoverable within timeout period so we'll just abort
return false;
} catch (IOException e) {
socket = null;
logger.debug("Unable to connect to {}", poolReference.endPoint(), e);
Uninterruptibles.sleepUninterruptibly(OPEN_RETRY_DELAY, TimeUnit.MILLISECONDS);
}
}
return false;
}
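The reader on the other end of such a connection has to undo the same framing. Purely as a rough sketch (not Cassandra's actual inbound code), and assuming the peer uses the same LZ4_HASH_SEED, the receiving socket's stream could be wrapped like this:
import java.io.DataInputStream;
import java.io.InputStream;
import java.util.zip.Checksum;
import net.jpountz.lz4.LZ4BlockInputStream;
import net.jpountz.lz4.LZ4Factory;
import net.jpountz.lz4.LZ4FastDecompressor;
import net.jpountz.xxhash.XXHashFactory;
// Hypothetical helper: mirror of the writer above for the receiving side of the socket.
static DataInputStream wrapCompressedInput(InputStream socketIn, int lz4HashSeed) {
LZ4FastDecompressor decompressor = LZ4Factory.fastestInstance().fastDecompressor();
Checksum checksum = XXHashFactory.fastestInstance().newStreamingHash32(lz4HashSeed).asChecksum();
// LZ4BlockInputStream reads the per-block headers written by LZ4BlockOutputStream,
// so the 1 << 14 block size chosen on the write side does not need to be repeated here
return new DataInputStream(new LZ4BlockInputStream(socketIn, decompressor, checksum));
}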
Use of net.jpountz.lz4.LZ4Compressor in project druid by druid-io.
The class LZ4Transcoder, method compress.
@Override
protected byte[] compress(byte[] in) {
if (in == null) {
throw new NullPointerException("Can't compress null");
}
LZ4Compressor compressor = lz4Factory.fastCompressor();
byte[] out = new byte[compressor.maxCompressedLength(in.length)];
int compressedLength = compressor.compress(in, 0, in.length, out, 0);
getLogger().debug("Compressed %d bytes to %d", in.length, compressedLength);
return ByteBuffer.allocate(Ints.BYTES + compressedLength).putInt(in.length).put(out, 0, compressedLength).array();
}
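The transcoder's inverse operation is not shown here. Purely as an illustrative sketch (error handling and exact layout handling are ours, not Druid's), the 4-byte uncompressed-length prefix written above would be consumed like this:
// Sketch of the matching decompress step, assuming the length prefix produced by compress() above;
// lz4Factory is the same field the compress() method uses. Not the project's actual implementation.
protected byte[] decompress(byte[] in) {
if (in == null) {
throw new NullPointerException("Can't decompress null");
}
int uncompressedLength = ByteBuffer.wrap(in).getInt(); // length prefix written by compress()
byte[] out = new byte[uncompressedLength];
// the fast decompressor needs the exact uncompressed length, which is why it was prefixed
lz4Factory.fastDecompressor().decompress(in, Ints.BYTES, out, 0, uncompressedLength);
return out;
}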
Use of org.apache.hadoop.io.compress.lz4.Lz4Compressor in project hadoop by apache.
The class TestCompressorDecompressor, method testCompressorDecompressor.
@Test
public void testCompressorDecompressor() {
// no more for this data
int SIZE = 44 * 1024;
byte[] rawData = generate(SIZE);
try {
CompressDecompressTester.of(rawData)
.withCompressDecompressPair(new SnappyCompressor(), new SnappyDecompressor())
.withCompressDecompressPair(new Lz4Compressor(), new Lz4Decompressor())
.withCompressDecompressPair(new BuiltInZlibDeflater(), new BuiltInZlibInflater())
.withTestCases(ImmutableSet.of(CompressionTestStrategy.COMPRESS_DECOMPRESS_SINGLE_BLOCK,
CompressionTestStrategy.COMPRESS_DECOMPRESS_BLOCK,
CompressionTestStrategy.COMPRESS_DECOMPRESS_ERRORS,
CompressionTestStrategy.COMPRESS_DECOMPRESS_WITH_EMPTY_STREAM))
.test();
} catch (Exception ex) {
GenericTestUtils.assertExceptionContains("testCompressorDecompressor error !!!", ex);
}
}
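The tester above drives the Lz4Compressor/Lz4Decompressor pair through several strategies. A minimal single-block round trip through the same Hadoop Compressor/Decompressor API would look roughly like the sketch below; the buffer sizes are assumptions, generate() is the test class's own helper, and the LZ4 codec must be available in the running Hadoop build (native libhadoop on older versions, bundled lz4-java on newer ones).
import org.apache.hadoop.io.compress.lz4.Lz4Compressor;
import org.apache.hadoop.io.compress.lz4.Lz4Decompressor;
// Illustrative single-block round trip, roughly what COMPRESS_DECOMPRESS_SINGLE_BLOCK exercises.
byte[] rawData = generate(44 * 1024);
Lz4Compressor compressor = new Lz4Compressor();
compressor.setInput(rawData, 0, rawData.length);
compressor.finish();
byte[] compressed = new byte[rawData.length * 2]; // assumed generous bound for one compressed block
int compressedLen = compressor.compress(compressed, 0, compressed.length);
Lz4Decompressor decompressor = new Lz4Decompressor();
decompressor.setInput(compressed, 0, compressedLen);
byte[] restored = new byte[rawData.length];
int restoredLen = decompressor.decompress(restored, 0, restored.length);
// restoredLen should equal rawData.length and restored should match rawData byte for byte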
Use of org.apache.hadoop.io.compress.lz4.Lz4Compressor in project hadoop by apache.
The class TestLz4CompressorDecompressor, method testCompressorCompressAIOBException.
// test on ArrayIndexOutOfBoundsException in {@code compressor.compress()}
@Test
public void testCompressorCompressAIOBException() {
try {
Lz4Compressor compressor = new Lz4Compressor();
byte[] bytes = generate(1024 * 6);
compressor.setInput(bytes, 0, bytes.length);
compressor.compress(new byte[] {}, 0, -1);
fail("testCompressorCompressAIOBException error !!!");
} catch (ArrayIndexOutOfBoundsException ex) {
// expected
} catch (Exception e) {
fail("testCompressorCompressAIOBException ex error !!!");
}
}
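For contrast, a well-formed call on the same API does not trip the bounds check. The following sketch is ours, not part of the Hadoop test; the doubled output buffer is only an assumed worst-case bound.
// Valid counterpart: same compressor, but with a real output buffer and a non-negative length.
Lz4Compressor compressor = new Lz4Compressor();
byte[] bytes = generate(1024 * 6);
compressor.setInput(bytes, 0, bytes.length);
compressor.finish();
byte[] output = new byte[bytes.length * 2]; // assumed bound; large enough for one compressed block
int written = compressor.compress(output, 0, output.length); // valid offset and length, so no AIOOBE
// written is the number of compressed bytes produced for this block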