Use of org.elasticsearch.common.io.stream.InputStreamStreamInput in project elasticsearch by elastic.
The class DeflateCompressor, method streamInput.
@Override
public StreamInput streamInput(StreamInput in) throws IOException {
    final byte[] headerBytes = new byte[HEADER.length];
    int len = 0;
    while (len < headerBytes.length) {
        final int read = in.read(headerBytes, len, headerBytes.length - len);
        if (read == -1) {
            break;
        }
        len += read;
    }
    if (len != HEADER.length || Arrays.equals(headerBytes, HEADER) == false) {
        throw new IllegalArgumentException("Input stream is not compressed with DEFLATE!");
    }
    final boolean nowrap = true;
    final Inflater inflater = new Inflater(nowrap);
    InputStream decompressedIn = new InflaterInputStream(in, inflater, BUFFER_SIZE);
    decompressedIn = new BufferedInputStream(decompressedIn, BUFFER_SIZE);
    return new InputStreamStreamInput(decompressedIn) {
        final AtomicBoolean closed = new AtomicBoolean(false);

        @Override
        public void close() throws IOException {
            try {
                super.close();
            } finally {
                if (closed.compareAndSet(false, true)) {
                    // important to release native memory
                    inflater.end();
                }
            }
        }
    };
}
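For context, a minimal round-trip sketch of how this method is typically exercised. It assumes the class also exposes the matching streamOutput(StreamOutput) side of Elasticsearch's Compressor interface and uses BytesStreamOutput as a scratch buffer; the setup is illustrative, not code from the project.

DeflateCompressor compressor = new DeflateCompressor();
BytesStreamOutput buffer = new BytesStreamOutput();
// write through the compressing side (assumed counterpart of streamInput)
try (StreamOutput compressed = compressor.streamOutput(buffer)) {
    compressed.writeString("hello, deflate");
}
// streamInput() validates the DEFLATE header, then inflates transparently
try (StreamInput decompressed = compressor.streamInput(buffer.bytes().streamInput())) {
    assert "hello, deflate".equals(decompressed.readString());
}

The try-with-resources on the read side matters: the anonymous close() override above is the only place inflater.end() is called, and skipping it would leak the Inflater's native memory.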
Use of org.elasticsearch.common.io.stream.InputStreamStreamInput in project crate by crate.
The class NodeStatsContextTest, method testStreamEmptyContext.
@Test
public void testStreamEmptyContext() throws Exception {
    NodeStatsContext ctx1 = new NodeStatsContext(false);
    // serialize an empty context
    ByteArrayOutputStream outBuffer = new ByteArrayOutputStream();
    StreamOutput out = new OutputStreamStreamOutput(outBuffer);
    ctx1.writeTo(out);
    // deserialize into a fresh context and verify every field is null
    ByteArrayInputStream inBuffer = new ByteArrayInputStream(outBuffer.toByteArray());
    InputStreamStreamInput in = new InputStreamStreamInput(inBuffer);
    NodeStatsContext ctx2 = new NodeStatsContext(false);
    ctx2.readFrom(in);
    assertNull(ctx2.id());
    assertNull(ctx2.name());
    assertNull(ctx2.hostname());
    assertNull(ctx2.restUrl());
    assertNull(ctx2.port());
    assertNull(ctx2.jvmStats());
    assertNull(ctx2.osInfo());
    assertNull(ctx2.processStats());
    assertNull(ctx2.osStats());
    assertNull(ctx2.extendedOsStats());
    assertNull(ctx2.networkStats());
    assertNull(ctx2.extendedProcessCpuStats());
    assertNull(ctx2.extendedFsStats());
    assertNull(ctx2.threadPools());
}
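For comparison, the same round trip can be written without the hand-built ByteArrayOutputStream/ByteArrayInputStream pair by using Elasticsearch's BytesStreamOutput; a sketch, assuming NodeStatsContext keeps the writeTo/readFrom pair used above:

NodeStatsContext ctx1 = new NodeStatsContext(false);
BytesStreamOutput out = new BytesStreamOutput();
ctx1.writeTo(out);
// bytes().streamInput() wraps the written bytes directly in a StreamInput
StreamInput in = out.bytes().streamInput();
NodeStatsContext ctx2 = new NodeStatsContext(false);
ctx2.readFrom(in);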
Use of org.elasticsearch.common.io.stream.InputStreamStreamInput in project elasticsearch by elastic.
The class TranslogReader, method open.
/**
 * Given a file channel, opens a {@link TranslogReader}, taking care of checking and validating the file header.
 *
 * @param channel the translog file channel
 * @param path the path to the translog
 * @param checkpoint the translog checkpoint
 * @param translogUUID the translog UUID
 * @return a new TranslogReader
 * @throws IOException if any of the file operations resulted in an I/O exception
 */
public static TranslogReader open(final FileChannel channel, final Path path, final Checkpoint checkpoint, final String translogUUID) throws IOException {
    try {
        // don't close: the caller owns the channel
        InputStreamStreamInput headerStream = new InputStreamStreamInput(java.nio.channels.Channels.newInputStream(channel));
        // Lucene's CodecUtil writes a magic number of 0x3FD76C17 with the
        // header, in binary this looks like:
        //
        // binary: 0011 1111 1101 0111 0110 1100 0001 0111
        // hex   :    3    f    d    7    6    c    1    7
        //
        // With version 0 of the translog, the first byte is the
        // Operation.Type, which will always be between 0-4, so we know if
        // we grab the first byte, it can be:
        // 0x3f => Lucene's magic number, so we can assume it's version 1 or later
        // 0x00 => version 0 of the translog
        //
        // otherwise the first byte of the translog is corrupted and we
        // should bail
        byte b1 = headerStream.readByte();
        if (b1 == LUCENE_CODEC_HEADER_BYTE) {
            // Read 3 more bytes, meaning a whole integer has been read
            byte b2 = headerStream.readByte();
            byte b3 = headerStream.readByte();
            byte b4 = headerStream.readByte();
            // Convert the 4 bytes that were read into an integer
            int header = ((b1 & 0xFF) << 24) + ((b2 & 0xFF) << 16) + ((b3 & 0xFF) << 8) + ((b4 & 0xFF) << 0);
            // Check the full magic number rather than trusting the first
            // byte separately
            if (header != CodecUtil.CODEC_MAGIC) {
                throw new TranslogCorruptedException("translog looks like version 1 or later, but has corrupted header. path:" + path);
            }
            // Confirm the rest of the header using CodecUtil, extracting
            // the translog version
            int version = CodecUtil.checkHeaderNoMagic(new InputStreamDataInput(headerStream), TranslogWriter.TRANSLOG_CODEC, 1, Integer.MAX_VALUE);
            switch (version) {
                case TranslogWriter.VERSION_CHECKSUMS:
                    throw new IllegalStateException("pre-2.0 translog found [" + path + "]");
                case TranslogWriter.VERSION_CHECKPOINTS:
                    assert path.getFileName().toString().endsWith(Translog.TRANSLOG_FILE_SUFFIX) : "new file ends with old suffix: " + path;
                    assert checkpoint.numOps >= 0 : "expected at least 0 operations but got: " + checkpoint.numOps;
                    assert checkpoint.offset <= channel.size() : "checkpoint is inconsistent with channel length: " + channel.size() + " " + checkpoint;
                    int len = headerStream.readInt();
                    if (len > channel.size()) {
                        throw new TranslogCorruptedException("uuid length can't be larger than the translog");
                    }
                    BytesRef ref = new BytesRef(len);
                    ref.length = len;
                    headerStream.read(ref.bytes, ref.offset, ref.length);
                    BytesRef uuidBytes = new BytesRef(translogUUID);
                    if (uuidBytes.bytesEquals(ref) == false) {
                        throw new TranslogCorruptedException("expected shard UUID " + uuidBytes + " but got: " + ref + " this translog file belongs to a different translog. path:" + path);
                    }
                    final long firstOperationOffset = ref.length + CodecUtil.headerLength(TranslogWriter.TRANSLOG_CODEC) + Integer.BYTES;
                    return new TranslogReader(checkpoint, channel, path, firstOperationOffset);
                default:
                    throw new TranslogCorruptedException("No known translog stream version: " + version + " path:" + path);
            }
        } else if (b1 == UNVERSIONED_TRANSLOG_HEADER_BYTE) {
            throw new IllegalStateException("pre-1.4 translog found [" + path + "]");
        } else {
            throw new TranslogCorruptedException("Invalid first byte in translog file, got: " + Long.toHexString(b1) + ", expected 0x00 or 0x3f. path:" + path);
        }
    } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException e) {
        throw new TranslogCorruptedException("Translog header corrupted. path:" + path, e);
    }
}
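A small sketch of the arithmetic behind the first-byte dispatch above: the big-endian leading byte of Lucene's magic number is 0x3f, which can never collide with a version-0 translog whose first byte is an Operation.Type between 0 and 4.

int codecMagic = 0x3FD76C17;                 // value of CodecUtil.CODEC_MAGIC
byte firstByte = (byte) (codecMagic >>> 24); // big-endian leading byte
assert firstByte == 0x3f;                    // LUCENE_CODEC_HEADER_BYTE
assert firstByte > 4;                        // outside the version-0 Operation.Type range 0-4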
Use of org.elasticsearch.common.io.stream.InputStreamStreamInput in project elasticsearch by elastic.
The class StoreTests, method testMetadataSnapshotStreaming.
public void testMetadataSnapshotStreaming() throws Exception {
    Store.MetadataSnapshot outMetadataSnapshot = createMetaDataSnapshot();
    org.elasticsearch.Version targetNodeVersion = randomVersion(random());
    // serialize
    ByteArrayOutputStream outBuffer = new ByteArrayOutputStream();
    OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer);
    out.setVersion(targetNodeVersion);
    outMetadataSnapshot.writeTo(out);
    // deserialize
    ByteArrayInputStream inBuffer = new ByteArrayInputStream(outBuffer.toByteArray());
    InputStreamStreamInput in = new InputStreamStreamInput(inBuffer);
    in.setVersion(targetNodeVersion);
    Store.MetadataSnapshot inMetadataSnapshot = new Store.MetadataSnapshot(in);
    // check that every file made the round trip, by name
    Map<String, StoreFileMetaData> origEntries = new HashMap<>();
    origEntries.putAll(outMetadataSnapshot.asMap());
    for (Map.Entry<String, StoreFileMetaData> entry : inMetadataSnapshot.asMap().entrySet()) {
        assertThat(entry.getValue().name(), equalTo(origEntries.remove(entry.getKey()).name()));
    }
    assertThat(origEntries.size(), equalTo(0));
    assertThat(inMetadataSnapshot.getCommitUserData(), equalTo(outMetadataSnapshot.getCommitUserData()));
}
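The two setVersion calls are the load-bearing part of this test: the writer serializes in the target node's wire format, and the reader must be pinned to the same version before deserializing; otherwise each side falls back to its own current version. Condensed with BytesStreamOutput, the same pattern reads:

Version wireVersion = randomVersion(random());
BytesStreamOutput out = new BytesStreamOutput();
out.setVersion(wireVersion);                 // writer pinned to the wire version
outMetadataSnapshot.writeTo(out);
StreamInput in = out.bytes().streamInput();
in.setVersion(wireVersion);                  // reader pinned to the same version
Store.MetadataSnapshot copy = new Store.MetadataSnapshot(in);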
Use of org.elasticsearch.common.io.stream.InputStreamStreamInput in project elasticsearch by elastic.
The class StoreTests, method testStreamStoreFilesMetaData.
public void testStreamStoreFilesMetaData() throws Exception {
    Store.MetadataSnapshot metadataSnapshot = createMetaDataSnapshot();
    TransportNodesListShardStoreMetaData.StoreFilesMetaData outStoreFileMetaData = new TransportNodesListShardStoreMetaData.StoreFilesMetaData(new ShardId("test", "_na_", 0), metadataSnapshot);
    ByteArrayOutputStream outBuffer = new ByteArrayOutputStream();
    OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer);
    org.elasticsearch.Version targetNodeVersion = randomVersion(random());
    out.setVersion(targetNodeVersion);
    outStoreFileMetaData.writeTo(out);
    ByteArrayInputStream inBuffer = new ByteArrayInputStream(outBuffer.toByteArray());
    InputStreamStreamInput in = new InputStreamStreamInput(inBuffer);
    in.setVersion(targetNodeVersion);
    TransportNodesListShardStoreMetaData.StoreFilesMetaData inStoreFileMetaData = TransportNodesListShardStoreMetaData.StoreFilesMetaData.readStoreFilesMetaData(in);
    Iterator<StoreFileMetaData> outFiles = outStoreFileMetaData.iterator();
    for (StoreFileMetaData inFile : inStoreFileMetaData) {
        assertThat(inFile.name(), equalTo(outFiles.next().name()));
    }
    assertThat(outStoreFileMetaData.syncId(), equalTo(inStoreFileMetaData.syncId()));
}
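One subtlety in the comparison loop above: it checks every deserialized file against the original iterator but never asserts that outFiles is exhausted, so an original entry missing from the round-tripped set would go unnoticed. A slightly stricter variant, as a sketch:

Iterator<StoreFileMetaData> outFiles = outStoreFileMetaData.iterator();
for (StoreFileMetaData inFile : inStoreFileMetaData) {
    assertTrue("more files after round trip than before", outFiles.hasNext());
    assertThat(inFile.name(), equalTo(outFiles.next().name()));
}
assertFalse("original files missing after round trip", outFiles.hasNext());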