Use of org.elasticsearch.common.io.stream.OutputStreamStreamOutput in project elasticsearch by elastic.
From the class ScriptTests, method testScriptSerialization:
public void testScriptSerialization() throws IOException {
    Script expectedScript = createScript();
    try (ByteArrayOutputStream out = new ByteArrayOutputStream()) {
        expectedScript.writeTo(new OutputStreamStreamOutput(out));
        try (ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray())) {
            Script actualScript = new Script(new InputStreamStreamInput(in));
            assertThat(actualScript, equalTo(expectedScript));
        }
    }
}
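The same round trip can also be written with elasticsearch's own in-memory stream instead of java.io buffers. A minimal sketch, with a hypothetical test name, assuming BytesStreamOutput and BytesReference.streamInput() are available as in the other snippets on this page:

// Hypothetical variant of the test above using BytesStreamOutput rather than
// ByteArrayOutputStream/ByteArrayInputStream.
public void testScriptSerializationWithBytesStream() throws IOException {
    Script expectedScript = createScript();
    try (BytesStreamOutput out = new BytesStreamOutput()) {
        expectedScript.writeTo(out);
        try (StreamInput in = out.bytes().streamInput()) {
            assertThat(new Script(in), equalTo(expectedScript));
        }
    }
}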
Use of org.elasticsearch.common.io.stream.OutputStreamStreamOutput in project crate by crate.
From the class DeflateCompressor, method streamOutput:
@Override
public StreamOutput streamOutput(StreamOutput out) throws IOException {
    out.writeBytes(HEADER);
    final boolean nowrap = true;
    final Deflater deflater = new Deflater(LEVEL, nowrap);
    final boolean syncFlush = true;
    DeflaterOutputStream deflaterOutputStream = new DeflaterOutputStream(out, deflater, BUFFER_SIZE, syncFlush);
    OutputStream compressedOut = new BufferedOutputStream(deflaterOutputStream, BUFFER_SIZE);
    return new OutputStreamStreamOutput(compressedOut) {

        final AtomicBoolean closed = new AtomicBoolean(false);

        @Override
        public void close() throws IOException {
            try {
                super.close();
            } finally {
                if (closed.compareAndSet(false, true)) {
                    // important to release native memory
                    deflater.end();
                }
            }
        }
    };
}
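The nowrap and syncFlush flags chosen above have to be mirrored on the decompression side. A minimal JDK-only sketch of that pairing; it illustrates the raw-deflate convention with java.util.zip classes, not crate's own stream-input counterpart:

import java.io.*;
import java.nio.charset.StandardCharsets;
import java.util.zip.*;

public class RawDeflateRoundTrip {
    public static void main(String[] args) throws IOException {
        byte[] original = "hello deflate".getBytes(StandardCharsets.UTF_8);

        // Compress with nowrap=true (raw deflate, no zlib header/trailer) and
        // syncFlush=true, matching the settings used in streamOutput above.
        ByteArrayOutputStream compressed = new ByteArrayOutputStream();
        Deflater deflater = new Deflater(Deflater.DEFAULT_COMPRESSION, true);
        try (DeflaterOutputStream out = new DeflaterOutputStream(compressed, deflater, 4096, true)) {
            out.write(original);
        } finally {
            deflater.end(); // release native memory, as the anonymous close() above does
        }

        // Decompress: the Inflater must also be created with nowrap=true,
        // otherwise it expects a zlib header that was never written.
        Inflater inflater = new Inflater(true);
        try (InputStream in = new InflaterInputStream(
                new ByteArrayInputStream(compressed.toByteArray()), inflater, 4096)) {
            System.out.println(new String(in.readAllBytes(), StandardCharsets.UTF_8));
        } finally {
            inflater.end();
        }
    }
}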
Use of org.elasticsearch.common.io.stream.OutputStreamStreamOutput in project crate by crate.
From the class TranslogHeader, method write:
/**
* Writes this header with the latest format into the file channel
*/
void write(final FileChannel channel) throws IOException {
    // This output is intentionally not closed because closing it will close the FileChannel.
    @SuppressWarnings({"IOResourceOpenedButNotSafelyClosed", "resource"})
    final BufferedChecksumStreamOutput out = new BufferedChecksumStreamOutput(
        new OutputStreamStreamOutput(java.nio.channels.Channels.newOutputStream(channel)));
    CodecUtil.writeHeader(new OutputStreamDataOutput(out), TRANSLOG_CODEC, CURRENT_VERSION);
    // Write uuid
    final BytesRef uuid = new BytesRef(translogUUID);
    out.writeInt(uuid.length);
    out.writeBytes(uuid.bytes, uuid.offset, uuid.length);
    // Write primary term
    out.writeLong(primaryTerm);
    // Checksum header
    out.writeInt((int) out.getChecksum());
    out.flush();
    channel.force(true);
    assert channel.position() == headerSizeInBytes :
        "Header is not fully written; header size [" + headerSizeInBytes + "], channel position [" + channel.position() + "]";
}
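The layout written above is "codec header, payload, then a checksum of everything written so far". A minimal sketch of that write-then-append-checksum pattern using only JDK classes; CheckedOutputStream stands in for BufferedChecksumStreamOutput here, so this is an illustration of the pattern, not the translog's actual on-disk format:

import java.io.*;
import java.nio.charset.StandardCharsets;
import java.util.zip.CRC32;
import java.util.zip.CheckedOutputStream;

public class ChecksummedHeaderSketch {
    static byte[] writeHeader(String uuid, long primaryTerm) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        CRC32 crc = new CRC32();
        // Every byte written through this stream also updates the running CRC32,
        // which is the role BufferedChecksumStreamOutput plays above.
        try (DataOutputStream out = new DataOutputStream(new CheckedOutputStream(bytes, crc))) {
            byte[] uuidBytes = uuid.getBytes(StandardCharsets.UTF_8);
            out.writeInt(uuidBytes.length);
            out.write(uuidBytes);
            out.writeLong(primaryTerm);
            // Append the checksum of everything written so far, mirroring
            // out.writeInt((int) out.getChecksum()) in the method above.
            out.writeInt((int) crc.getValue());
        }
        return bytes.toByteArray();
    }
}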
Use of org.elasticsearch.common.io.stream.OutputStreamStreamOutput in project elasticsearch by elastic.
From the class BlobStoreRepository, method writeIncompatibleSnapshots:
/**
* Writes the incompatible snapshot ids list to the `incompatible-snapshots` blob in the repository.
*
* Package private for testing.
*/
void writeIncompatibleSnapshots(RepositoryData repositoryData) throws IOException {
    // can not write to a read only repository
    assert isReadOnly() == false;
    final BytesReference bytes;
    try (BytesStreamOutput bStream = new BytesStreamOutput()) {
        try (StreamOutput stream = new OutputStreamStreamOutput(bStream)) {
            XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON, stream);
            repositoryData.incompatibleSnapshotsToXContent(builder, ToXContent.EMPTY_PARAMS);
            builder.close();
        }
        bytes = bStream.bytes();
    }
    // write the incompatible snapshots blob
    writeAtomic(INCOMPATIBLE_SNAPSHOTS_BLOB, bytes);
}
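The method serializes fully into memory first and only then calls writeAtomic, so readers never observe a partially written incompatible-snapshots blob. A minimal filesystem sketch of that write-to-temp-then-rename idea using JDK NIO; the writeAtomic name is borrowed from the method above purely for illustration, and BlobStoreRepository actually delegates to its BlobContainer rather than to java.nio.file:

import java.io.IOException;
import java.nio.file.*;

public class AtomicWriteSketch {
    // Write bytes to a temporary file and atomically move it into place, so a
    // concurrent reader sees either the old blob or the complete new one.
    static void writeAtomic(Path target, byte[] bytes) throws IOException {
        Path tmp = Files.createTempFile(target.getParent(), target.getFileName().toString(), ".tmp");
        try {
            Files.write(tmp, bytes);
            Files.move(tmp, target, StandardCopyOption.ATOMIC_MOVE);
        } finally {
            Files.deleteIfExists(tmp);
        }
    }
}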
Use of org.elasticsearch.common.io.stream.OutputStreamStreamOutput in project elasticsearch by elastic.
From the class StoreTests, method testMetadataSnapshotStreaming:
public void testMetadataSnapshotStreaming() throws Exception {
    Store.MetadataSnapshot outMetadataSnapshot = createMetaDataSnapshot();
    org.elasticsearch.Version targetNodeVersion = randomVersion(random());

    ByteArrayOutputStream outBuffer = new ByteArrayOutputStream();
    OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer);
    out.setVersion(targetNodeVersion);
    outMetadataSnapshot.writeTo(out);

    ByteArrayInputStream inBuffer = new ByteArrayInputStream(outBuffer.toByteArray());
    InputStreamStreamInput in = new InputStreamStreamInput(inBuffer);
    in.setVersion(targetNodeVersion);
    Store.MetadataSnapshot inMetadataSnapshot = new Store.MetadataSnapshot(in);

    Map<String, StoreFileMetaData> origEntries = new HashMap<>();
    origEntries.putAll(outMetadataSnapshot.asMap());
    for (Map.Entry<String, StoreFileMetaData> entry : inMetadataSnapshot.asMap().entrySet()) {
        assertThat(entry.getValue().name(), equalTo(origEntries.remove(entry.getKey()).name()));
    }
    assertThat(origEntries.size(), equalTo(0));
    assertThat(inMetadataSnapshot.getCommitUserData(), equalTo(outMetadataSnapshot.getCommitUserData()));
}
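The detail worth noticing here is that the same target version is set on both the StreamOutput and the StreamInput, because the wire format of many classes changes between releases. A minimal sketch of that pairing as a reusable helper; the name copyWithVersion is hypothetical, and the generic Writeable/Writeable.Reader plumbing assumes the same serialization contract as the snippets above:

// Hypothetical helper: round-trips a Writeable while pinning both stream ends
// to the same node version, mirroring the out.setVersion/in.setVersion calls above.
static <T extends Writeable> T copyWithVersion(T value, Writeable.Reader<T> reader,
                                               org.elasticsearch.Version version) throws IOException {
    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    try (StreamOutput out = new OutputStreamStreamOutput(buffer)) {
        out.setVersion(version);
        value.writeTo(out);
    }
    try (StreamInput in = new InputStreamStreamInput(new ByteArrayInputStream(buffer.toByteArray()))) {
        in.setVersion(version);
        return reader.read(in);
    }
}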