Use of org.elasticsearch.common.compress.Compressor in project elasticsearch by elastic.
The class XContentHelper, method createParser.
/**
 * Creates a parser for the bytes using the supplied content-type
 */
public static XContentParser createParser(NamedXContentRegistry xContentRegistry, BytesReference bytes, XContentType xContentType) throws IOException {
    Objects.requireNonNull(xContentType);
    Compressor compressor = CompressorFactory.compressor(bytes);
    if (compressor != null) {
        InputStream compressedInput = compressor.streamInput(bytes.streamInput());
        if (compressedInput.markSupported() == false) {
            compressedInput = new BufferedInputStream(compressedInput);
        }
        return XContentFactory.xContent(xContentType).createParser(xContentRegistry, compressedInput);
    } else {
        return xContentType.xContent().createParser(xContentRegistry, bytes.streamInput());
    }
}
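A minimal caller sketch (not part of the listing above): it assumes an uncompressed JSON payload and that NamedXContentRegistry.EMPTY is sufficient, since nothing here needs named XContent parsers. Compressed bytes would be handled transparently by the method above.

BytesReference json = new BytesArray("{\"field\":\"value\"}");
try (XContentParser parser = XContentHelper.createParser(NamedXContentRegistry.EMPTY, json, XContentType.JSON)) {
    Map<String, Object> map = parser.map(); // {field=value}
}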
Use of org.elasticsearch.common.compress.Compressor in project elasticsearch by elastic.
The class PublishClusterStateAction, method handleIncomingClusterStateRequest.
protected void handleIncomingClusterStateRequest(BytesTransportRequest request, TransportChannel channel) throws IOException {
    Compressor compressor = CompressorFactory.compressor(request.bytes());
    StreamInput in = request.bytes().streamInput();
    try {
        if (compressor != null) {
            in = compressor.streamInput(in);
        }
        in = new NamedWriteableAwareStreamInput(in, namedWriteableRegistry);
        in.setVersion(request.version());
        synchronized (lastSeenClusterStateMutex) {
            final ClusterState incomingState;
            // If true we received full cluster state - otherwise diffs
            if (in.readBoolean()) {
                incomingState = ClusterState.readFrom(in, clusterStateSupplier.get().nodes().getLocalNode());
                logger.debug("received full cluster state version [{}] with size [{}]", incomingState.version(), request.bytes().length());
            } else if (lastSeenClusterState != null) {
                Diff<ClusterState> diff = ClusterState.readDiffFrom(in, lastSeenClusterState.nodes().getLocalNode());
                incomingState = diff.apply(lastSeenClusterState);
                logger.debug("received diff cluster state version [{}] with uuid [{}], diff size [{}]", incomingState.version(), incomingState.stateUUID(), request.bytes().length());
            } else {
                logger.debug("received diff for but don't have any local cluster state - requesting full state");
                throw new IncompatibleClusterStateVersionException("have no local cluster state");
            }
            // sanity check incoming state
            validateIncomingState(incomingState, lastSeenClusterState);
            pendingStatesQueue.addPending(incomingState);
            lastSeenClusterState = incomingState;
        }
    } finally {
        IOUtils.close(in);
    }
    channel.sendResponse(TransportResponse.Empty.INSTANCE);
}
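For context, a hedged sketch of the sending side that these branches decode: the full cluster state is written through the compressor's StreamOutput, with a leading boolean distinguishing a full state from a diff. The method name serializeFullState and its exact shape are assumptions for illustration, not a verbatim copy of PublishClusterStateAction.

// Assumed helper for illustration: produce compressed full-cluster-state bytes.
static BytesReference serializeFullState(ClusterState clusterState, Version nodeVersion) throws IOException {
    BytesStreamOutput bStream = new BytesStreamOutput();
    try (StreamOutput stream = CompressorFactory.COMPRESSOR.streamOutput(bStream)) {
        stream.setVersion(nodeVersion);
        stream.writeBoolean(true); // true = full cluster state, false = diff
        clusterState.writeTo(stream);
    }
    return bStream.bytes();
}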
Use of org.elasticsearch.common.compress.Compressor in project elasticsearch by elastic.
The class XContentHelper, method createParser.
/**
 * Creates a parser based on the bytes provided
 * @deprecated use {@link #createParser(NamedXContentRegistry, BytesReference, XContentType)} to avoid content type auto-detection
 */
@Deprecated
public static XContentParser createParser(NamedXContentRegistry xContentRegistry, BytesReference bytes) throws IOException {
    Compressor compressor = CompressorFactory.compressor(bytes);
    if (compressor != null) {
        InputStream compressedInput = compressor.streamInput(bytes.streamInput());
        if (compressedInput.markSupported() == false) {
            compressedInput = new BufferedInputStream(compressedInput);
        }
        final XContentType contentType = XContentFactory.xContentType(compressedInput);
        return XContentFactory.xContent(contentType).createParser(xContentRegistry, compressedInput);
    } else {
        return XContentFactory.xContent(bytes).createParser(xContentRegistry, bytes.streamInput());
    }
}
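A usage sketch for the deprecated overload (assumed caller code): the content type is sniffed from the leading bytes (here the '{' of a JSON object) rather than being supplied, which is exactly the auto-detection the @deprecated note steers callers away from.

BytesReference bytes = new BytesArray("{\"field\":\"value\"}");
try (XContentParser parser = XContentHelper.createParser(NamedXContentRegistry.EMPTY, bytes)) {
    Map<String, Object> map = parser.map(); // content type was auto-detected as JSON
}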
Use of org.elasticsearch.common.compress.Compressor in project elasticsearch by elastic.
The class XContentHelper, method writeRawField.
/**
 * Writes a "raw" (bytes) field, handling cases where the bytes are compressed, and tries to optimize writing using
 * {@link XContentBuilder#rawField(String, org.elasticsearch.common.bytes.BytesReference, XContentType)}.
 */
public static void writeRawField(String field, BytesReference source, XContentType xContentType, XContentBuilder builder, ToXContent.Params params) throws IOException {
    Objects.requireNonNull(xContentType);
    Compressor compressor = CompressorFactory.compressor(source);
    if (compressor != null) {
        InputStream compressedStreamInput = compressor.streamInput(source.streamInput());
        builder.rawField(field, compressedStreamInput, xContentType);
    } else {
        builder.rawField(field, source, xContentType);
    }
}
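A caller sketch (assumed, not from the listing): embedding a stored, possibly compressed _source into a response builder. The `source` variable is a placeholder for any BytesReference, compressed or not; the helper decompresses transparently before writing.

XContentBuilder builder = XContentFactory.jsonBuilder();
builder.startObject();
XContentHelper.writeRawField("_source", source, XContentType.JSON, builder, ToXContent.EMPTY_PARAMS);
builder.endObject();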
Use of org.elasticsearch.common.compress.Compressor in project elasticsearch by elastic.
The class XContentHelper, method convertToMap.
/**
 * Converts the given bytes into a map that is optionally ordered. The provided {@link XContentType} must be non-null.
 */
public static Tuple<XContentType, Map<String, Object>> convertToMap(BytesReference bytes, boolean ordered, XContentType xContentType) throws ElasticsearchParseException {
    try {
        final XContentType contentType;
        InputStream input;
        Compressor compressor = CompressorFactory.compressor(bytes);
        if (compressor != null) {
            InputStream compressedStreamInput = compressor.streamInput(bytes.streamInput());
            if (compressedStreamInput.markSupported() == false) {
                compressedStreamInput = new BufferedInputStream(compressedStreamInput);
            }
            input = compressedStreamInput;
        } else {
            input = bytes.streamInput();
        }
        contentType = xContentType != null ? xContentType : XContentFactory.xContentType(input);
        return new Tuple<>(Objects.requireNonNull(contentType), convertToMap(XContentFactory.xContent(contentType), input, ordered));
    } catch (IOException e) {
        throw new ElasticsearchParseException("Failed to parse content to map", e);
    }
}
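A usage sketch (assumptions: uncompressed JSON input and an explicitly supplied content type): the returned Tuple carries the resolved XContentType in v1() and the document map in v2(), insertion-ordered because the `ordered` flag is true.

BytesReference bytes = new BytesArray("{\"user\":{\"name\":\"kimchy\"}}");
Tuple<XContentType, Map<String, Object>> result = XContentHelper.convertToMap(bytes, true, XContentType.JSON);
XContentType type = result.v1();          // XContentType.JSON
Map<String, Object> source = result.v2(); // insertion-ordered map of the document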