Use of org.opensearch.common.io.stream.InputStreamStreamInput in project OpenSearch by opensearch-project.
From the class PercolatorFieldMapperTests, method testImplicitlySetDefaultScriptLang.
public void testImplicitlySetDefaultScriptLang() throws Exception {
    addQueryFieldMappings();
    XContentBuilder query = jsonBuilder();
    query.startObject();
    query.startObject("script");
    if (randomBoolean()) {
        query.field("script", "return true");
    } else {
        query.startObject("script");
        query.field("source", "return true");
        query.endObject();
    }
    query.endObject();
    query.endObject();
    ParsedDocument doc = mapperService.documentMapper("doc")
        .parse(new SourceToParse("test", "doc", "1",
            BytesReference.bytes(XContentFactory.jsonBuilder()
                .startObject()
                .rawField(fieldName, new BytesArray(Strings.toString(query)).streamInput(), query.contentType())
                .endObject()),
            XContentType.JSON));
    BytesRef querySource = doc.rootDoc().getFields(fieldType.queryBuilderField.name())[0].binaryValue();
    try (InputStream in = new ByteArrayInputStream(querySource.bytes, querySource.offset, querySource.length)) {
        try (StreamInput input = new NamedWriteableAwareStreamInput(new InputStreamStreamInput(in), writableRegistry())) {
            // Query builder's content is stored via BinaryFieldMapper, which has a custom encoding
            // to encode multiple binary values into a single binary doc values field.
            // This is the reason we first need to read the number of values and
            // then the length of the field value in bytes.
            input.readVInt();
            input.readVInt();
            ScriptQueryBuilder queryBuilder = (ScriptQueryBuilder) input.readNamedWriteable(QueryBuilder.class);
            assertEquals(Script.DEFAULT_SCRIPT_LANG, queryBuilder.script().getLang());
        }
    }
    query = jsonBuilder();
    query.startObject();
    query.startObject("function_score");
    query.startArray("functions");
    query.startObject();
    query.startObject("script_score");
    if (randomBoolean()) {
        query.field("script", "return true");
    } else {
        query.startObject("script");
        query.field("source", "return true");
        query.endObject();
    }
    query.endObject();
    query.endObject();
    query.endArray();
    query.endObject();
    query.endObject();
    doc = mapperService.documentMapper("doc")
        .parse(new SourceToParse("test", "doc", "1",
            BytesReference.bytes(XContentFactory.jsonBuilder()
                .startObject()
                .rawField(fieldName, new BytesArray(Strings.toString(query)).streamInput(), query.contentType())
                .endObject()),
            XContentType.JSON));
    querySource = doc.rootDoc().getFields(fieldType.queryBuilderField.name())[0].binaryValue();
    try (InputStream in = new ByteArrayInputStream(querySource.bytes, querySource.offset, querySource.length)) {
        try (StreamInput input = new NamedWriteableAwareStreamInput(new InputStreamStreamInput(in), writableRegistry())) {
            input.readVInt();
            input.readVInt();
            FunctionScoreQueryBuilder queryBuilder = (FunctionScoreQueryBuilder) input.readNamedWriteable(QueryBuilder.class);
            ScriptScoreFunctionBuilder function = (ScriptScoreFunctionBuilder) queryBuilder.filterFunctionBuilders()[0].getScoreFunction();
            assertEquals(Script.DEFAULT_SCRIPT_LANG, function.getScript().getLang());
        }
    }
}
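The two readVInt() calls above mirror the framing that BinaryFieldMapper's doc values field puts around each stored query. The write-side sketch below is not the mapper's actual code; it only illustrates that assumed framing (a value count, then a per-value length followed by the bytes), with queryBuilder standing in for any serialized QueryBuilder:

try (BytesStreamOutput queryBytes = new BytesStreamOutput(); BytesStreamOutput framed = new BytesStreamOutput()) {
    // serialize the query builder itself, then frame it the way the test reads it back
    queryBytes.writeNamedWriteable(queryBuilder);
    BytesRef value = queryBytes.bytes().toBytesRef();
    framed.writeVInt(1);                                    // number of values stored in the field
    framed.writeVInt(value.length);                         // length of this value in bytes
    framed.writeBytes(value.bytes, value.offset, value.length);
    // framed.bytes() now has the same shape as the binary doc value the test deserializes
}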
Use of org.opensearch.common.io.stream.InputStreamStreamInput in project OpenSearch by opensearch-project.
From the class PercolateQueryBuilder, method createStore.
static PercolateQuery.QueryStore createStore(MappedFieldType queryBuilderFieldType, QueryShardContext context) {
    Version indexVersion = context.indexVersionCreated();
    NamedWriteableRegistry registry = context.getWriteableRegistry();
    return ctx -> {
        LeafReader leafReader = ctx.reader();
        BinaryDocValues binaryDocValues = leafReader.getBinaryDocValues(queryBuilderFieldType.name());
        if (binaryDocValues == null) {
            return docId -> null;
        }
        return docId -> {
            if (binaryDocValues.advanceExact(docId)) {
                BytesRef qbSource = binaryDocValues.binaryValue();
                try (InputStream in = new ByteArrayInputStream(qbSource.bytes, qbSource.offset, qbSource.length)) {
                    try (StreamInput input = new NamedWriteableAwareStreamInput(new InputStreamStreamInput(in, qbSource.length), registry)) {
                        input.setVersion(indexVersion);
                        // Query builder's content is stored via BinaryFieldMapper, which has a custom encoding
                        // to encode multiple binary values into a single binary doc values field.
                        // This is the reason we first need to read the number of values and
                        // then the length of the field value in bytes.
                        int numValues = input.readVInt();
                        assert numValues == 1;
                        int valueLength = input.readVInt();
                        assert valueLength > 0;
                        QueryBuilder queryBuilder = input.readNamedWriteable(QueryBuilder.class);
                        assert in.read() == -1;
                        queryBuilder = Rewriteable.rewrite(queryBuilder, context);
                        return queryBuilder.toQuery(context);
                    }
                }
            } else {
                return null;
            }
        };
    };
}
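The store built here is a per-segment factory: the outer lambda binds a leaf reader, and the inner lambda decodes one document's stored query on demand. A minimal consumer sketch, assuming QueryStore exposes its single method as getQueries(LeafReaderContext); shardContext, leafContext, and docId are placeholders:

PercolateQuery.QueryStore store = createStore(queryBuilderFieldType, shardContext);
CheckedFunction<Integer, Query, IOException> queriesForSegment = store.getQueries(leafContext);
Query storedQuery = queriesForSegment.apply(docId);   // null when the document holds no stored query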
Use of org.opensearch.common.io.stream.InputStreamStreamInput in project OpenSearch by opensearch-project.
From the class ConcurrentSeqNoVersioningIT, method runLinearizabilityChecker.
@SuppressForbidden(reason = "system out is ok for a command line tool")
private static void runLinearizabilityChecker(FileInputStream fileInputStream, long primaryTerm, long seqNo) throws IOException {
    StreamInput is = new InputStreamStreamInput(Base64.getDecoder().wrap(fileInputStream));
    is = new NamedWriteableAwareStreamInput(is, createNamedWriteableRegistry());
    LinearizabilityChecker.History history = readHistory(is);
    Version initialVersion = new Version(primaryTerm, seqNo);
    boolean result = new LinearizabilityChecker().isLinearizable(new CASSequentialSpec(initialVersion), history, missingResponseGenerator());
    System.out.println(LinearizabilityChecker.visualize(new CASSequentialSpec(initialVersion), history, missingResponseGenerator()));
    System.out.println("Linearizable?: " + result);
}
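Note that the checker reads named writeables through a Base64 decoder wrapped around the raw file. A dump that produces such a file would mirror the wrapping in reverse; the sketch below shows only that stream setup, under the assumption that the record layout itself is whatever readHistory(is) expects (historyFile and events are placeholders):

try (OutputStream os = Base64.getEncoder().wrap(new FileOutputStream(historyFile));
     StreamOutput out = new OutputStreamStreamOutput(os)) {
    for (NamedWriteable event : events) {
        out.writeNamedWriteable(event);     // symmetric to the named-writeable reads on the decoding side
    }
}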
Use of org.opensearch.common.io.stream.InputStreamStreamInput in project OpenSearch by opensearch-project.
From the class StoreTests, method testMetadataSnapshotStreaming.
public void testMetadataSnapshotStreaming() throws Exception {
    Store.MetadataSnapshot outMetadataSnapshot = createMetadataSnapshot();
    org.opensearch.Version targetNodeVersion = randomVersion(random());
    ByteArrayOutputStream outBuffer = new ByteArrayOutputStream();
    OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer);
    out.setVersion(targetNodeVersion);
    outMetadataSnapshot.writeTo(out);
    ByteArrayInputStream inBuffer = new ByteArrayInputStream(outBuffer.toByteArray());
    InputStreamStreamInput in = new InputStreamStreamInput(inBuffer);
    in.setVersion(targetNodeVersion);
    Store.MetadataSnapshot inMetadataSnapshot = new Store.MetadataSnapshot(in);
    Map<String, StoreFileMetadata> origEntries = new HashMap<>();
    origEntries.putAll(outMetadataSnapshot.asMap());
    for (Map.Entry<String, StoreFileMetadata> entry : inMetadataSnapshot.asMap().entrySet()) {
        assertThat(entry.getValue().name(), equalTo(origEntries.remove(entry.getKey()).name()));
    }
    assertThat(origEntries.size(), equalTo(0));
    assertThat(inMetadataSnapshot.getCommitUserData(), equalTo(outMetadataSnapshot.getCommitUserData()));
}
Use of org.opensearch.common.io.stream.InputStreamStreamInput in project OpenSearch by opensearch-project.
From the class StoreTests, method testStreamStoreFilesMetadata.
public void testStreamStoreFilesMetadata() throws Exception {
    Store.MetadataSnapshot metadataSnapshot = createMetadataSnapshot();
    int numOfLeases = randomIntBetween(0, 10);
    List<RetentionLease> peerRecoveryRetentionLeases = new ArrayList<>();
    for (int i = 0; i < numOfLeases; i++) {
        peerRecoveryRetentionLeases.add(new RetentionLease(
            ReplicationTracker.getPeerRecoveryRetentionLeaseId(UUIDs.randomBase64UUID()),
            randomNonNegativeLong(),
            randomNonNegativeLong(),
            ReplicationTracker.PEER_RECOVERY_RETENTION_LEASE_SOURCE));
    }
    TransportNodesListShardStoreMetadata.StoreFilesMetadata outStoreFileMetadata =
        new TransportNodesListShardStoreMetadata.StoreFilesMetadata(new ShardId("test", "_na_", 0), metadataSnapshot, peerRecoveryRetentionLeases);
    ByteArrayOutputStream outBuffer = new ByteArrayOutputStream();
    OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer);
    org.opensearch.Version targetNodeVersion = randomVersion(random());
    out.setVersion(targetNodeVersion);
    outStoreFileMetadata.writeTo(out);
    ByteArrayInputStream inBuffer = new ByteArrayInputStream(outBuffer.toByteArray());
    InputStreamStreamInput in = new InputStreamStreamInput(inBuffer);
    in.setVersion(targetNodeVersion);
    TransportNodesListShardStoreMetadata.StoreFilesMetadata inStoreFileMetadata = new TransportNodesListShardStoreMetadata.StoreFilesMetadata(in);
    Iterator<StoreFileMetadata> outFiles = outStoreFileMetadata.iterator();
    for (StoreFileMetadata inFile : inStoreFileMetadata) {
        assertThat(inFile.name(), equalTo(outFiles.next().name()));
    }
    assertThat(outStoreFileMetadata.syncId(), equalTo(inStoreFileMetadata.syncId()));
    assertThat(outStoreFileMetadata.peerRecoveryRetentionLeases(), equalTo(peerRecoveryRetentionLeases));
}
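Both store tests follow the same version-aware round trip: serialize through OutputStreamStreamOutput at a randomly chosen target node version, then deserialize through InputStreamStreamInput pinned to the same version. A generic sketch of that pattern (the roundTrip helper is illustrative, not OpenSearch test infrastructure):

static <T extends Writeable> T roundTrip(T original, Writeable.Reader<T> reader, org.opensearch.Version version) throws IOException {
    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    OutputStreamStreamOutput out = new OutputStreamStreamOutput(buffer);
    out.setVersion(version);                  // serialize as seen by the target node
    original.writeTo(out);
    InputStreamStreamInput in = new InputStreamStreamInput(new ByteArrayInputStream(buffer.toByteArray()));
    in.setVersion(version);                   // deserialize with the matching version
    return reader.read(in);
}

// e.g. Store.MetadataSnapshot copy = roundTrip(outMetadataSnapshot, Store.MetadataSnapshot::new, targetNodeVersion);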