Use of org.elasticsearch.common.bytes.BytesReference in project elasticsearch by elastic.
The class MockTcpTransport, method readMessage:
private void readMessage(MockChannel mockChannel, StreamInput input) throws IOException {
    Socket socket = mockChannel.activeChannel;
    byte[] minimalHeader = new byte[TcpHeader.MARKER_BYTES_SIZE];
    int firstByte = input.read();
    if (firstByte == -1) {
        throw new IOException("Connection reset by peer");
    }
    minimalHeader[0] = (byte) firstByte;
    minimalHeader[1] = (byte) input.read();
    int msgSize = input.readInt();
    if (msgSize == -1) {
        socket.getOutputStream().flush();
    } else {
        BytesStreamOutput output = new BytesStreamOutput();
        final byte[] buffer = new byte[msgSize];
        input.readFully(buffer);
        output.write(minimalHeader);
        output.writeInt(msgSize);
        output.write(buffer);
        final BytesReference bytes = output.bytes();
        if (TcpTransport.validateMessageHeader(bytes)) {
            InetSocketAddress remoteAddress = (InetSocketAddress) socket.getRemoteSocketAddress();
            messageReceived(bytes.slice(TcpHeader.MARKER_BYTES_SIZE + TcpHeader.MESSAGE_LENGTH_SIZE, msgSize),
                mockChannel, mockChannel.profile, remoteAddress, msgSize);
        } else {
            // ping message - we just drop all stuff
        }
    }
}
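For context, the frame this method consumes is a six-byte header (two marker bytes followed by a four-byte big-endian length) and then the payload; a length of -1 is treated as a ping and only triggers a flush. The sketch below hand-builds such frames. The TcpFrames helper is hypothetical, not part of Elasticsearch; it assumes the 'E','S' marker bytes used by the real transport.

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Hypothetical helper that frames payloads the way readMessage expects to consume them.
final class TcpFrames {

    static byte[] message(byte[] payload) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        out.writeByte('E');           // two marker bytes (TcpHeader.MARKER_BYTES_SIZE == 2)
        out.writeByte('S');
        out.writeInt(payload.length); // four-byte big-endian length, as read by input.readInt()
        out.write(payload);
        return bytes.toByteArray();
    }

    static byte[] ping() throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        out.writeByte('E');
        out.writeByte('S');
        out.writeInt(-1);             // msgSize == -1 is the ping case above
        return bytes.toByteArray();
    }
}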
Use of org.elasticsearch.common.bytes.BytesReference in project elasticsearch by elastic.
The class GetResult, method fromXContentEmbedded:
public static GetResult fromXContentEmbedded(XContentParser parser) throws IOException {
    XContentParser.Token token = parser.nextToken();
    ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser::getTokenLocation);
    String currentFieldName = parser.currentName();
    String index = null, type = null, id = null;
    long version = -1;
    boolean found = false;
    BytesReference source = null;
    Map<String, GetField> fields = new HashMap<>();
    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
        if (token == XContentParser.Token.FIELD_NAME) {
            currentFieldName = parser.currentName();
        } else if (token.isValue()) {
            if (_INDEX.equals(currentFieldName)) {
                index = parser.text();
            } else if (_TYPE.equals(currentFieldName)) {
                type = parser.text();
            } else if (_ID.equals(currentFieldName)) {
                id = parser.text();
            } else if (_VERSION.equals(currentFieldName)) {
                version = parser.longValue();
            } else if (FOUND.equals(currentFieldName)) {
                found = parser.booleanValue();
            } else {
                fields.put(currentFieldName, new GetField(currentFieldName,
                    Collections.singletonList(parser.objectText())));
            }
        } else if (token == XContentParser.Token.START_OBJECT) {
            if (SourceFieldMapper.NAME.equals(currentFieldName)) {
                try (XContentBuilder builder = XContentBuilder.builder(parser.contentType().xContent())) {
                    // the original document gets slightly modified: whitespace and pretty printing
                    // are not preserved; it all depends on the current builder settings
                    builder.copyCurrentStructure(parser);
                    source = builder.bytes();
                }
            } else if (FIELDS.equals(currentFieldName)) {
                while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
                    GetField getField = GetField.fromXContent(parser);
                    fields.put(getField.getName(), getField);
                }
            } else {
                throwUnknownField(currentFieldName, parser.getTokenLocation());
            }
        }
    }
    return new GetResult(index, type, id, version, found, source, fields);
}
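A minimal usage sketch for the parser above, assuming the ES 5.x factory signature XContent#createParser(NamedXContentRegistry, String) and an invented sample document. The caller must position the parser on the enclosing START_OBJECT before delegating, since fromXContentEmbedded immediately advances to the first field name.

import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;

String json = "{\"_index\":\"idx\",\"_type\":\"doc\",\"_id\":\"1\","
    + "\"_version\":2,\"found\":true,\"_source\":{\"field\":\"value\"}}";
try (XContentParser parser = XContentFactory.xContent(XContentType.JSON)
        .createParser(NamedXContentRegistry.EMPTY, json)) {
    parser.nextToken(); // advance onto START_OBJECT; fromXContentEmbedded takes it from there
    GetResult result = GetResult.fromXContentEmbedded(parser);
    assert result.isExists() && "1".equals(result.getId());
}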
Use of org.elasticsearch.common.bytes.BytesReference in project elasticsearch by elastic.
The class ShardGetService, method innerGetLoadFromStoredFields:
private GetResult innerGetLoadFromStoredFields(String type, String id, String[] gFields,
        FetchSourceContext fetchSourceContext, Engine.GetResult get, MapperService mapperService) {
    Map<String, GetField> fields = null;
    BytesReference source = null;
    Versions.DocIdAndVersion docIdAndVersion = get.docIdAndVersion();
    FieldsVisitor fieldVisitor = buildFieldsVisitors(gFields, fetchSourceContext);
    if (fieldVisitor != null) {
        try {
            docIdAndVersion.context.reader().document(docIdAndVersion.docId, fieldVisitor);
        } catch (IOException e) {
            throw new ElasticsearchException("Failed to get type [" + type + "] and id [" + id + "]", e);
        }
        source = fieldVisitor.source();
        if (!fieldVisitor.fields().isEmpty()) {
            fieldVisitor.postProcess(mapperService);
            fields = new HashMap<>(fieldVisitor.fields().size());
            for (Map.Entry<String, List<Object>> entry : fieldVisitor.fields().entrySet()) {
                fields.put(entry.getKey(), new GetField(entry.getKey(), entry.getValue()));
            }
        }
    }
    DocumentMapper docMapper = mapperService.documentMapper(type);
    if (docMapper.parentFieldMapper().active()) {
        String parentId = ParentFieldSubFetchPhase.getParentId(docMapper.parentFieldMapper(),
            docIdAndVersion.context.reader(), docIdAndVersion.docId);
        if (fields == null) {
            fields = new HashMap<>(1);
        }
        fields.put(ParentFieldMapper.NAME, new GetField(ParentFieldMapper.NAME, Collections.singletonList(parentId)));
    }
    if (gFields != null && gFields.length > 0) {
        for (String field : gFields) {
            FieldMapper fieldMapper = docMapper.mappers().smartNameFieldMapper(field);
            if (fieldMapper == null) {
                if (docMapper.objectMappers().get(field) != null) {
                    // Only fail if we know it is an object field; missing paths/fields shouldn't fail.
                    throw new IllegalArgumentException("field [" + field + "] isn't a leaf field");
                }
            }
        }
    }
    if (!fetchSourceContext.fetchSource()) {
        source = null;
    } else if (fetchSourceContext.includes().length > 0 || fetchSourceContext.excludes().length > 0) {
        // TODO: The source might be parsed and available in the sourceLookup, but that one uses
        // unordered maps, so it differs. Do we care?
        Tuple<XContentType, Map<String, Object>> typeMapTuple = XContentHelper.convertToMap(source, true);
        XContentType sourceContentType = typeMapTuple.v1();
        Map<String, Object> sourceAsMap = typeMapTuple.v2();
        sourceAsMap = XContentMapValues.filter(sourceAsMap, fetchSourceContext.includes(), fetchSourceContext.excludes());
        try {
            source = XContentFactory.contentBuilder(sourceContentType).map(sourceAsMap).bytes();
        } catch (IOException e) {
            throw new ElasticsearchException("Failed to get type [" + type + "] and id [" + id
                + "] with includes/excludes set", e);
        }
    }
    return new GetResult(shardId.getIndexName(), type, id, get.version(), get.exists(), source, fields);
}
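The includes/excludes branch above is easy to exercise in isolation. A small sketch of XContentMapValues.filter on assumed sample data:

import java.util.HashMap;
import java.util.Map;
import org.elasticsearch.common.xcontent.support.XContentMapValues;

Map<String, Object> sourceAsMap = new HashMap<>();
sourceAsMap.put("user", "kimchy");
sourceAsMap.put("message", "trying out Elasticsearch");

// Keep only "user"; an empty excludes array means nothing is explicitly removed.
Map<String, Object> filtered =
    XContentMapValues.filter(sourceAsMap, new String[] { "user" }, new String[0]);
// filtered == {user=kimchy}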
Use of org.elasticsearch.common.bytes.BytesReference in project elasticsearch by elastic.
The class SourceFieldMapper, method parseCreateField:
@Override
protected void parseCreateField(ParseContext context, List<IndexableField> fields) throws IOException {
    if (!enabled) {
        return;
    }
    if (!fieldType().stored()) {
        return;
    }
    BytesReference source = context.sourceToParse().source();
    // Percolate and tv APIs may not set the source and that is ok, because these APIs will not index any data
    if (source == null) {
        return;
    }
    if (filter != null) {
        // we don't update the context source if we filter, we want to keep it as is...
        Tuple<XContentType, Map<String, Object>> mapTuple =
            XContentHelper.convertToMap(source, true, context.sourceToParse().getXContentType());
        Map<String, Object> filteredSource = filter.apply(mapTuple.v2());
        BytesStreamOutput bStream = new BytesStreamOutput();
        XContentType contentType = mapTuple.v1();
        XContentBuilder builder = XContentFactory.contentBuilder(contentType, bStream).map(filteredSource);
        builder.close();
        source = bStream.bytes();
    }
    BytesRef ref = source.toBytesRef();
    fields.add(new StoredField(fieldType().name(), ref.bytes, ref.offset, ref.length));
}
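The filter round-trip in the middle of this method (bytes to map, filter, map back to bytes) can be sketched on its own. This assumes a JSON source and uses an inline remove() as a stand-in for filter.apply, since the mapper's actual filter configuration is not shown here.

import java.util.Map;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;

BytesReference source = new BytesArray("{\"keep\":1,\"drop\":2}");
Tuple<XContentType, Map<String, Object>> mapTuple =
    XContentHelper.convertToMap(source, true, XContentType.JSON);
Map<String, Object> filteredSource = mapTuple.v2();
filteredSource.remove("drop"); // stand-in for filter.apply(...)
BytesStreamOutput bStream = new BytesStreamOutput();
XContentFactory.contentBuilder(mapTuple.v1(), bStream).map(filteredSource).close();
BytesReference rewritten = bStream.bytes(); // {"keep":1}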
Use of org.elasticsearch.common.bytes.BytesReference in project elasticsearch by elastic.
The class BlobStoreRepository, method writeIndexGen:
protected void writeIndexGen(final RepositoryData repositoryData, final long repositoryStateId) throws IOException {
    // cannot write to a read-only repository
    assert isReadOnly() == false;
    final long currentGen = latestIndexBlobId();
    if (repositoryStateId != SnapshotsInProgress.UNDEFINED_REPOSITORY_STATE_ID && currentGen != repositoryStateId) {
        // the index file was updated by a concurrent operation, so we were operating on stale repository data
        throw new RepositoryException(metadata.name(), "concurrent modification of the index-N file, expected " +
            "current generation [" + repositoryStateId + "], actual current generation [" + currentGen +
            "] - possibly due to simultaneous snapshot deletion requests");
    }
    final long newGen = currentGen + 1;
    final BytesReference snapshotsBytes;
    try (BytesStreamOutput bStream = new BytesStreamOutput()) {
        try (StreamOutput stream = new OutputStreamStreamOutput(bStream)) {
            XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON, stream);
            repositoryData.snapshotsToXContent(builder, ToXContent.EMPTY_PARAMS);
            builder.close();
        }
        snapshotsBytes = bStream.bytes();
    }
    // write the index file
    writeAtomic(INDEX_FILE_PREFIX + Long.toString(newGen), snapshotsBytes);
    // delete the N-2 index file if it exists, keep the previous one around as a backup
    if (isReadOnly() == false && newGen - 2 >= 0) {
        final String oldSnapshotIndexFile = INDEX_FILE_PREFIX + Long.toString(newGen - 2);
        if (snapshotsBlobContainer.blobExists(oldSnapshotIndexFile)) {
            snapshotsBlobContainer.deleteBlob(oldSnapshotIndexFile);
        }
    }
    // write the current generation to the index-latest file
    final BytesReference genBytes;
    try (BytesStreamOutput bStream = new BytesStreamOutput()) {
        bStream.writeLong(newGen);
        genBytes = bStream.bytes();
    }
    if (snapshotsBlobContainer.blobExists(INDEX_LATEST_BLOB)) {
        snapshotsBlobContainer.deleteBlob(INDEX_LATEST_BLOB);
    }
    writeAtomic(INDEX_LATEST_BLOB, genBytes);
}
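Since the generation is stored as a single long, reading it back is symmetric. Below is a hedged companion sketch, assumed to live in the same class so that snapshotsBlobContainer and INDEX_LATEST_BLOB are in scope; the real class resolves the latest generation through latestIndexBlobId() rather than this exact helper.

import java.io.IOException;
import java.io.InputStream;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.io.stream.BytesStreamOutput;

// Companion sketch: decode the generation that writeIndexGen stored via writeLong(newGen).
private long readLatestIndexGen() throws IOException {
    try (InputStream blob = snapshotsBlobContainer.readBlob(INDEX_LATEST_BLOB)) {
        BytesStreamOutput out = new BytesStreamOutput();
        Streams.copy(blob, out);
        return out.bytes().streamInput().readLong();
    }
}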