Use of org.elasticsearch.index.mapper.ParsedDocument in project elasticsearch by elastic.
From the class IndexShardTests, method testIndexingOperationsListeners:
public void testIndexingOperationsListeners() throws IOException {
    IndexShard shard = newStartedShard(true);
    indexDoc(shard, "test", "0", "{\"foo\" : \"bar\"}");
    AtomicInteger preIndex = new AtomicInteger();
    AtomicInteger postIndexCreate = new AtomicInteger();
    AtomicInteger postIndexUpdate = new AtomicInteger();
    AtomicInteger postIndexException = new AtomicInteger();
    AtomicInteger preDelete = new AtomicInteger();
    AtomicInteger postDelete = new AtomicInteger();
    AtomicInteger postDeleteException = new AtomicInteger();
    shard.close("simon says", true);
    // reinitialize the shard with a listener that counts every lifecycle callback
    shard = reinitShard(shard, new IndexingOperationListener() {

        @Override
        public Engine.Index preIndex(ShardId shardId, Engine.Index operation) {
            preIndex.incrementAndGet();
            return operation;
        }

        @Override
        public void postIndex(ShardId shardId, Engine.Index index, Engine.IndexResult result) {
            if (result.hasFailure() == false) {
                if (result.isCreated()) {
                    postIndexCreate.incrementAndGet();
                } else {
                    postIndexUpdate.incrementAndGet();
                }
            } else {
                postIndex(shardId, index, result.getFailure());
            }
        }

        @Override
        public void postIndex(ShardId shardId, Engine.Index index, Exception ex) {
            postIndexException.incrementAndGet();
        }

        @Override
        public Engine.Delete preDelete(ShardId shardId, Engine.Delete delete) {
            preDelete.incrementAndGet();
            return delete;
        }

        @Override
        public void postDelete(ShardId shardId, Engine.Delete delete, Engine.DeleteResult result) {
            if (result.hasFailure() == false) {
                postDelete.incrementAndGet();
            } else {
                postDelete(shardId, delete, result.getFailure());
            }
        }

        @Override
        public void postDelete(ShardId shardId, Engine.Delete delete, Exception ex) {
            postDeleteException.incrementAndGet();
        }
    });
    recoveryShardFromStore(shard);
    ParsedDocument doc = testParsedDocument("1", "test", null, new ParseContext.Document(),
            new BytesArray(new byte[] { 1 }), null);
    Engine.Index index = new Engine.Index(new Term("_uid", doc.uid()), doc);
    // the first index operation creates the document
    shard.index(index);
    assertEquals(1, preIndex.get());
    assertEquals(1, postIndexCreate.get());
    assertEquals(0, postIndexUpdate.get());
    assertEquals(0, postIndexException.get());
    assertEquals(0, preDelete.get());
    assertEquals(0, postDelete.get());
    assertEquals(0, postDeleteException.get());
    // indexing the same document again counts as an update
    shard.index(index);
    assertEquals(2, preIndex.get());
    assertEquals(1, postIndexCreate.get());
    assertEquals(1, postIndexUpdate.get());
    assertEquals(0, postIndexException.get());
    assertEquals(0, preDelete.get());
    assertEquals(0, postDelete.get());
    assertEquals(0, postDeleteException.get());
    Engine.Delete delete = new Engine.Delete("test", "1", new Term("_uid", doc.uid()));
    shard.delete(delete);
    assertEquals(2, preIndex.get());
    assertEquals(1, postIndexCreate.get());
    assertEquals(1, postIndexUpdate.get());
    assertEquals(0, postIndexException.get());
    assertEquals(1, preDelete.get());
    assertEquals(1, postDelete.get());
    assertEquals(0, postDeleteException.get());
    shard.close("Unexpected close", true);
    // force the state back to STARTED so the following operations reach the
    // already-closed engine, which throws AlreadyClosedException
    shard.state = IndexShardState.STARTED;
    try {
        shard.index(index);
        fail();
    } catch (AlreadyClosedException e) {
        // expected: operations on a closed shard must not reach the listener
    }
    assertEquals(2, preIndex.get());
    assertEquals(1, postIndexCreate.get());
    assertEquals(1, postIndexUpdate.get());
    assertEquals(0, postIndexException.get());
    assertEquals(1, preDelete.get());
    assertEquals(1, postDelete.get());
    assertEquals(0, postDeleteException.get());
    try {
        shard.delete(delete);
        fail();
    } catch (AlreadyClosedException e) {
        // expected
    }
    assertEquals(2, preIndex.get());
    assertEquals(1, postIndexCreate.get());
    assertEquals(1, postIndexUpdate.get());
    assertEquals(0, postIndexException.get());
    assertEquals(1, preDelete.get());
    assertEquals(1, postDelete.get());
    assertEquals(0, postDeleteException.get());
    closeShards(shard);
}
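Because IndexingOperationListener declares its callbacks as default no-op methods, a listener normally overrides only the hooks it needs rather than all six as the test does. A minimal sketch (the class name IndexCountingListener and its counter are illustrative, not from the test):

import java.util.concurrent.atomic.AtomicLong;

// Minimal sketch: count only successful index operations; every other
// IndexingOperationListener callback keeps its default no-op behavior.
public class IndexCountingListener implements IndexingOperationListener {

    private final AtomicLong indexed = new AtomicLong();

    @Override
    public void postIndex(ShardId shardId, Engine.Index index, Engine.IndexResult result) {
        if (result.hasFailure() == false) {
            indexed.incrementAndGet();
        }
    }

    public long indexedCount() {
        return indexed.get();
    }
}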
Use of org.elasticsearch.index.mapper.ParsedDocument in project elasticsearch by elastic.
From the class IndexShardIT, method testMaybeFlush:
public void testMaybeFlush() throws Exception {
    createIndex("test", Settings.builder()
            .put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.REQUEST)
            .build());
    ensureGreen();
    IndicesService indicesService = getInstanceFromNode(IndicesService.class);
    IndexService test = indicesService.indexService(resolveIndex("test"));
    IndexShard shard = test.getShardOrNull(0);
    assertFalse(shard.shouldFlush());
    // lower the flush threshold to the size of a single translog operation
    client().admin().indices().prepareUpdateSettings("test")
            .setSettings(Settings.builder()
                    .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(),
                            new ByteSizeValue(117, /* size of the operation + header & footer */ ByteSizeUnit.BYTES))
                    .build())
            .get();
    client().prepareIndex("test", "test", "0").setSource("{}", XContentType.JSON)
            .setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get();
    assertFalse(shard.shouldFlush());
    ParsedDocument doc = testParsedDocument("1", "test", null, SequenceNumbersService.UNASSIGNED_SEQ_NO,
            new ParseContext.Document(), new BytesArray(new byte[] { 1 }), XContentType.JSON, null);
    Engine.Index index = new Engine.Index(new Term("_uid", doc.uid()), doc);
    shard.index(index);
    assertTrue(shard.shouldFlush());
    assertEquals(2, shard.getEngine().getTranslog().totalOperations());
    client().prepareIndex("test", "test", "2").setSource("{}", XContentType.JSON)
            .setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get();
    assertBusy(() -> {
        // the flush is triggered asynchronously
        assertFalse(shard.shouldFlush());
    });
    assertEquals(0, shard.getEngine().getTranslog().totalOperations());
    shard.getEngine().getTranslog().sync();
    long size = shard.getEngine().getTranslog().sizeInBytes();
    logger.info("--> current translog size: [{}] num_ops [{}] generation [{}]",
            shard.getEngine().getTranslog().sizeInBytes(),
            shard.getEngine().getTranslog().totalOperations(),
            shard.getEngine().getTranslog().getGeneration());
    client().admin().indices().prepareUpdateSettings("test")
            .setSettings(Settings.builder()
                    .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(),
                            new ByteSizeValue(size, ByteSizeUnit.BYTES))
                    .build())
            .get();
    client().prepareDelete("test", "test", "2").get();
    logger.info("--> translog size after delete: [{}] num_ops [{}] generation [{}]",
            shard.getEngine().getTranslog().sizeInBytes(),
            shard.getEngine().getTranslog().totalOperations(),
            shard.getEngine().getTranslog().getGeneration());
    assertBusy(() -> {
        // the flush is triggered asynchronously
        logger.info("--> translog size on iter: [{}] num_ops [{}] generation [{}]",
                shard.getEngine().getTranslog().sizeInBytes(),
                shard.getEngine().getTranslog().totalOperations(),
                shard.getEngine().getTranslog().getGeneration());
        assertFalse(shard.shouldFlush());
    });
    assertEquals(0, shard.getEngine().getTranslog().totalOperations());
}
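The test drives shouldFlush() by shrinking index.translog.flush_threshold_size below the size of a pending translog operation. Conceptually, the check compares the live translog size against the configured threshold. A simplified sketch of that comparison (illustrative only; the real IndexShard implementation also guards against the engine being closed concurrently):

// Simplified sketch of the flush-threshold check exercised by testMaybeFlush.
// Assumes a live Translog and the resolved threshold setting; not the actual code.
static boolean shouldFlush(Translog translog, ByteSizeValue flushThresholdSize) {
    return translog.sizeInBytes() > flushThresholdSize.getBytes();
}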
Use of org.elasticsearch.index.mapper.ParsedDocument in project elasticsearch by elastic.
From the class IndexingOperationListenerTests, method testListenersAreExecuted:
// this test also verifies that the callbacks stay correct when one or more listeners throw exceptions
public void testListenersAreExecuted() {
    AtomicInteger preIndex = new AtomicInteger();
    AtomicInteger postIndex = new AtomicInteger();
    AtomicInteger postIndexException = new AtomicInteger();
    AtomicInteger preDelete = new AtomicInteger();
    AtomicInteger postDelete = new AtomicInteger();
    AtomicInteger postDeleteException = new AtomicInteger();
    ShardId randomShardId = new ShardId(new Index(randomAsciiOfLength(10), randomAsciiOfLength(10)), randomIntBetween(1, 10));
    IndexingOperationListener listener = new IndexingOperationListener() {

        @Override
        public Engine.Index preIndex(ShardId shardId, Engine.Index operation) {
            assertThat(shardId, is(randomShardId));
            preIndex.incrementAndGet();
            return operation;
        }

        @Override
        public void postIndex(ShardId shardId, Engine.Index index, Engine.IndexResult result) {
            assertThat(shardId, is(randomShardId));
            if (result.hasFailure() == false) {
                postIndex.incrementAndGet();
            } else {
                postIndex(shardId, index, result.getFailure());
            }
        }

        @Override
        public void postIndex(ShardId shardId, Engine.Index index, Exception ex) {
            assertThat(shardId, is(randomShardId));
            postIndexException.incrementAndGet();
        }

        @Override
        public Engine.Delete preDelete(ShardId shardId, Engine.Delete delete) {
            assertThat(shardId, is(randomShardId));
            preDelete.incrementAndGet();
            return delete;
        }

        @Override
        public void postDelete(ShardId shardId, Engine.Delete delete, Engine.DeleteResult result) {
            assertThat(shardId, is(randomShardId));
            if (result.hasFailure() == false) {
                postDelete.incrementAndGet();
            } else {
                postDelete(shardId, delete, result.getFailure());
            }
        }

        @Override
        public void postDelete(ShardId shardId, Engine.Delete delete, Exception ex) {
            assertThat(shardId, is(randomShardId));
            postDeleteException.incrementAndGet();
        }
    };
    IndexingOperationListener throwingListener = new IndexingOperationListener() {

        @Override
        public Engine.Index preIndex(ShardId shardId, Engine.Index operation) {
            throw new RuntimeException();
        }

        @Override
        public void postIndex(ShardId shardId, Engine.Index index, Engine.IndexResult result) {
            throw new RuntimeException();
        }

        @Override
        public void postIndex(ShardId shardId, Engine.Index index, Exception ex) {
            throw new RuntimeException();
        }

        @Override
        public Engine.Delete preDelete(ShardId shardId, Engine.Delete delete) {
            throw new RuntimeException();
        }

        @Override
        public void postDelete(ShardId shardId, Engine.Delete delete, Engine.DeleteResult result) {
            throw new RuntimeException();
        }

        @Override
        public void postDelete(ShardId shardId, Engine.Delete delete, Exception ex) {
            throw new RuntimeException();
        }
    };
    // the well-behaved listener is registered twice, so every counter is expected to
    // advance by two; throwing listeners may be mixed in to verify they don't break the others
    final List<IndexingOperationListener> indexingOperationListeners = new ArrayList<>(Arrays.asList(listener, listener));
    if (randomBoolean()) {
        indexingOperationListeners.add(throwingListener);
        if (randomBoolean()) {
            indexingOperationListeners.add(throwingListener);
        }
    }
    Collections.shuffle(indexingOperationListeners, random());
    IndexingOperationListener.CompositeListener compositeListener =
            new IndexingOperationListener.CompositeListener(indexingOperationListeners, logger);
    ParsedDocument doc = InternalEngineTests.createParsedDoc("1", "test", null);
    Engine.Delete delete = new Engine.Delete("test", "1", new Term("_uid", doc.uid()));
    Engine.Index index = new Engine.Index(new Term("_uid", doc.uid()), doc);
    compositeListener.postDelete(randomShardId, delete, new Engine.DeleteResult(1, SequenceNumbersService.UNASSIGNED_SEQ_NO, true));
    assertEquals(0, preIndex.get());
    assertEquals(0, postIndex.get());
    assertEquals(0, postIndexException.get());
    assertEquals(0, preDelete.get());
    assertEquals(2, postDelete.get());
    assertEquals(0, postDeleteException.get());
    compositeListener.postDelete(randomShardId, delete, new RuntimeException());
    assertEquals(0, preIndex.get());
    assertEquals(0, postIndex.get());
    assertEquals(0, postIndexException.get());
    assertEquals(0, preDelete.get());
    assertEquals(2, postDelete.get());
    assertEquals(2, postDeleteException.get());
    compositeListener.preDelete(randomShardId, delete);
    assertEquals(0, preIndex.get());
    assertEquals(0, postIndex.get());
    assertEquals(0, postIndexException.get());
    assertEquals(2, preDelete.get());
    assertEquals(2, postDelete.get());
    assertEquals(2, postDeleteException.get());
    compositeListener.postIndex(randomShardId, index, new Engine.IndexResult(0, SequenceNumbersService.UNASSIGNED_SEQ_NO, false));
    assertEquals(0, preIndex.get());
    assertEquals(2, postIndex.get());
    assertEquals(0, postIndexException.get());
    assertEquals(2, preDelete.get());
    assertEquals(2, postDelete.get());
    assertEquals(2, postDeleteException.get());
    compositeListener.postIndex(randomShardId, index, new RuntimeException());
    assertEquals(0, preIndex.get());
    assertEquals(2, postIndex.get());
    assertEquals(2, postIndexException.get());
    assertEquals(2, preDelete.get());
    assertEquals(2, postDelete.get());
    assertEquals(2, postDeleteException.get());
    compositeListener.preIndex(randomShardId, index);
    assertEquals(2, preIndex.get());
    assertEquals(2, postIndex.get());
    assertEquals(2, postIndexException.get());
    assertEquals(2, preDelete.get());
    assertEquals(2, postDelete.get());
    assertEquals(2, postDeleteException.get());
}
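The expected counts above (always increments of two, regardless of whether throwing listeners were added) work because CompositeListener isolates listeners from each other: it invokes each registered listener in turn and logs, rather than propagates, a listener's exception. A sketch of that dispatch pattern, simplified from what the test exercises (the logging call shown is illustrative):

// Sketch of CompositeListener-style dispatch: each listener runs in turn, and an
// exception from one listener is logged and swallowed so the remaining listeners
// are still invoked. Simplified; the real class is nested in IndexingOperationListener.
public Engine.Index preIndex(ShardId shardId, Engine.Index operation) {
    for (IndexingOperationListener listener : listeners) {
        try {
            listener.preIndex(shardId, operation);
        } catch (Exception e) {
            logger.warn("preIndex listener [" + listener + "] failed", e);
        }
    }
    return operation;
}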
Use of org.elasticsearch.index.mapper.ParsedDocument in project elasticsearch by elastic.
From the class RefreshListenersTests, method index:
private Engine.IndexResult index(String id, String testFieldValue) throws IOException {
    String type = "test";
    String uid = type + ":" + id;
    Document document = new Document();
    document.add(new TextField("test", testFieldValue, Field.Store.YES));
    Field uidField = new Field("_uid", Uid.createUid(type, id), UidFieldMapper.Defaults.FIELD_TYPE);
    Field versionField = new NumericDocValuesField("_version", Versions.MATCH_ANY);
    SeqNoFieldMapper.SequenceID seqID = SeqNoFieldMapper.SequenceID.emptySeqID();
    document.add(uidField);
    document.add(versionField);
    document.add(seqID.seqNo);
    document.add(seqID.seqNoDocValue);
    document.add(seqID.primaryTerm);
    BytesReference source = new BytesArray(new byte[] { 1 });
    ParsedDocument doc = new ParsedDocument(versionField, seqID, id, type, null, Arrays.asList(document), source, XContentType.JSON, null);
    Engine.Index index = new Engine.Index(new Term("_uid", doc.uid()), doc);
    return engine.index(index);
}
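This helper hand-builds the Lucene document with the _uid, _version, and sequence-number metadata fields the engine expects, wraps it in a ParsedDocument, and pushes it through the engine. A hedged usage sketch (the field value is arbitrary; the assertion style mirrors the snippet above):

// Illustrative call of the helper: index a document and verify it succeeded.
Engine.IndexResult result = index("1", "the quick brown fox");
assertFalse(result.hasFailure());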
Use of org.elasticsearch.index.mapper.ParsedDocument in project elasticsearch by elastic.
From the class TranslogTests, method testTranslogOpSerialization:
public void testTranslogOpSerialization() throws Exception {
    BytesReference B_1 = new BytesArray(new byte[] { 1 });
    SeqNoFieldMapper.SequenceID seqID = SeqNoFieldMapper.SequenceID.emptySeqID();
    assert Version.CURRENT.major <= 6 : "Using UNASSIGNED_SEQ_NO can be removed in 7.0, because 6.0+ nodes have actual sequence numbers";
    long randomSeqNum = randomBoolean() ? SequenceNumbersService.UNASSIGNED_SEQ_NO : randomNonNegativeLong();
    long randomPrimaryTerm = randomBoolean() ? 0 : randomNonNegativeLong();
    seqID.seqNo.setLongValue(randomSeqNum);
    seqID.seqNoDocValue.setLongValue(randomSeqNum);
    seqID.primaryTerm.setLongValue(randomPrimaryTerm);
    Field uidField = new Field("_uid", Uid.createUid("test", "1"), UidFieldMapper.Defaults.FIELD_TYPE);
    Field versionField = new NumericDocValuesField("_version", 1);
    Document document = new Document();
    document.add(new TextField("value", "test", Field.Store.YES));
    document.add(uidField);
    document.add(versionField);
    document.add(seqID.seqNo);
    document.add(seqID.seqNoDocValue);
    document.add(seqID.primaryTerm);
    ParsedDocument doc = new ParsedDocument(versionField, seqID, "1", "type", null, Arrays.asList(document), B_1, XContentType.JSON, null);
    Engine.Index eIndex = new Engine.Index(newUid(doc), doc, randomSeqNum, randomPrimaryTerm, 1, VersionType.INTERNAL, Origin.PRIMARY, 0, 0, false);
    Engine.IndexResult eIndexResult = new Engine.IndexResult(1, randomSeqNum, true);
    Translog.Index index = new Translog.Index(eIndex, eIndexResult);
    BytesStreamOutput out = new BytesStreamOutput();
    index.writeTo(out);
    StreamInput in = out.bytes().streamInput();
    Translog.Index serializedIndex = new Translog.Index(in);
    assertEquals(index, serializedIndex);
    Engine.Delete eDelete = new Engine.Delete(doc.type(), doc.id(), newUid(doc), randomSeqNum, randomPrimaryTerm, 2, VersionType.INTERNAL, Origin.PRIMARY, 0);
    Engine.DeleteResult eDeleteResult = new Engine.DeleteResult(2, randomSeqNum, true);
    Translog.Delete delete = new Translog.Delete(eDelete, eDeleteResult);
    out = new BytesStreamOutput();
    delete.writeTo(out);
    in = out.bytes().streamInput();
    Translog.Delete serializedDelete = new Translog.Delete(in);
    assertEquals(delete, serializedDelete);
}
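The write-then-read pattern above (serialize to a BytesStreamOutput, re-read through the type's StreamInput constructor, compare with assertEquals) applies to any Writeable in the codebase. A generic sketch, assuming the type exposes a StreamInput constructor usable as a Writeable.Reader (the helper name roundTrip is illustrative, not from the test):

// Generic round-trip helper following the pattern of the test above; illustrative.
static <T extends Writeable> T roundTrip(T original, Writeable.Reader<T> reader) throws IOException {
    BytesStreamOutput out = new BytesStreamOutput();
    original.writeTo(out);
    try (StreamInput in = out.bytes().streamInput()) {
        return reader.read(in);
    }
}

// e.g. assertEquals(index, roundTrip(index, Translog.Index::new));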