Use of com.couchbase.connector.cluster.consul.AsyncTask in the couchbase-elasticsearch-connector project by Couchbase.
The following example is from the class BasicReplicationTest, method createDeleteReject.
@Test
public void createDeleteReject() throws Throwable {
  try (TestCouchbaseClient cb = new TestCouchbaseClient(commonConfig)) {
    final Bucket bucket = cb.createTempBucket(couchbase);
    final Collection collection = bucket.defaultCollection();
    final ImmutableConnectorConfig config = withBucketName(commonConfig, bucket.name());

    try (TestEsClient es = new TestEsClient(config);
         AsyncTask connector = AsyncTask.run(() -> ElasticsearchConnector.run(config))) {

      assertIndexInferredFromDocumentId(bucket, es);

      // Create two documents in the same vbucket to make sure we're not conflating seqno and revision number.
      // This first one has a seqno and revision number that are the same... not useful for the test.
      final String firstKeyInVbucket = forceKeyToPartition("createdFirst", 0, 1024).get();
      upsertWithRetry(bucket, JsonDocument.create(firstKeyInVbucket, JsonObject.create()));

      // Here's the document we're going to test! Its seqno should be different than document revision.
      final String blueKey = forceKeyToPartition("color:blue", 0, 1024).get();
      final MutationResult upsertResult = upsertWithRetry(bucket,
          JsonDocument.create(blueKey, JsonObject.create().put("hex", "0000ff")));

      final JsonNode content = es.waitForDocument(CATCH_ALL_INDEX, blueKey);
      System.out.println(content);

      final long expectedDocumentRevision = 1;

      final JsonNode meta = content.path("meta");
      assertEquals(blueKey, meta.path("id").textValue());
      assertEquals(expectedDocumentRevision, meta.path("revSeqno").longValue());
      assertTrue(meta.path("lockTime").isIntegralNumber());
      assertEquals(0, meta.path("lockTime").longValue());
      assertTrue(meta.path("expiration").isIntegralNumber());
      assertEquals(0, meta.path("expiration").longValue());
      assertThat(meta.path("rev").textValue()).startsWith(expectedDocumentRevision + "-");
      assertTrue(meta.path("flags").isIntegralNumber());
      assertEquals(upsertResult.cas(), meta.path("cas").longValue());

      MutationToken mutationToken = upsertResult.mutationToken()
          .orElseThrow(() -> new AssertionError("expected mutation token"));
      assertEquals(mutationToken.sequenceNumber(), meta.path("seqno").longValue());
      assertEquals(mutationToken.partitionID(), meta.path("vbucket").longValue());
      assertEquals(mutationToken.partitionUUID(), meta.path("vbuuid").longValue());

      assertEquals("0000ff", content.path("doc").path("hex").textValue());

      // Make sure deletions are propagated to elasticsearch
      collection.remove(blueKey);
      es.waitForDeletion(CATCH_ALL_INDEX, blueKey);

      // Create an incompatible document (different type for "hex" field, Object instead of String)
      final String redKey = "color:red";
      upsertWithRetry(bucket, JsonDocument.create(redKey, JsonObject.create()
          .put("hex", JsonObject.create()
              .put("red", "ff")
              .put("green", "00")
              .put("blue", "00"))));
      assertDocumentRejected(es, CATCH_ALL_INDEX, redKey, "mapper_parsing_exception");

      // Elasticsearch doesn't support BigInteger fields. This error surfaces when creating the index request,
      // before the request is sent to Elasticsearch. Make sure we trapped the error and converted it to a rejection.
      final String bigIntKey = "veryLargeNumber";
      upsertWithRetry(bucket, JsonDocument.create(bigIntKey,
          JsonObject.create().put("number", new BigInteger("17626319910530664276"))));
      assertDocumentRejected(es, CATCH_ALL_INDEX, bigIntKey, "mapper_parsing_exception");
    }
  }
}
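In this test, AsyncTask only serves to run the blocking ElasticsearchConnector.run(config) call on a background thread and to shut it down when the try-with-resources block exits. Below is a minimal standalone sketch of that same pattern, assuming only what the test shows: AsyncTask.run accepts a task that may throw and returns a closeable handle. The class AsyncTaskSketch and its doWork method are hypothetical stand-ins, not part of the connector.

import com.couchbase.connector.cluster.consul.AsyncTask;

public class AsyncTaskSketch {

  public static void main(String[] args) throws Throwable {
    // Start the blocking task on a background thread; the returned handle is
    // used with try-with-resources so the task is shut down on exit, mirroring
    // how the test wraps ElasticsearchConnector.run(config).
    try (AsyncTask task = AsyncTask.run(() -> doWork())) {
      // Interact with the running task here, e.g. the test polls Elasticsearch
      // with es.waitForDocument(...) while the connector streams documents.
      Thread.sleep(1000);
    }
    // Leaving the block closes the handle, stopping the background task.
  }

  // Hypothetical stand-in for a long-running, possibly-throwing call such as
  // ElasticsearchConnector.run(config).
  private static void doWork() throws InterruptedException {
    Thread.sleep(Long.MAX_VALUE);
  }
}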