Use of org.janusgraph.diskstorage.log.Message in project janusgraph by JanusGraph.
The class StandardJanusGraph, method commit. The commit path writes a PRECOMMIT entry to the write-ahead transaction log, commits primary storage and indexes, and, when the transaction has a log identifier configured, appends the mutations to a user log, receiving a Future<Message> for the appended entry.
public void commit(final Collection<InternalRelation> addedRelations, final Collection<InternalRelation> deletedRelations, final StandardJanusGraphTx tx) {
if (addedRelations.isEmpty() && deletedRelations.isEmpty())
return;
// 1. Finalize transaction
log.debug("Saving transaction. Added {}, removed {}", addedRelations.size(), deletedRelations.size());
if (!tx.getConfiguration().hasCommitTime())
tx.getConfiguration().setCommitTime(times.getTime());
final Instant txTimestamp = tx.getConfiguration().getCommitTime();
final long transactionId = txCounter.incrementAndGet();
// 2. Assign JanusGraphVertex IDs
if (!tx.getConfiguration().hasAssignIDsImmediately())
idAssigner.assignIDs(addedRelations);
// 3. Commit
BackendTransaction mutator = tx.getTxHandle();
final boolean acquireLocks = tx.getConfiguration().hasAcquireLocks();
final boolean hasTxIsolation = backend.getStoreFeatures().hasTxIsolation();
final boolean logTransaction = config.hasLogTransactions() && !tx.getConfiguration().hasEnabledBatchLoading();
final KCVSLog txLog = logTransaction ? backend.getSystemTxLog() : null;
final TransactionLogHeader txLogHeader = new TransactionLogHeader(transactionId, txTimestamp, times);
ModificationSummary commitSummary;
try {
// 3.1 Log transaction (write-ahead log) if enabled
if (logTransaction) {
// [FAILURE] Inability to log transaction fails the transaction by escalation since it's likely due to unavailability of primary
// storage backend.
Preconditions.checkState(txLog != null, "Transaction log is null");
txLog.add(txLogHeader.serializeModifications(serializer, LogTxStatus.PRECOMMIT, tx, addedRelations, deletedRelations), txLogHeader.getLogKey());
}
// 3.2 Commit schema elements and their associated relations in a separate transaction if backend does not support
// transactional isolation
boolean hasSchemaElements = !Iterables.isEmpty(Iterables.filter(deletedRelations, SCHEMA_FILTER)) || !Iterables.isEmpty(Iterables.filter(addedRelations, SCHEMA_FILTER));
Preconditions.checkArgument(!hasSchemaElements || (!tx.getConfiguration().hasEnabledBatchLoading() && acquireLocks), "Attempting to create schema elements in inconsistent state");
if (hasSchemaElements && !hasTxIsolation) {
/*
* On storage without transactional isolation, create separate
* backend transaction for schema aspects to make sure that
* those are persisted prior to and independently of other
* mutations in the tx. If the storage supports transactional
* isolation, then don't create a separate tx.
*/
final BackendTransaction schemaMutator = openBackendTransaction(tx);
try {
// [FAILURE] If the preparation throws an exception abort directly - nothing persisted since batch-loading cannot be enabled for schema elements
commitSummary = prepareCommit(addedRelations, deletedRelations, SCHEMA_FILTER, schemaMutator, tx, acquireLocks);
assert commitSummary.hasModifications && !commitSummary.has2iModifications;
} catch (Throwable e) {
// Roll back schema tx and escalate exception
schemaMutator.rollback();
throw e;
}
try {
schemaMutator.commit();
} catch (Throwable e) {
// [FAILURE] Primary persistence failed => abort and escalate exception, nothing should have been persisted
log.error("Could not commit transaction [" + transactionId + "] due to storage exception in system-commit", e);
throw e;
}
}
// [FAILURE] Exceptions during preparation here cause the entire transaction to fail on transactional systems
// or just the non-system part on others. Nothing has been persisted unless batch-loading is enabled
commitSummary = prepareCommit(addedRelations, deletedRelations, hasTxIsolation ? NO_FILTER : NO_SCHEMA_FILTER, mutator, tx, acquireLocks);
if (commitSummary.hasModifications) {
String logTxIdentifier = tx.getConfiguration().getLogIdentifier();
boolean hasSecondaryPersistence = logTxIdentifier != null || commitSummary.has2iModifications;
// This should not throw an exception since the mutations are just cached. If it does, it will be escalated since it's critical
if (logTransaction) {
txLog.add(txLogHeader.serializePrimary(serializer, hasSecondaryPersistence ? LogTxStatus.PRIMARY_SUCCESS : LogTxStatus.COMPLETE_SUCCESS), txLogHeader.getLogKey(), mutator.getTxLogPersistor());
}
try {
mutator.commitStorage();
} catch (Throwable e) {
// [FAILURE] If primary storage persistence fails abort directly (only schema could have been persisted)
log.error("Could not commit transaction [" + transactionId + "] due to storage exception in commit", e);
throw e;
}
if (hasSecondaryPersistence) {
LogTxStatus status = LogTxStatus.SECONDARY_SUCCESS;
Map<String, Throwable> indexFailures = ImmutableMap.of();
boolean userlogSuccess = true;
try {
// 2. Commit indexes - [FAILURE] all exceptions are collected and logged but nothing is aborted
indexFailures = mutator.commitIndexes();
if (!indexFailures.isEmpty()) {
status = LogTxStatus.SECONDARY_FAILURE;
for (Map.Entry<String, Throwable> entry : indexFailures.entrySet()) {
log.error("Error while committing index mutations for transaction [" + transactionId + "] on index: " + entry.getKey(), entry.getValue());
}
}
// 3. Log transaction if configured - [FAILURE] is recorded but does not cause exception
if (logTxIdentifier != null) {
try {
userlogSuccess = false;
final Log userLog = backend.getUserLog(logTxIdentifier);
Future<Message> env = userLog.add(txLogHeader.serializeModifications(serializer, LogTxStatus.USER_LOG, tx, addedRelations, deletedRelations));
if (env.isDone()) {
try {
env.get();
} catch (ExecutionException ex) {
throw ex.getCause();
}
}
userlogSuccess = true;
} catch (Throwable e) {
status = LogTxStatus.SECONDARY_FAILURE;
log.error("Could not user-log committed transaction [" + transactionId + "] to " + logTxIdentifier, e);
}
}
} finally {
if (logTransaction) {
// A failure to record the secondary-persistence status is only logged here; the out-of-sync tx-log entry needs to be cleaned up later (e.g. by transaction recovery)
try {
txLog.add(txLogHeader.serializeSecondary(serializer, status, indexFailures, userlogSuccess), txLogHeader.getLogKey());
} catch (Throwable e) {
log.error("Could not tx-log secondary persistence status on transaction [" + transactionId + "]", e);
}
}
}
} else {
// This just closes the transaction since there are no modifications
mutator.commitIndexes();
}
} else {
// Just commit everything at once
// [FAILURE] This case only happens when there are no non-system mutations in which case all changes
// are already flushed. Hence, an exception here is unlikely and should abort
mutator.commit();
}
} catch (Throwable e) {
log.error("Could not commit transaction [" + transactionId + "] due to exception", e);
try {
// Clean up any left-over transaction handles
mutator.rollback();
} catch (Throwable e2) {
log.error("Could not roll-back transaction [" + transactionId + "] after failure due to exception", e2);
}
if (e instanceof RuntimeException)
throw (RuntimeException) e;
else
throw new JanusGraphException("Unexpected exception", e);
}
}
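The only direct use of Message in this method is the Future<Message> returned by userLog.add(...). For context, here is a minimal sketch of the consuming side: registering a MessageReader on a Log so that each appended entry arrives as a Message. It assumes a Log handle obtained elsewhere (for example via backend.getUserLog(...), as in the commit path above); the class and variable names are illustrative only.
import java.time.Instant;
import org.janusgraph.diskstorage.log.Log;
import org.janusgraph.diskstorage.log.Message;
import org.janusgraph.diskstorage.log.MessageReader;
import org.janusgraph.diskstorage.log.ReadMarker;

public class MessageReaderSketch {
    // Registers a reader that receives every Message appended to the log since the given instant.
    public static void attach(Log log, Instant since) {
        log.registerReader(ReadMarker.fromTime(since), new MessageReader() {
            @Override
            public void read(Message message) {
                // A Message exposes the sending instance id, the write timestamp, and the raw payload.
                System.out.println(message.getSenderId() + " @ " + message.getTimestamp()
                        + ": " + message.getContent().length() + " bytes");
            }

            @Override
            public void updateState() {
                // No reader-side state to checkpoint in this sketch.
            }
        });
    }
}
The test below uses the same registerReader pattern against both the system transaction log and a user log.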
Use of org.janusgraph.diskstorage.log.Message in project janusgraph by JanusGraph.
The class JanusGraphTest, method simpleLogTest. The test commits several transactions with a user-log identifier, registers MessageReader instances on the transaction log and the user log to verify Message timestamps, senders, statuses, and counts, and then exercises transaction recovery and the LogProcessorFramework.
public void simpleLogTest(final boolean withLogFailure) throws InterruptedException {
final String userLogName = "test";
final Serializer serializer = graph.getDataSerializer();
final EdgeSerializer edgeSerializer = graph.getEdgeSerializer();
final TimestampProvider times = graph.getConfiguration().getTimestampProvider();
final Instant startTime = times.getTime();
clopen(option(SYSTEM_LOG_TRANSACTIONS), true,
option(LOG_BACKEND, USER_LOG), (withLogFailure ? TestMockLog.class.getName() : LOG_BACKEND.getDefaultValue()),
option(TestMockLog.LOG_MOCK_FAILADD, USER_LOG), withLogFailure,
option(KCVSLog.LOG_READ_LAG_TIME, USER_LOG), Duration.ofMillis(50),
option(LOG_READ_INTERVAL, USER_LOG), Duration.ofMillis(250),
option(LOG_SEND_DELAY, USER_LOG), Duration.ofMillis(100),
option(KCVSLog.LOG_READ_LAG_TIME, TRANSACTION_LOG), Duration.ofMillis(50),
option(LOG_READ_INTERVAL, TRANSACTION_LOG), Duration.ofMillis(250),
option(MAX_COMMIT_TIME), Duration.ofSeconds(1));
final String instanceId = graph.getConfiguration().getUniqueGraphId();
PropertyKey weight = tx.makePropertyKey("weight").dataType(Float.class).cardinality(Cardinality.SINGLE).make();
EdgeLabel knows = tx.makeEdgeLabel("knows").make();
JanusGraphVertex n1 = tx.addVertex("weight", 10.5);
newTx();
final Instant[] txTimes = new Instant[4];
// Transaction with custom user log name
txTimes[0] = times.getTime();
JanusGraphTransaction tx2 = graph.buildTransaction().logIdentifier(userLogName).start();
JanusGraphVertex v1 = tx2.addVertex("weight", 111.1);
v1.addEdge("knows", v1);
tx2.commit();
final long v1id = getId(v1);
txTimes[1] = times.getTime();
tx2 = graph.buildTransaction().logIdentifier(userLogName).start();
JanusGraphVertex v2 = tx2.addVertex("weight", 222.2);
v2.addEdge("knows", getV(tx2, v1id));
tx2.commit();
final long v2id = getId(v2);
// Only read tx
tx2 = graph.buildTransaction().logIdentifier(userLogName).start();
v1 = getV(tx2, v1id);
assertEquals(111.1, v1.<Float>value("weight").doubleValue(), 0.01);
assertEquals(222.2, getV(tx2, v2).<Float>value("weight").doubleValue(), 0.01);
tx2.commit();
// Deleting transaction
txTimes[2] = times.getTime();
tx2 = graph.buildTransaction().logIdentifier(userLogName).start();
v2 = getV(tx2, v2id);
assertEquals(222.2, v2.<Float>value("weight").doubleValue(), 0.01);
v2.remove();
tx2.commit();
// Edge modifying transaction
txTimes[3] = times.getTime();
tx2 = graph.buildTransaction().logIdentifier(userLogName).start();
v1 = getV(tx2, v1id);
assertEquals(111.1, v1.<Float>value("weight").doubleValue(), 0.01);
final Edge e = Iterables.getOnlyElement(v1.query().direction(Direction.OUT).labels("knows").edges());
assertFalse(e.property("weight").isPresent());
e.property("weight", 44.4);
tx2.commit();
close();
final Instant endTime = times.getTime();
final ReadMarker startMarker = ReadMarker.fromTime(startTime);
final Log transactionLog = openTxLog();
final Log userLog = openUserLog(userLogName);
final EnumMap<LogTxStatus, AtomicInteger> txMsgCounter = new EnumMap<>(LogTxStatus.class);
for (final LogTxStatus status : LogTxStatus.values()) txMsgCounter.put(status, new AtomicInteger(0));
final AtomicInteger userLogMeta = new AtomicInteger(0);
transactionLog.registerReader(startMarker, new MessageReader() {
@Override
public void read(Message message) {
final Instant msgTime = message.getTimestamp();
assertTrue(msgTime.isAfter(startTime) || msgTime.equals(startTime));
assertNotNull(message.getSenderId());
final TransactionLogHeader.Entry txEntry = TransactionLogHeader.parse(message.getContent(), serializer, times);
final TransactionLogHeader header = txEntry.getHeader();
// System.out.println(header.getTimestamp(TimeUnit.MILLISECONDS));
assertTrue(header.getTimestamp().isAfter(startTime) || header.getTimestamp().equals(startTime));
assertTrue(header.getTimestamp().isBefore(msgTime) || header.getTimestamp().equals(msgTime));
assertNotNull(txEntry.getMetadata());
assertNull(txEntry.getMetadata().get(LogTxMeta.GROUPNAME));
final LogTxStatus status = txEntry.getStatus();
if (status == LogTxStatus.PRECOMMIT) {
assertTrue(txEntry.hasContent());
final Object logId = txEntry.getMetadata().get(LogTxMeta.LOG_ID);
if (logId != null) {
assertTrue(logId instanceof String);
assertEquals(userLogName, logId);
userLogMeta.incrementAndGet();
}
} else if (withLogFailure) {
assertTrue(status.isPrimarySuccess() || status == LogTxStatus.SECONDARY_FAILURE);
if (status == LogTxStatus.SECONDARY_FAILURE) {
final TransactionLogHeader.SecondaryFailures secFail = txEntry.getContentAsSecondaryFailures(serializer);
assertTrue(secFail.failedIndexes.isEmpty());
assertTrue(secFail.userLogFailure);
}
} else {
assertFalse(txEntry.hasContent());
assertTrue(status.isSuccess());
}
txMsgCounter.get(txEntry.getStatus()).incrementAndGet();
}
@Override
public void updateState() {
}
});
final EnumMap<Change, AtomicInteger> userChangeCounter = new EnumMap<>(Change.class);
for (final Change change : Change.values()) userChangeCounter.put(change, new AtomicInteger(0));
final AtomicInteger userLogMsgCounter = new AtomicInteger(0);
userLog.registerReader(startMarker, new MessageReader() {
@Override
public void read(Message message) {
final Instant msgTime = message.getTimestamp();
assertTrue(msgTime.isAfter(startTime) || msgTime.equals(startTime));
assertNotNull(message.getSenderId());
final StaticBuffer content = message.getContent();
assertTrue(content != null && content.length() > 0);
final TransactionLogHeader.Entry transactionEntry = TransactionLogHeader.parse(content, serializer, times);
final Instant txTime = transactionEntry.getHeader().getTimestamp();
assertTrue(txTime.isBefore(msgTime) || txTime.equals(msgTime));
assertTrue(txTime.isAfter(startTime) || txTime.equals(startTime));
final long transactionId = transactionEntry.getHeader().getId();
assertTrue(transactionId > 0);
transactionEntry.getContentAsModifications(serializer).forEach(modification -> {
assertTrue(modification.state == Change.ADDED || modification.state == Change.REMOVED);
userChangeCounter.get(modification.state).incrementAndGet();
});
userLogMsgCounter.incrementAndGet();
}
@Override
public void updateState() {
}
});
Thread.sleep(4000);
assertEquals(5, txMsgCounter.get(LogTxStatus.PRECOMMIT).get());
assertEquals(4, txMsgCounter.get(LogTxStatus.PRIMARY_SUCCESS).get());
assertEquals(1, txMsgCounter.get(LogTxStatus.COMPLETE_SUCCESS).get());
assertEquals(4, userLogMeta.get());
if (withLogFailure)
assertEquals(4, txMsgCounter.get(LogTxStatus.SECONDARY_FAILURE).get());
else
assertEquals(4, txMsgCounter.get(LogTxStatus.SECONDARY_SUCCESS).get());
// User-Log
if (withLogFailure) {
assertEquals(0, userLogMsgCounter.get());
} else {
assertEquals(4, userLogMsgCounter.get());
assertEquals(7, userChangeCounter.get(Change.ADDED).get());
assertEquals(4, userChangeCounter.get(Change.REMOVED).get());
}
clopen(option(VERBOSE_TX_RECOVERY), true);
/*
Transaction Recovery
*/
final TransactionRecovery recovery = JanusGraphFactory.startTransactionRecovery(graph, startTime);
/*
Use user log processing framework
*/
final AtomicInteger userLogCount = new AtomicInteger(0);
final LogProcessorFramework userLogs = JanusGraphFactory.openTransactionLog(graph);
userLogs.addLogProcessor(userLogName).setStartTime(startTime).setRetryAttempts(1).addProcessor((tx, txId, changes) -> {
assertEquals(instanceId, txId.getInstanceId());
// Just some reasonable upper bound
assertTrue(txId.getTransactionId() > 0 && txId.getTransactionId() < 100);
final Instant txTime = txId.getTransactionTime();
// Times should be within a second
assertTrue(String.format("tx timestamp %s not between start %s and end time %s", txTime, startTime, endTime), (txTime.isAfter(startTime) || txTime.equals(startTime)) && (txTime.isBefore(endTime) || txTime.equals(endTime)));
assertTrue(tx.containsRelationType("knows"));
assertTrue(tx.containsRelationType("weight"));
final EdgeLabel knows1 = tx.getEdgeLabel("knows");
final PropertyKey weight1 = tx.getPropertyKey("weight");
Instant txTimeMicro = txId.getTransactionTime();
int txNo;
if (txTimeMicro.isBefore(txTimes[1])) {
txNo = 1;
// v1 addition transaction
assertEquals(1, Iterables.size(changes.getVertices(Change.ADDED)));
assertEquals(0, Iterables.size(changes.getVertices(Change.REMOVED)));
assertEquals(1, Iterables.size(changes.getVertices(Change.ANY)));
assertEquals(2, Iterables.size(changes.getRelations(Change.ADDED)));
assertEquals(1, Iterables.size(changes.getRelations(Change.ADDED, knows1)));
assertEquals(1, Iterables.size(changes.getRelations(Change.ADDED, weight1)));
assertEquals(2, Iterables.size(changes.getRelations(Change.ANY)));
assertEquals(0, Iterables.size(changes.getRelations(Change.REMOVED)));
final JanusGraphVertex v = Iterables.getOnlyElement(changes.getVertices(Change.ADDED));
assertEquals(v1id, getId(v));
final VertexProperty<Float> p = Iterables.getOnlyElement(changes.getProperties(v, Change.ADDED, "weight"));
assertEquals(111.1, p.value().doubleValue(), 0.01);
assertEquals(1, Iterables.size(changes.getEdges(v, Change.ADDED, OUT)));
assertEquals(1, Iterables.size(changes.getEdges(v, Change.ADDED, BOTH)));
} else if (txTimeMicro.isBefore(txTimes[2])) {
txNo = 2;
// v2 addition transaction
assertEquals(1, Iterables.size(changes.getVertices(Change.ADDED)));
assertEquals(0, Iterables.size(changes.getVertices(Change.REMOVED)));
assertEquals(2, Iterables.size(changes.getVertices(Change.ANY)));
assertEquals(2, Iterables.size(changes.getRelations(Change.ADDED)));
assertEquals(1, Iterables.size(changes.getRelations(Change.ADDED, knows1)));
assertEquals(1, Iterables.size(changes.getRelations(Change.ADDED, weight1)));
assertEquals(2, Iterables.size(changes.getRelations(Change.ANY)));
assertEquals(0, Iterables.size(changes.getRelations(Change.REMOVED)));
final JanusGraphVertex v = Iterables.getOnlyElement(changes.getVertices(Change.ADDED));
assertEquals(v2id, getId(v));
final VertexProperty<Float> p = Iterables.getOnlyElement(changes.getProperties(v, Change.ADDED, "weight"));
assertEquals(222.2, p.value().doubleValue(), 0.01);
assertEquals(1, Iterables.size(changes.getEdges(v, Change.ADDED, OUT)));
assertEquals(1, Iterables.size(changes.getEdges(v, Change.ADDED, BOTH)));
} else if (txTimeMicro.isBefore(txTimes[3])) {
txNo = 3;
// v2 deletion transaction
assertEquals(0, Iterables.size(changes.getVertices(Change.ADDED)));
assertEquals(1, Iterables.size(changes.getVertices(Change.REMOVED)));
assertEquals(2, Iterables.size(changes.getVertices(Change.ANY)));
assertEquals(0, Iterables.size(changes.getRelations(Change.ADDED)));
assertEquals(2, Iterables.size(changes.getRelations(Change.REMOVED)));
assertEquals(1, Iterables.size(changes.getRelations(Change.REMOVED, knows1)));
assertEquals(1, Iterables.size(changes.getRelations(Change.REMOVED, weight1)));
assertEquals(2, Iterables.size(changes.getRelations(Change.ANY)));
final JanusGraphVertex v = Iterables.getOnlyElement(changes.getVertices(Change.REMOVED));
assertEquals(v2id, getId(v));
final VertexProperty<Float> p = Iterables.getOnlyElement(changes.getProperties(v, Change.REMOVED, "weight"));
assertEquals(222.2, p.value().doubleValue(), 0.01);
assertEquals(1, Iterables.size(changes.getEdges(v, Change.REMOVED, OUT)));
assertEquals(0, Iterables.size(changes.getEdges(v, Change.ADDED, BOTH)));
} else {
txNo = 4;
// v1 edge modification
assertEquals(0, Iterables.size(changes.getVertices(Change.ADDED)));
assertEquals(0, Iterables.size(changes.getVertices(Change.REMOVED)));
assertEquals(1, Iterables.size(changes.getVertices(Change.ANY)));
assertEquals(1, Iterables.size(changes.getRelations(Change.ADDED)));
assertEquals(1, Iterables.size(changes.getRelations(Change.REMOVED)));
assertEquals(1, Iterables.size(changes.getRelations(Change.REMOVED, knows1)));
assertEquals(2, Iterables.size(changes.getRelations(Change.ANY)));
final JanusGraphVertex v = Iterables.getOnlyElement(changes.getVertices(Change.ANY));
assertEquals(v1id, getId(v));
JanusGraphEdge e1 = Iterables.getOnlyElement(changes.getEdges(v, Change.REMOVED, Direction.OUT, "knows"));
assertFalse(e1.property("weight").isPresent());
assertEquals(v, e1.vertex(Direction.IN));
e1 = Iterables.getOnlyElement(changes.getEdges(v, Change.ADDED, Direction.OUT, "knows"));
assertEquals(44.4, e1.<Float>value("weight").doubleValue(), 0.01);
assertEquals(v, e1.vertex(Direction.IN));
}
// See only current state of graph in transaction
final JanusGraphVertex v11 = getV(tx, v1id);
assertNotNull(v11);
assertTrue(v11.isLoaded());
if (txNo != 2) {
// In the transaction that adds v2, v2 will be considered "loaded"
assertMissing(tx, v2id);
// assertTrue(txNo + " - " + v2, v2 == null || v2.isRemoved());
}
assertEquals(111.1, v11.<Float>value("weight").doubleValue(), 0.01);
assertCount(1, v11.query().direction(Direction.OUT).edges());
userLogCount.incrementAndGet();
}).build();
// wait
Thread.sleep(22000L);
recovery.shutdown();
long[] recoveryStats = ((StandardTransactionLogProcessor) recovery).getStatistics();
if (withLogFailure) {
assertEquals(1, recoveryStats[0]);
assertEquals(4, recoveryStats[1]);
} else {
assertEquals(5, recoveryStats[0]);
assertEquals(0, recoveryStats[1]);
}
userLogs.removeLogProcessor(userLogName);
userLogs.shutdown();
assertEquals(4, userLogCount.get());
}
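Beyond the raw MessageReader API, the tail of the test reads the user log through the LogProcessorFramework, which hands the processor decoded change sets instead of raw Message payloads. A condensed sketch of that pattern, assuming an open JanusGraph instance and a log identifier matching the one set at commit time (the literal "test" mirrors userLogName above; the class name is illustrative only):
import java.time.Instant;
import com.google.common.collect.Iterables;
import org.janusgraph.core.JanusGraph;
import org.janusgraph.core.JanusGraphFactory;
import org.janusgraph.core.log.Change;
import org.janusgraph.core.log.LogProcessorFramework;

public class UserLogProcessorSketch {
    // Attaches a change processor to the user log written by transactions that called logIdentifier("test").
    public static LogProcessorFramework attach(JanusGraph graph, Instant startTime) {
        LogProcessorFramework framework = JanusGraphFactory.openTransactionLog(graph);
        framework.addLogProcessor("test")
                .setStartTime(startTime)
                .setRetryAttempts(1)
                .addProcessor((tx, txId, changes) -> {
                    // changes exposes the vertices and relations touched by the original transaction.
                    System.out.println("tx " + txId.getTransactionId() + " added "
                            + Iterables.size(changes.getVertices(Change.ADDED)) + " vertices");
                })
                .build();
        // Callers should eventually call framework.removeLogProcessor("test") and framework.shutdown(),
        // as the test does once its assertions have run.
        return framework;
    }
}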