Use of org.janusgraph.diskstorage.BackendException in the janusgraph project by JanusGraph.
Class StandardJanusGraph, method newTransaction:
public StandardJanusGraphTx newTransaction(final TransactionConfiguration configuration) {
if (!isOpen)
ExceptionFactory.graphShutdown();
try {
StandardJanusGraphTx tx = new StandardJanusGraphTx(this, configuration);
tx.setBackendTransaction(openBackendTransaction(tx));
openTransactions.add(tx);
return tx;
} catch (BackendException e) {
throw new JanusGraphException("Could not start new transaction", e);
}
}
Use of org.janusgraph.diskstorage.BackendException in the janusgraph project by JanusGraph.
Class StandardLogProcessorFramework, method shutdown:
@Override
public synchronized void shutdown() throws JanusGraphException {
if (!isOpen)
return;
isOpen = false;
try {
ExceptionWrapper exceptionWrapper = new ExceptionWrapper();
for (Log log : processorLogs.values()) {
ExecuteUtil.executeWithCatching(log::close, exceptionWrapper);
}
ExecuteUtil.throwIfException(exceptionWrapper);
processorLogs.clear();
} catch (BackendException e) {
throw new JanusGraphException(e);
}
}
Use of org.janusgraph.diskstorage.BackendException in the janusgraph project by JanusGraph.
Class IndexRepairJob, method workerIterationEnd:
/**
 * Flushes any mixed-index documents buffered during this worker's iteration
 * to the backing external index, then delegates to the superclass hook.
 *
 * @param metrics scan metrics for the finished worker iteration
 * @throws JanusGraphException if restoring the buffered documents fails
 */
@Override
public void workerIterationEnd(final ScanMetrics metrics) {
    try {
        if (index instanceof JanusGraphIndex) {
            BackendTransaction mutator = writeTx.getTxHandle();
            IndexType indexType = managementSystem.getSchemaVertex(index).asIndexType();
            // Only mixed (external) indexes buffer documents here; composite
            // indexes are written through the storage backend directly.
            if (indexType.isMixedIndex() && !documentsPerStore.isEmpty()) {
                mutator.getIndexTransaction(indexType.getBackingIndexName()).restore(documentsPerStore);
                // Reset the buffer so the next iteration starts empty.
                documentsPerStore = new HashMap<>();
            }
        }
    } catch (BackendException e) {
        throw new JanusGraphException(e.getMessage(), e);
    } finally {
        // Always run the superclass cleanup, even if the flush failed.
        super.workerIterationEnd(metrics);
    }
}
Use of org.janusgraph.diskstorage.BackendException in the janusgraph project by JanusGraph.
Class ElasticSearchIndex, method getNewDocument:
/**
 * Builds a new Elasticsearch document from the given index-entry additions,
 * converting each value to its ES representation according to the key's
 * mapping and cardinality.
 *
 * @param additions   index entries to materialize into the document
 * @param information retriever for per-key schema information
 * @return field-name to converted-value map ready for indexing
 * @throws BackendException if value conversion fails
 */
public Map<String, Object> getNewDocument(final List<IndexEntry> additions, KeyInformation.StoreRetriever information) throws BackendException {
    // JSON writes duplicate fields one after another, which forces us
    // at this stage to make de-duplication on the IndexEntry list. We don't want to pay the
    // price map storage on the Mutation level because none of other backends need that.
    final Multimap<String, IndexEntry> unique = LinkedListMultimap.create();
    for (final IndexEntry e : additions) {
        unique.put(e.field, e);
    }
    final Map<String, Object> doc = new HashMap<>();
    for (final Map.Entry<String, Collection<IndexEntry>> add : unique.asMap().entrySet()) {
        final KeyInformation keyInformation = information.get(add.getKey());
        final Object value;
        switch (keyInformation.getCardinality()) {
            case SINGLE:
                // Last write wins for single-cardinality keys.
                value = convertToEsType(Iterators.getLast(add.getValue().iterator()).value, Mapping.getMapping(keyInformation));
                break;
            case SET:
            case LIST:
                value = add.getValue().stream().map(v -> convertToEsType(v.value, Mapping.getMapping(keyInformation))).filter(v -> {
                    Preconditions.checkArgument(!(v instanceof byte[]), "Collections not supported for %s", add.getKey());
                    return true;
                }).toArray();
                break;
            default:
                value = null;
                break;
        }
        doc.put(add.getKey(), value);
        // Reuse keyInformation instead of re-fetching information.get(add.getKey()):
        // it is the exact same lookup performed above.
        if (hasDualStringMapping(keyInformation) && keyInformation.getDataType() == String.class) {
            doc.put(getDualMappingName(add.getKey()), value);
        }
    }
    return doc;
}
Use of org.janusgraph.diskstorage.BackendException in the janusgraph project by JanusGraph.
Class ElasticSearchIndex, method mutate:
/**
 * Applies a batch of index mutations to Elasticsearch as bulk requests.
 * Per document: deletions are emitted before additions. Per store: if an
 * ingest pipeline is configured for the store, its requests are flushed
 * immediately as their own bulk; otherwise they are accumulated and sent
 * as one combined bulk at the end.
 *
 * @param mutations   store name -> (document id -> mutation) to apply
 * @param information retriever for per-store key schema information
 * @param tx          the enclosing index transaction (unused here)
 * @throws BackendException if any bulk request or document build fails
 */
@Override
public void mutate(Map<String, Map<String, IndexMutation>> mutations, KeyInformation.IndexRetriever information, BaseTransaction tx) throws BackendException {
final List<ElasticSearchMutation> requests = new ArrayList<>();
try {
for (final Map.Entry<String, Map<String, IndexMutation>> stores : mutations.entrySet()) {
final List<ElasticSearchMutation> requestByStore = new ArrayList<>();
final String storeName = stores.getKey();
final String indexStoreName = getIndexStoreName(storeName);
for (final Map.Entry<String, IndexMutation> entry : stores.getValue().entrySet()) {
final String documentId = entry.getKey();
final IndexMutation mutation = entry.getValue();
assert mutation.isConsolidated();
// Invariants: a mutation cannot be both new and deleted; a new mutation
// carries no deletions; a deleted mutation carries no additions.
Preconditions.checkArgument(!(mutation.isNew() && mutation.isDeleted()));
Preconditions.checkArgument(!mutation.isNew() || !mutation.hasDeletions());
Preconditions.checkArgument(!mutation.isDeleted() || !mutation.hasAdditions());
// Deletions first
if (mutation.hasDeletions()) {
if (mutation.isDeleted()) {
log.trace("Deleting entire document {}", documentId);
requestByStore.add(ElasticSearchMutation.createDeleteRequest(indexStoreName, storeName, documentId));
} else {
// Partial deletion: remove individual fields via a stored script.
List<Map<String, Object>> params = getParameters(information.get(storeName), mutation.getDeletions(), true);
Map doc = compat.prepareStoredScript(parameterizedDeletionScriptId, params).build();
log.trace("Deletion script {} with params {}", PARAMETERIZED_DELETION_SCRIPT, params);
requestByStore.add(ElasticSearchMutation.createUpdateRequest(indexStoreName, storeName, documentId, doc));
}
}
if (mutation.hasAdditions()) {
if (mutation.isNew()) {
// Index
log.trace("Adding entire document {}", documentId);
final Map<String, Object> source = getNewDocument(mutation.getAdditions(), information.get(storeName));
requestByStore.add(ElasticSearchMutation.createIndexRequest(indexStoreName, storeName, documentId, source));
} else {
// Upsert document for when the target does not yet exist; built only
// when this mutation had no deletions — presumably because a prior
// deletion request implies the document exists (TODO confirm).
final Map upsert;
if (!mutation.hasDeletions()) {
upsert = getNewDocument(mutation.getAdditions(), information.get(storeName));
} else {
upsert = null;
}
// SINGLE-cardinality additions go through the stored addition script.
List<Map<String, Object>> params = getParameters(information.get(storeName), mutation.getAdditions(), false, Cardinality.SINGLE);
if (!params.isEmpty()) {
ImmutableMap.Builder builder = compat.prepareStoredScript(parameterizedAdditionScriptId, params);
requestByStore.add(ElasticSearchMutation.createUpdateRequest(indexStoreName, storeName, documentId, builder, upsert));
log.trace("Adding script {} with params {}", PARAMETERIZED_ADDITION_SCRIPT, params);
}
// Remaining additions are applied as a plain partial-document update.
final Map<String, Object> doc = getAdditionDoc(information, storeName, mutation);
if (!doc.isEmpty()) {
final ImmutableMap.Builder builder = ImmutableMap.builder().put(ES_DOC_KEY, doc);
requestByStore.add(ElasticSearchMutation.createUpdateRequest(indexStoreName, storeName, documentId, builder, upsert));
log.trace("Adding update {}", doc);
}
}
}
}
// Stores with an ingest pipeline must be flushed separately so the
// pipeline parameter applies only to their requests.
if (!requestByStore.isEmpty() && ingestPipelines.containsKey(storeName)) {
client.bulkRequest(requestByStore, String.valueOf(ingestPipelines.get(storeName)));
} else if (!requestByStore.isEmpty()) {
requests.addAll(requestByStore);
}
}
if (!requests.isEmpty()) {
client.bulkRequest(requests, null);
}
} catch (final Exception e) {
log.error("Failed to execute bulk Elasticsearch mutation", e);
// convert(e) maps arbitrary failures to the BackendException hierarchy.
throw convert(e);
}
}
Aggregations