use of org.janusgraph.diskstorage.BackendException in project janusgraph by JanusGraph.
the class SolrIndex method clearStorage.
@Override
public void clearStorage() throws BackendException {
    try {
        if (mode != Mode.CLOUD) {
            logger.error("Operation only supported for SolrCloud. Cores must be deleted manually through the Solr API when using HTTP mode.");
            return;
        }
        logger.debug("Clearing storage from Solr: {}", solrClient);
        final ZkStateReader zkStateReader = ((CloudSolrClient) solrClient).getZkStateReader();
        zkStateReader.forciblyRefreshAllClusterStateSlow();
        final ClusterState clusterState = zkStateReader.getClusterState();
        for (final String collection : clusterState.getCollectionsMap().keySet()) {
            logger.debug("Clearing collection [{}] in Solr", collection);
            // The collection itself is not dropped because it may have been created externally
            final UpdateRequest deleteAll = newUpdateRequest();
            deleteAll.deleteByQuery("*:*");
            solrClient.request(deleteAll, collection);
        }
    } catch (final SolrServerException e) {
        logger.error("Unable to clear storage from index due to server error on Solr.", e);
        throw new PermanentBackendException(e);
    } catch (final IOException e) {
        logger.error("Unable to clear storage from index due to low-level I/O error.", e);
        throw new PermanentBackendException(e);
    } catch (final Exception e) {
        logger.error("Unable to clear storage from index due to general error.", e);
        throw new PermanentBackendException(e);
    }
}
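clearStorage is part of JanusGraph's IndexProvider contract and is normally reached indirectly, for example when a graph is dropped. A minimal sketch of a direct caller, assuming an already-constructed SolrIndex handled through the IndexProvider interface (the helper name wipeIndex is invented):

import org.janusgraph.diskstorage.BackendException;
import org.janusgraph.diskstorage.indexing.IndexProvider;

static void wipeIndex(IndexProvider solrIndex) {
    try {
        // In CLOUD mode this issues a delete-by-query ("*:*") against every
        // known collection; in HTTP mode it only logs an error and returns.
        solrIndex.clearStorage();
    } catch (BackendException e) {
        // SolrServerException, IOException, and any other failure surface
        // as PermanentBackendException from the method above.
        throw new RuntimeException("Failed to clear the Solr-backed index", e);
    }
}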
use of org.janusgraph.diskstorage.BackendException in project janusgraph by JanusGraph.
the class SolrIndex method mutate.
@Override
public void mutate(Map<String, Map<String, IndexMutation>> mutations, KeyInformation.IndexRetriever information, BaseTransaction tx) throws BackendException {
    logger.debug("Mutating SOLR");
    try {
        for (final Map.Entry<String, Map<String, IndexMutation>> stores : mutations.entrySet()) {
            final String collectionName = stores.getKey();
            final String keyIdField = getKeyFieldId(collectionName);
            final List<String> deleteIds = new ArrayList<>();
            final Collection<SolrInputDocument> changes = new ArrayList<>();
            for (final Map.Entry<String, IndexMutation> entry : stores.getValue().entrySet()) {
                final String docId = entry.getKey();
                final IndexMutation mutation = entry.getValue();
                Preconditions.checkArgument(!(mutation.isNew() && mutation.isDeleted()));
                Preconditions.checkArgument(!mutation.isNew() || !mutation.hasDeletions());
                Preconditions.checkArgument(!mutation.isDeleted() || !mutation.hasAdditions());
                // Handle any deletions
                if (mutation.hasDeletions()) {
                    if (mutation.isDeleted()) {
                        logger.trace("Deleting entire document {}", docId);
                        deleteIds.add(docId);
                    } else {
                        final List<IndexEntry> fieldDeletions = new ArrayList<>(mutation.getDeletions());
                        if (mutation.hasAdditions()) {
                            for (final IndexEntry indexEntry : mutation.getAdditions()) {
                                fieldDeletions.remove(indexEntry);
                            }
                        }
                        handleRemovalsFromIndex(collectionName, keyIdField, docId, fieldDeletions, information);
                    }
                }
                if (mutation.hasAdditions()) {
                    final int ttl = mutation.determineTTL();
                    final SolrInputDocument doc = new SolrInputDocument();
                    doc.setField(keyIdField, docId);
                    final boolean isNewDoc = mutation.isNew();
                    if (isNewDoc)
                        logger.trace("Adding new document {}", docId);
                    final Map<String, Object> adds = collectFieldValues(mutation.getAdditions(), collectionName, information);
                    // If cardinality is not single then we should use the "add" operation to update
                    // the index so we don't overwrite existing values.
                    adds.keySet().forEach(v -> {
                        final KeyInformation keyInformation = information.get(collectionName, v);
                        final String solrOp = keyInformation.getCardinality() == Cardinality.SINGLE ? "set" : "add";
                        doc.setField(v, isNewDoc ? adds.get(v) : new HashMap<String, Object>(1) {
                            {
                                put(solrOp, adds.get(v));
                            }
                        });
                    });
                    if (ttl > 0) {
                        Preconditions.checkArgument(isNewDoc, "Solr only supports TTL on new documents [%s]", docId);
                        doc.setField(ttlField, String.format("+%dSECONDS", ttl));
                    }
                    changes.add(doc);
                }
            }
            commitDeletes(collectionName, deleteIds);
            commitChanges(collectionName, changes);
        }
    } catch (final IllegalArgumentException e) {
        throw new PermanentBackendException("Unable to complete query on Solr.", e);
    } catch (final Exception e) {
        throw storageException(e);
    }
}
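The set/add branching above is Solr's atomic-update syntax: wrapping a value in a single-entry map tells Solr which update operation to apply to an existing document. A standalone sketch of the two document shapes this produces (the field names are invented for illustration):

import java.util.Collections;
import org.apache.solr.common.SolrInputDocument;

// SINGLE cardinality on an existing document: {"set": value} makes Solr
// replace whatever is currently stored in the field.
SolrInputDocument single = new SolrInputDocument();
single.setField("id", "doc-1");
single.setField("name", Collections.singletonMap("set", "alice"));

// SET/LIST cardinality: {"add": value} appends to the multi-valued field
// instead of overwriting existing values.
SolrInputDocument multi = new SolrInputDocument();
multi.setField("id", "doc-1");
multi.setField("labels", Collections.singletonMap("add", "vip"));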
use of org.janusgraph.diskstorage.BackendException in project janusgraph by JanusGraph.
the class KCVSConfigurationBuilder method buildConfiguration.
public KCVSConfiguration buildConfiguration(final BackendOperation.TransactionalProvider txProvider, final KeyColumnValueStore store, final String identifier, final Configuration config) {
    try {
        KCVSConfiguration keyColumnValueStoreConfiguration = new KCVSConfiguration(txProvider, config, store, identifier);
        keyColumnValueStoreConfiguration.setMaxOperationWaitTime(config.get(SETUP_WAITTIME));
        return keyColumnValueStoreConfiguration;
    } catch (BackendException e) {
        throw new JanusGraphException("Could not open global configuration", e);
    }
}
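The checked BackendException is converted into an unchecked JanusGraphException here, so callers can treat an unreadable global configuration as fatal without declaring the exception. A minimal usage sketch, assuming txProvider, systemStore, and config were obtained from an already-opened backend (the identifier string is illustrative):

KCVSConfiguration globalConfig = new KCVSConfigurationBuilder()
        .buildConfiguration(txProvider, systemStore, "configuration", config);
try {
    // read and write global graph settings through globalConfig
} finally {
    globalConfig.close();
}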
use of org.janusgraph.diskstorage.BackendException in project janusgraph by JanusGraph.
the class ConsistentKeyIDAuthority method getIDBlock.
@Override
public synchronized IDBlock getIDBlock(final int partition, final int idNamespace, Duration timeout) throws BackendException {
    Preconditions.checkArgument(partition >= 0 && partition < (1 << partitionBitWidth), "Invalid partition id [%s] for bit width [%s]", partition, partitionBitWidth);
    // can be any non-negative value
    Preconditions.checkArgument(idNamespace >= 0);
    final Timer methodTime = times.getTimer().start();
    final long blockSize = getBlockSize(idNamespace);
    final long idUpperBound = getIdUpperBound(idNamespace);
    final int maxAvailableBits = (VariableLong.unsignedBitLength(idUpperBound) - 1) - uniqueIdBitWidth;
    Preconditions.checkArgument(maxAvailableBits > 0, "Unique id bit width [%s] is too wide for id-namespace [%s] id bound [%s]", uniqueIdBitWidth, idNamespace, idUpperBound);
    final long idBlockUpperBound = (1L << maxAvailableBits);
    final List<Integer> exhaustedUniquePIDs = new ArrayList<>(randomUniqueIDLimit);
    Duration backoffMS = idApplicationWaitMS;
    Preconditions.checkArgument(idBlockUpperBound > blockSize, "Block size [%s] is larger than upper bound [%s] for bit width [%s]", blockSize, idBlockUpperBound, uniqueIdBitWidth);
    while (methodTime.elapsed().compareTo(timeout) < 0) {
        final int uniquePID = getUniquePartitionID();
        final StaticBuffer partitionKey = getPartitionKey(partition, idNamespace, uniquePID);
        try {
            long nextStart = getCurrentID(partitionKey);
            if (idBlockUpperBound - blockSize <= nextStart) {
                log.info("ID overflow detected on partition({})-namespace({}) with uniqueid {}. Current id {}, block size {}, and upper bound {} for bit width {}.", partition, idNamespace, uniquePID, nextStart, blockSize, idBlockUpperBound, uniqueIdBitWidth);
                if (randomizeUniqueId) {
                    exhaustedUniquePIDs.add(uniquePID);
                    if (exhaustedUniquePIDs.size() == randomUniqueIDLimit)
                        throw new IDPoolExhaustedException(String.format("Exhausted %d uniqueid(s) on partition(%d)-namespace(%d): %s", exhaustedUniquePIDs.size(), partition, idNamespace, StringUtils.join(exhaustedUniquePIDs, ",")));
                    else
                        throw new UniqueIDExhaustedException(String.format("Exhausted ID partition(%d)-namespace(%d) with uniqueid %d (uniqueid attempt %d/%d)", partition, idNamespace, uniquePID, exhaustedUniquePIDs.size(), randomUniqueIDLimit));
                }
                throw new IDPoolExhaustedException("Exhausted id block for partition(" + partition + ")-namespace(" + idNamespace + ") with upper bound: " + idBlockUpperBound);
            }
            // calculate the start (inclusive) and end (exclusive) of the allocation we're about to attempt
            assert idBlockUpperBound - blockSize > nextStart;
            long nextEnd = nextStart + blockSize;
            StaticBuffer target = null;
            // attempt to write our claim on the next id block
            boolean success = false;
            try {
                Timer writeTimer = times.getTimer().start();
                target = getBlockApplication(nextEnd, writeTimer.getStartTime());
                // copy for the inner class
                final StaticBuffer finalTarget = target;
                BackendOperation.execute(txh -> {
                    idStore.mutate(partitionKey, Collections.singletonList(StaticArrayEntry.of(finalTarget)), KeyColumnValueStore.NO_DELETIONS, txh);
                    return true;
                }, this, times);
                writeTimer.stop();
                final boolean distributed = manager.getFeatures().isDistributed();
                Duration writeElapsed = writeTimer.elapsed();
                if (idApplicationWaitMS.compareTo(writeElapsed) < 0 && distributed) {
                    throw new TemporaryBackendException("Wrote claim for id block [" + nextStart + ", " + nextEnd + ") in " + (writeElapsed) + " => too slow, threshold is: " + idApplicationWaitMS);
                } else {
                    assert 0 != target.length();
                    final StaticBuffer[] slice = getBlockSlice(nextEnd);
                    if (distributed) {
                        sleepAndConvertInterrupts(idApplicationWaitMS.plus(waitGracePeriod));
                    }
                    // Read all id allocation claims on this partition, for the counter value we're claiming
                    final List<Entry> blocks = BackendOperation.execute((BackendOperation.Transactional<List<Entry>>) txh -> idStore.getSlice(new KeySliceQuery(partitionKey, slice[0], slice[1]), txh), this, times);
                    if (blocks == null)
                        throw new TemporaryBackendException("Could not read from storage");
                    if (blocks.isEmpty())
                        throw new PermanentBackendException("It seems there is a race-condition in the block application. " + "If you have multiple JanusGraph instances running on one physical machine, ensure that they have unique machine idAuthorities");
                    /* If our claim is the lexicographically first one, then our claim
                     * is the most senior one and we own this id block
                     */
                    if (target.equals(blocks.get(0).getColumnAs(StaticBuffer.STATIC_FACTORY))) {
                        ConsistentKeyIDBlock idBlock = new ConsistentKeyIDBlock(nextStart, blockSize, uniqueIdBitWidth, uniquePID);
                        if (log.isDebugEnabled()) {
                            log.debug("Acquired ID block [{}] on partition({})-namespace({}) (my rid is {})", idBlock, partition, idNamespace, uid);
                        }
                        success = true;
                        return idBlock;
                    } else {
                        // Another claimant beat us to this id block -- try again.
                        log.debug("Failed to acquire ID block [{},{}) (another host claimed it first)", nextStart, nextEnd);
                    }
                }
            } finally {
                if (!success && null != target) {
                    // Delete claim to not pollute id space
                    for (int attempt = 0; attempt < ROLLBACK_ATTEMPTS; attempt++) {
                        try {
                            // copy for the inner class
                            final StaticBuffer finalTarget = target;
                            BackendOperation.execute(txh -> {
                                idStore.mutate(partitionKey, KeyColumnValueStore.NO_ADDITIONS, Collections.singletonList(finalTarget), txh);
                                return true;
                            }, new BackendOperation.TransactionalProvider() {
                                // Use normal consistency level for these non-critical delete operations
                                @Override
                                public StoreTransaction openTx() throws BackendException {
                                    return manager.beginTransaction(storeTxConfigBuilder.build());
                                }

                                @Override
                                public void close() {
                                }
                            }, times);
                            break;
                        } catch (BackendException e) {
                            log.warn("Storage exception while deleting old block application - retrying in {}", rollbackWaitTime, e);
                            if (!rollbackWaitTime.isZero())
                                sleepAndConvertInterrupts(rollbackWaitTime);
                        }
                    }
                }
            }
        } catch (UniqueIDExhaustedException e) {
            // No need to increment the backoff wait time or to sleep
            log.warn(e.getMessage());
        } catch (TemporaryBackendException e) {
            backoffMS = Durations.min(backoffMS.multipliedBy(2), idApplicationWaitMS.multipliedBy(32));
            log.warn("Temporary storage exception while acquiring id block - retrying in {}: {}", backoffMS, e);
            sleepAndConvertInterrupts(backoffMS);
        }
    }
    throw new TemporaryLockingException(String.format("Reached timeout %d (%s elapsed) when attempting to allocate id block on partition(%d)-namespace(%d)", timeout.getNano(), methodTime, partition, idNamespace));
}
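On failure paths the loop retries with exponential backoff, doubling backoffMS up to 32x idApplicationWaitMS, until the overall timeout elapses. From a caller's perspective the result is simply a contiguous range of ids; a minimal consumption sketch, assuming idAuthority is a configured ConsistentKeyIDAuthority and using placeholder partition/namespace values:

import java.time.Duration;
import org.janusgraph.diskstorage.IDBlock;

IDBlock block = idAuthority.getIDBlock(0, 0, Duration.ofMinutes(2));
for (long i = 0; i < block.numIds(); i++) {
    // getId returns the absolute id; the claimed block's start offset is applied internally
    long id = block.getId(i);
    // ... assign id to a new vertex or relation ...
}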
use of org.janusgraph.diskstorage.BackendException in project janusgraph by JanusGraph.
the class ManagementSystem method removeGhostVertices.
@Override
public ScanJobFuture removeGhostVertices(int numOfThreads) {
    StandardScanner.Builder builder = graph.getBackend().buildEdgeScanJob();
    builder.setJob(new GhostVertexRemover(graph));
    builder.setNumProcessingThreads(numOfThreads);
    ScanJobFuture future;
    try {
        future = builder.execute();
    } catch (BackendException e) {
        throw new JanusGraphException(e);
    }
    return future;
}
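A hedged sketch of invoking this from application code follows; it assumes the method is reachable from an opened ManagementSystem and that the returned ScanJobFuture yields a ScanMetrics when complete. The helper name purgeGhosts is invented.

import org.janusgraph.core.JanusGraph;
import org.janusgraph.diskstorage.keycolumnvalue.scan.ScanMetrics;
import org.janusgraph.graphdb.database.management.ManagementSystem;

static void purgeGhosts(JanusGraph graph) throws Exception {
    // Cast is an assumption: removeGhostVertices may not be declared on the
    // generic JanusGraphManagement interface in all versions.
    ManagementSystem mgmt = (ManagementSystem) graph.openManagement();
    try {
        // 4 processing threads is an arbitrary choice; get() blocks until the
        // edge-store scan driven by GhostVertexRemover finishes.
        ScanMetrics metrics = mgmt.removeGhostVertices(4).get();
        System.out.printf("Ghost vertex scan: %d succeeded, %d failed%n",
                metrics.get(ScanMetrics.Metric.SUCCESS),
                metrics.get(ScanMetrics.Metric.FAILURE));
    } finally {
        mgmt.commit();
    }
}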