Use of org.janusgraph.diskstorage.BackendException in project janusgraph by JanusGraph.
In the class ElasticSearchIndex, the method mutate:
@Override
public void mutate(Map<String, Map<String, IndexMutation>> mutations, KeyInformation.IndexRetriever information, BaseTransaction tx) throws BackendException {
    final List<ElasticSearchMutation> requests = new ArrayList<>();
    try {
        for (final Map.Entry<String, Map<String, IndexMutation>> stores : mutations.entrySet()) {
            final List<ElasticSearchMutation> requestByStore = new ArrayList<>();
            final String storeName = stores.getKey();
            final String indexStoreName = getIndexStoreName(storeName);
            for (final Map.Entry<String, IndexMutation> entry : stores.getValue().entrySet()) {
                final String documentId = entry.getKey();
                final IndexMutation mutation = entry.getValue();
                assert mutation.isConsolidated();
                Preconditions.checkArgument(!(mutation.isNew() && mutation.isDeleted()));
                Preconditions.checkArgument(!mutation.isNew() || !mutation.hasDeletions());
                Preconditions.checkArgument(!mutation.isDeleted() || !mutation.hasAdditions());
                // Deletions first
                if (mutation.hasDeletions()) {
                    if (mutation.isDeleted()) {
                        log.trace("Deleting entire document {}", documentId);
                        requestByStore.add(ElasticSearchMutation.createDeleteRequest(indexStoreName, storeName, documentId));
                    } else {
                        final String script = getDeletionScript(information, storeName, mutation);
                        final Map<String, Object> doc = compat.prepareScript(script).build();
                        requestByStore.add(ElasticSearchMutation.createUpdateRequest(indexStoreName, storeName, documentId, doc));
                        log.trace("Adding script {}", script);
                    }
                }
                if (mutation.hasAdditions()) {
                    if (mutation.isNew()) {
                        // Index
                        log.trace("Adding entire document {}", documentId);
                        final Map<String, Object> source = getNewDocument(mutation.getAdditions(), information.get(storeName));
                        requestByStore.add(ElasticSearchMutation.createIndexRequest(indexStoreName, storeName, documentId, source));
                    } else {
                        final Map upsert;
                        if (!mutation.hasDeletions()) {
                            upsert = getNewDocument(mutation.getAdditions(), information.get(storeName));
                        } else {
                            upsert = null;
                        }
                        final String inline = getAdditionScript(information, storeName, mutation);
                        if (!inline.isEmpty()) {
                            final ImmutableMap.Builder builder = compat.prepareScript(inline);
                            requestByStore.add(ElasticSearchMutation.createUpdateRequest(indexStoreName, storeName, documentId, builder, upsert));
                            log.trace("Adding script {}", inline);
                        }
                        final Map<String, Object> doc = getAdditionDoc(information, storeName, mutation);
                        if (!doc.isEmpty()) {
                            final ImmutableMap.Builder builder = ImmutableMap.builder().put(ES_DOC_KEY, doc);
                            requestByStore.add(ElasticSearchMutation.createUpdateRequest(indexStoreName, storeName, documentId, builder, upsert));
                            log.trace("Adding update {}", doc);
                        }
                    }
                }
            }
            if (!requestByStore.isEmpty() && ingestPipelines.containsKey(storeName)) {
                client.bulkRequest(requestByStore, String.valueOf(ingestPipelines.get(storeName)));
            } else if (!requestByStore.isEmpty()) {
                requests.addAll(requestByStore);
            }
        }
        if (!requests.isEmpty()) {
            client.bulkRequest(requests, null);
        }
    } catch (final Exception e) {
        log.error("Failed to execute bulk Elasticsearch mutation", e);
        throw convert(e);
    }
}
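For context, a minimal sketch of how such mutations are typically produced through the public JanusGraph API rather than by calling mutate directly; the properties file, property key, index name "byName", and backend name "search" below are illustrative assumptions, not taken from the snippet above.

// Hypothetical usage sketch; names and configuration are assumptions.
JanusGraph graph = JanusGraphFactory.open("conf/janusgraph-cql-es.properties");
JanusGraphManagement mgmt = graph.openManagement();
PropertyKey name = mgmt.makePropertyKey("name").dataType(String.class).make();
mgmt.buildIndex("byName", Vertex.class).addKey(name).buildMixedIndex("search");
mgmt.commit();
graph.addVertex("name", "hercules");
// Committing the transaction flushes the pending index mutations to the Elasticsearch
// backend, which is what ultimately invokes ElasticSearchIndex#mutate shown above.
graph.tx().commit();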
Use of org.janusgraph.diskstorage.BackendException in project janusgraph by JanusGraph.
In the class ElasticSearchConfigTest, the method testExternalMappingsViaMapping:
@Test
public void testExternalMappingsViaMapping() throws Exception {
    final Duration maxWrite = Duration.ofMillis(2000L);
    final String storeName = "test_mapping";
    final Configuration indexConfig = GraphDatabaseConfiguration.buildGraphConfiguration().set(USE_EXTERNAL_MAPPINGS, true, INDEX_NAME).restrictTo(INDEX_NAME);
    final IndexProvider idx = open(indexConfig);
    final ElasticMajorVersion version = ((ElasticSearchIndex) idx).getVersion();
    // Registering a property should fail (KO) while the external mapping has not been pushed yet
    final KeyInformation.IndexRetriever indexRetriever = IndexProviderTest.getIndexRetriever(IndexProviderTest.getMapping(idx.getFeatures(), ANALYZER_ENGLISH, ANALYZER_KEYWORD));
    final BaseTransactionConfig txConfig = StandardBaseTransactionConfig.of(TimestampProviders.MILLI);
    final IndexTransaction itx = new IndexTransaction(idx, indexRetriever, txConfig, maxWrite);
    try {
        idx.register(storeName, "date", IndexProviderTest.getMapping(idx.getFeatures(), ANALYZER_ENGLISH, ANALYZER_KEYWORD).get("date"), itx);
        fail("should fail");
    } catch (final PermanentBackendException ignored) {
    }
    final HttpPut newMapping = new HttpPut("janusgraph_" + storeName);
    newMapping.setEntity(new StringEntity(objectMapper.writeValueAsString(readMapping(version, "/strict_mapping.json")), Charset.forName("UTF-8")));
    executeRequest(newMapping);
    // The "date" property is covered by the pushed strict mapping, so registering it should succeed (OK)
    idx.register(storeName, "date", IndexProviderTest.getMapping(idx.getFeatures(), ANALYZER_ENGLISH, ANALYZER_KEYWORD).get("date"), itx);
    // The "weight" property is not part of the strict mapping, so registering it should fail (KO)
    try {
        idx.register(storeName, "weight", IndexProviderTest.getMapping(idx.getFeatures(), ANALYZER_ENGLISH, ANALYZER_KEYWORD).get("weight"), itx);
        fail("should fail");
    } catch (final BackendException ignored) {
    }
    itx.rollback();
    idx.close();
}
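The test above switches on USE_EXTERNAL_MAPPINGS through the low-level index configuration. A hedged sketch of enabling the same behaviour on a regular graph instance; the storage backend, index name "search", hostname, and the assumption that the option is exposed as index.search.elasticsearch.use-external-mappings are all illustrative, not taken from the test.

// Hypothetical configuration sketch; option values are assumptions.
JanusGraph graph = JanusGraphFactory.build()
    .set("storage.backend", "cql")
    .set("index.search.backend", "elasticsearch")
    .set("index.search.hostname", "127.0.0.1")
    .set("index.search.elasticsearch.use-external-mappings", true)
    .open();
// With external mappings enabled, JanusGraph does not create or modify Elasticsearch
// mappings itself; registering a key fails unless a matching mapping already exists.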
Use of org.janusgraph.diskstorage.BackendException in project janusgraph by JanusGraph.
In the class MapReduceIndexManagement, the method updateIndex:
/**
 * Updates the provided index according to the given {@link SchemaAction}.
 * Only {@link SchemaAction#REINDEX} and {@link SchemaAction#REMOVE_INDEX} are supported.
 *
 * @param index the index to process
 * @param updateAction either {@code REINDEX} or {@code REMOVE_INDEX}
 * @return a future that returns immediately;
 *         this method blocks until the Hadoop MapReduce job completes
 */
// TODO make this future actually async and update javadoc @return accordingly
public JanusGraphManagement.IndexJobFuture updateIndex(Index index, SchemaAction updateAction) throws BackendException {
    Preconditions.checkNotNull(index, "Index parameter must not be null");
    Preconditions.checkNotNull(updateAction, "%s parameter must not be null", SchemaAction.class.getSimpleName());
    Preconditions.checkArgument(SUPPORTED_ACTIONS.contains(updateAction), "Only these %s parameters are supported: %s (was given %s)", SchemaAction.class.getSimpleName(), SUPPORTED_ACTIONS_STRING, updateAction);
    Preconditions.checkArgument(RelationTypeIndex.class.isAssignableFrom(index.getClass()) || JanusGraphIndex.class.isAssignableFrom(index.getClass()), "Index %s has class %s: must be a %s or %s (or subtype)", index, index.getClass().getSimpleName(), RelationTypeIndex.class.getSimpleName(), JanusGraphIndex.class.getSimpleName());
    org.apache.hadoop.conf.Configuration hadoopConf = new org.apache.hadoop.conf.Configuration();
    ModifiableHadoopConfiguration janusGraphMapReduceConfiguration = ModifiableHadoopConfiguration.of(JanusGraphHadoopConfiguration.MAPRED_NS, hadoopConf);
    // The job we'll execute to either REINDEX or REMOVE_INDEX
    final Class<? extends IndexUpdateJob> indexJobClass;
    final Class<? extends Mapper> mapperClass;
    // The class of the IndexUpdateJob and the Mapper that will be used to run it (VertexScanJob vs ScanJob)
    if (updateAction.equals(SchemaAction.REINDEX)) {
        indexJobClass = IndexRepairJob.class;
        mapperClass = HadoopVertexScanMapper.class;
    } else if (updateAction.equals(SchemaAction.REMOVE_INDEX)) {
        indexJobClass = IndexRemoveJob.class;
        mapperClass = HadoopScanMapper.class;
    } else {
        // Shouldn't get here -- if this exception is ever thrown, update SUPPORTED_ACTIONS
        throw new IllegalStateException("Unrecognized " + SchemaAction.class.getSimpleName() + ": " + updateAction);
    }
    // The column family that serves as input to the IndexUpdateJob
    final String readCF;
    if (RelationTypeIndex.class.isAssignableFrom(index.getClass())) {
        readCF = Backend.EDGESTORE_NAME;
    } else {
        JanusGraphIndex graphIndex = (JanusGraphIndex) index;
        if (graphIndex.isMixedIndex() && !updateAction.equals(SchemaAction.REINDEX))
            throw new UnsupportedOperationException("External mixed indexes must be removed in the indexing system directly.");
        Preconditions.checkState(JanusGraphIndex.class.isAssignableFrom(index.getClass()));
        if (updateAction.equals(SchemaAction.REMOVE_INDEX))
            readCF = Backend.INDEXSTORE_NAME;
        else
            readCF = Backend.EDGESTORE_NAME;
    }
    janusGraphMapReduceConfiguration.set(JanusGraphHadoopConfiguration.COLUMN_FAMILY_NAME, readCF);
    // The MapReduce InputFormat class based on the open graph's store manager
    final Class<? extends InputFormat> inputFormat;
    final Class<? extends KeyColumnValueStoreManager> storeManagerClass = graph.getBackend().getStoreManagerClass();
    if (CASSANDRA_STORE_MANAGER_CLASSES.contains(storeManagerClass)) {
        inputFormat = CassandraBinaryInputFormat.class;
        // Set the partitioner
        IPartitioner part = ((AbstractCassandraStoreManager) graph.getBackend().getStoreManager()).getCassandraPartitioner();
        hadoopConf.set("cassandra.input.partitioner.class", part.getClass().getName());
    } else if (HBASE_STORE_MANAGER_CLASSES.contains(storeManagerClass)) {
        inputFormat = HBaseBinaryInputFormat.class;
    } else {
        throw new IllegalArgumentException("Store manager class " + storeManagerClass + " is not supported");
    }
    // The index name and relation type name (if the latter is applicable)
    final String indexName = index.name();
    final RelationType relationType = RelationTypeIndex.class.isAssignableFrom(index.getClass()) ? ((RelationTypeIndex) index).getType() : null;
    final String relationTypeName = relationType == null ? StringUtils.EMPTY : relationType.name();
    Preconditions.checkNotNull(indexName);
    // Set the class of the IndexUpdateJob
    janusGraphMapReduceConfiguration.set(JanusGraphHadoopConfiguration.SCAN_JOB_CLASS, indexJobClass.getName());
    // Set the configuration of the IndexUpdateJob
    copyIndexJobKeys(hadoopConf, indexName, relationTypeName);
    janusGraphMapReduceConfiguration.set(JanusGraphHadoopConfiguration.SCAN_JOB_CONFIG_ROOT, GraphDatabaseConfiguration.class.getName() + "#JOB_NS");
    // Copy the StandardJanusGraph configuration under JanusGraphHadoopConfiguration.GRAPH_CONFIG_KEYS
    org.apache.commons.configuration.Configuration localConfiguration = graph.getConfiguration().getLocalConfiguration();
    localConfiguration.clearProperty(Graph.GRAPH);
    copyInputKeys(hadoopConf, localConfiguration);
    String jobName = HadoopScanMapper.class.getSimpleName() + "[" + indexJobClass.getSimpleName() + "]";
    try {
        return new CompletedJobFuture(HadoopScanRunner.runJob(hadoopConf, inputFormat, jobName, mapperClass));
    } catch (Exception e) {
        return new FailedJobFuture(e);
    }
}
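A sketch of how updateIndex is typically driven, loosely following the documented MapReduce reindex workflow; the properties file and the index name "byName" are assumptions, and exception handling is omitted.

// Hypothetical usage sketch; names are assumptions.
JanusGraph graph = JanusGraphFactory.open("conf/janusgraph-hbase.properties");
MapReduceIndexManagement mr = new MapReduceIndexManagement(graph);
JanusGraphManagement mgmt = graph.openManagement();
JanusGraphIndex index = mgmt.getGraphIndex("byName");
// Although an IndexJobFuture is returned, the call blocks until the MapReduce job completes.
mr.updateIndex(index, SchemaAction.REINDEX).get();
mgmt.commit();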
Use of org.janusgraph.diskstorage.BackendException in project janusgraph by JanusGraph.
In the class IDPoolTest, the method testAllocationTimeoutAndRecovery:
@Test
public void testAllocationTimeoutAndRecovery() throws BackendException {
    IMocksControl ctrl = EasyMock.createStrictControl();
    final int partition = 42;
    final int idNamespace = 777;
    final Duration timeout = Duration.ofSeconds(1L);
    final IDAuthority mockAuthority = ctrl.createMock(IDAuthority.class);
    // Sleep for two seconds, then throw a BackendException
    // this whole delegate could be deleted if we abstracted StandardIDPool's internal executor and stopwatches
    expect(mockAuthority.getIDBlock(partition, idNamespace, timeout)).andDelegateTo(new IDAuthority() {

        @Override
        public IDBlock getIDBlock(int partition, int idNamespace, Duration timeout) throws BackendException {
            try {
                Thread.sleep(2000L);
            } catch (InterruptedException e) {
                fail();
            }
            throw new TemporaryBackendException("slow backend");
        }

        @Override
        public List<KeyRange> getLocalIDPartition() {
            throw new IllegalArgumentException();
        }

        @Override
        public void setIDBlockSizer(IDBlockSizer sizer) {
            throw new IllegalArgumentException();
        }

        @Override
        public void close() {
            throw new IllegalArgumentException();
        }

        @Override
        public String getUniqueID() {
            throw new IllegalArgumentException();
        }

        @Override
        public boolean supportsInterruption() {
            return true;
        }
    });
    expect(mockAuthority.getIDBlock(partition, idNamespace, timeout)).andReturn(new IDBlock() {

        @Override
        public long numIds() {
            return 2;
        }

        @Override
        public long getId(long index) {
            return 200;
        }
    });
    expect(mockAuthority.supportsInterruption()).andStubReturn(true);
    ctrl.replay();
    StandardIDPool pool = new StandardIDPool(mockAuthority, partition, idNamespace, Integer.MAX_VALUE, timeout, 0.1);
    try {
        pool.nextID();
        fail();
    } catch (JanusGraphException ignored) {
    }
    long nextID = pool.nextID();
    assertEquals(200, nextID);
    ctrl.verify();
}
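The renew timeout and renew-buffer percentage passed to StandardIDPool above map onto user-facing ID allocation settings. A hedged configuration sketch, assuming the option names ids.renew-timeout and ids.renew-percentage and using purely illustrative values:

// Hypothetical configuration sketch; option names and values are assumptions.
JanusGraph graph = JanusGraphFactory.build()
    .set("storage.backend", "cql")
    .set("ids.renew-timeout", "1000 ms") // how long nextID() waits for a new ID block before failing, like the test's timeout
    .set("ids.renew-percentage", 0.1)    // fraction of the current block remaining that triggers asynchronous renewal
    .open();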
Use of org.janusgraph.diskstorage.BackendException in project janusgraph by JanusGraph.
In the class SolrIndex, the method clearStorage:
@Override
public void clearStorage() throws BackendException {
    try {
        if (mode != Mode.CLOUD) {
            logger.error("Operation only supported for SolrCloud. Cores must be deleted manually through the Solr API when using HTTP mode.");
            return;
        }
        logger.debug("Clearing storage from Solr: {}", solrClient);
        final ZkStateReader zkStateReader = ((CloudSolrClient) solrClient).getZkStateReader();
        zkStateReader.forciblyRefreshAllClusterStateSlow();
        final ClusterState clusterState = zkStateReader.getClusterState();
        for (final String collection : clusterState.getCollectionsMap().keySet()) {
            logger.debug("Clearing collection [{}] in Solr", collection);
            // Collection is not dropped because it may have been created externally
            final UpdateRequest deleteAll = newUpdateRequest();
            deleteAll.deleteByQuery("*:*");
            solrClient.request(deleteAll, collection);
        }
    } catch (final SolrServerException e) {
        logger.error("Unable to clear storage from index due to server error on Solr.", e);
        throw new PermanentBackendException(e);
    } catch (final IOException e) {
        logger.error("Unable to clear storage from index due to low-level I/O error.", e);
        throw new PermanentBackendException(e);
    } catch (final Exception e) {
        logger.error("Unable to clear storage from index due to general error.", e);
        throw new PermanentBackendException(e);
    }
}
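clearStorage above only takes effect when the Solr backend runs in SolrCloud mode. A hedged sketch of a cloud-mode configuration; the storage backend, index name "search", and ZooKeeper address are assumptions.

// Hypothetical configuration sketch; option values are assumptions.
JanusGraph graph = JanusGraphFactory.build()
    .set("storage.backend", "cql")
    .set("index.search.backend", "solr")
    .set("index.search.solr.mode", "cloud")
    .set("index.search.solr.zookeeper-url", "localhost:2181")
    .open();
// In HTTP mode, clearStorage logs an error and returns; cores must be cleaned up
// manually through the Solr API, as noted in the method above.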