Use of org.janusgraph.diskstorage.PermanentBackendException in project janusgraph by JanusGraph.
In class HBaseStoreManager, the method ensureColumnFamilyExists:
private void ensureColumnFamilyExists(String tableName, String columnFamily, int ttlInSeconds) throws BackendException {
    AdminMask adm = null;
    try {
        adm = getAdminInterface();
        HTableDescriptor desc = ensureTableExists(tableName, columnFamily, ttlInSeconds);
        Preconditions.checkNotNull(desc);
        HColumnDescriptor cf = desc.getFamily(Bytes.toBytes(columnFamily));
        // Create our column family, if necessary
        if (cf == null) {
            try {
                if (!adm.isTableDisabled(tableName)) {
                    adm.disableTable(tableName);
                }
            } catch (TableNotEnabledException e) {
                logger.debug("Table {} already disabled", tableName);
            } catch (IOException e) {
                throw new TemporaryBackendException(e);
            }
            try {
                HColumnDescriptor columnDescriptor = new HColumnDescriptor(columnFamily);
                setCFOptions(columnDescriptor, ttlInSeconds);
                adm.addColumn(tableName, columnDescriptor);
                try {
                    logger.debug("Added HBase ColumnFamily {}, waiting for 1 sec. to propagate.", columnFamily);
                    Thread.sleep(1000L);
                } catch (InterruptedException ie) {
                    throw new TemporaryBackendException(ie);
                }
                adm.enableTable(tableName);
            } catch (TableNotFoundException ee) {
                logger.error("TableNotFoundException", ee);
                throw new PermanentBackendException(ee);
            } catch (org.apache.hadoop.hbase.TableExistsException ee) {
                logger.debug("Swallowing exception {}", ee);
            } catch (IOException ee) {
                throw new TemporaryBackendException(ee);
            }
        }
    } finally {
        IOUtils.closeQuietly(adm);
    }
}
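The method illustrates JanusGraph's backend exception contract: failures that retrying cannot fix (here, the table vanishing mid-operation) become PermanentBackendException, while transient I/O problems become TemporaryBackendException. A minimal sketch of how a caller might exploit that split; the helper class, its method names, the attempt count, and the backoff are illustrative assumptions, not JanusGraph's actual retry logic:

import org.janusgraph.diskstorage.BackendException;
import org.janusgraph.diskstorage.PermanentBackendException;
import org.janusgraph.diskstorage.TemporaryBackendException;

// Hypothetical retry helper: retries transient failures only.
final class RetryingCaller {

    interface BackendAction {
        void run() throws BackendException;
    }

    static void runWithRetries(BackendAction action, int maxAttempts) throws BackendException {
        for (int attempt = 1; ; attempt++) {
            try {
                action.run();
                return;
            } catch (PermanentBackendException e) {
                throw e; // retrying cannot help; fail fast
            } catch (TemporaryBackendException e) {
                if (attempt >= maxAttempts) {
                    throw e; // give up after maxAttempts transient failures
                }
                try {
                    Thread.sleep(100L * attempt); // simple linear backoff (illustrative)
                } catch (InterruptedException ie) {
                    Thread.currentThread().interrupt();
                    throw new TemporaryBackendException(ie);
                }
            }
        }
    }
}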
Use of org.janusgraph.diskstorage.PermanentBackendException in project janusgraph by JanusGraph.
In class ElasticSearchConfigTest, the method testExternalDynamic:
@Test
public void testExternalDynamic() throws Exception {
    final Duration maxWrite = Duration.ofMillis(2000L);
    final String storeName = "test_mapping";
    final Configuration indexConfig = GraphDatabaseConfiguration.buildGraphConfiguration().set(USE_EXTERNAL_MAPPINGS, true, INDEX_NAME).restrictTo(INDEX_NAME);
    final IndexProvider idx = open(indexConfig);
    final ElasticMajorVersion version = ((ElasticSearchIndex) idx).getVersion();
    // Registration must fail (KO): the external mapping has not been pushed yet
    final KeyInformation.IndexRetriever indexRetriever = IndexProviderTest.getIndexRetriever(IndexProviderTest.getMapping(idx.getFeatures(), ANALYZER_ENGLISH, ANALYZER_KEYWORD));
    final BaseTransactionConfig txConfig = StandardBaseTransactionConfig.of(TimestampProviders.MILLI);
    final IndexTransaction itx = new IndexTransaction(idx, indexRetriever, txConfig, maxWrite);
    try {
        idx.register(storeName, "date", IndexProviderTest.getMapping(idx.getFeatures(), ANALYZER_ENGLISH, ANALYZER_KEYWORD).get("date"), itx);
        fail("should fail");
    } catch (final PermanentBackendException ignored) {
    }
    final HttpPut newMapping = new HttpPut("janusgraph_" + storeName);
    newMapping.setEntity(new StringEntity(objectMapper.writeValueAsString(readMapping(version, "/dynamic_mapping.json")), Charset.forName("UTF-8")));
    executeRequest(newMapping);
    // Registering the date property now succeeds: the mapping exists
    idx.register(storeName, "date", IndexProviderTest.getMapping(idx.getFeatures(), ANALYZER_ENGLISH, ANALYZER_KEYWORD).get("date"), itx);
    // Registering the weight property also succeeds, because the pushed mapping is dynamic
    idx.register(storeName, "weight", IndexProviderTest.getMapping(idx.getFeatures(), ANALYZER_ENGLISH, ANALYZER_KEYWORD).get("weight"), itx);
    itx.rollback();
    idx.close();
}
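The try { ...; fail(...) } catch (PermanentBackendException ignored) {} pattern predates JUnit's assertThrows. With JUnit 4.13+ or JUnit 5 the same check can be written more compactly; a sketch of the equivalent assertion inside this test, assuming JUnit 5's Assertions is on the classpath:

import static org.junit.jupiter.api.Assertions.assertThrows;

// Equivalent check: registration must fail with PermanentBackendException
// while the external mapping is missing.
assertThrows(PermanentBackendException.class, () ->
    idx.register(storeName, "date",
        IndexProviderTest.getMapping(idx.getFeatures(), ANALYZER_ENGLISH, ANALYZER_KEYWORD).get("date"), itx));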
Use of org.janusgraph.diskstorage.PermanentBackendException in project janusgraph by JanusGraph.
In class BerkeleyJEKeyValueStore, the method insert:
public void insert(StaticBuffer key, StaticBuffer value, StoreTransaction txh, boolean allowOverwrite) throws BackendException {
    Transaction tx = getTransaction(txh);
    try {
        OperationStatus status;
        log.trace("db={}, op=insert, tx={}", name, txh);
        if (allowOverwrite) {
            status = db.put(tx, key.as(ENTRY_FACTORY), value.as(ENTRY_FACTORY));
        } else {
            status = db.putNoOverwrite(tx, key.as(ENTRY_FACTORY), value.as(ENTRY_FACTORY));
        }
        if (status != OperationStatus.SUCCESS) {
            if (status == OperationStatus.KEYEXIST) {
                throw new PermanentBackendException("Key already exists on no-overwrite.");
            } else {
                throw new PermanentBackendException("Could not write entity, return status: " + status);
            }
        }
    } catch (DatabaseException e) {
        throw new PermanentBackendException(e);
    }
}
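A minimal standalone sketch of the Berkeley DB JE behavior this method wraps (the je-demo directory, database name, and key/value strings are illustrative): put overwrites silently, while putNoOverwrite reports KEYEXIST, which insert translates into a PermanentBackendException because retrying the same write would fail identically.

import com.sleepycat.je.Database;
import com.sleepycat.je.DatabaseConfig;
import com.sleepycat.je.DatabaseEntry;
import com.sleepycat.je.Environment;
import com.sleepycat.je.EnvironmentConfig;
import com.sleepycat.je.OperationStatus;
import java.io.File;
import java.nio.charset.StandardCharsets;

public final class JePutDemo {
    public static void main(String[] args) {
        File home = new File("je-demo"); // illustrative environment directory
        home.mkdirs();
        EnvironmentConfig envConfig = new EnvironmentConfig();
        envConfig.setAllowCreate(true);
        Environment env = new Environment(home, envConfig);
        DatabaseConfig dbConfig = new DatabaseConfig();
        dbConfig.setAllowCreate(true);
        Database db = env.openDatabase(null, "demo", dbConfig);

        DatabaseEntry key = new DatabaseEntry("k".getBytes(StandardCharsets.UTF_8));
        DatabaseEntry v1 = new DatabaseEntry("v1".getBytes(StandardCharsets.UTF_8));
        DatabaseEntry v2 = new DatabaseEntry("v2".getBytes(StandardCharsets.UTF_8));

        // First no-overwrite insert succeeds...
        OperationStatus first = db.putNoOverwrite(null, key, v1);  // SUCCESS
        // ...the second reports KEYEXIST, which insert() above maps to
        // a PermanentBackendException.
        OperationStatus second = db.putNoOverwrite(null, key, v2); // KEYEXIST
        // A plain put() overwrites silently.
        OperationStatus third = db.put(null, key, v2);             // SUCCESS
        System.out.println(first + " / " + second + " / " + third);

        db.close();
        env.close();
    }
}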
Use of org.janusgraph.diskstorage.PermanentBackendException in project janusgraph by JanusGraph.
In class AstyanaxKeyColumnValueStore, the method getKeys:
@Override
public KeyIterator getKeys(@Nullable SliceQuery sliceQuery, StoreTransaction txh) throws BackendException {
    if (storeManager.getPartitioner() != Partitioner.RANDOM)
        throw new PermanentBackendException("This operation is only allowed when a random partitioner (md5 or murmur3) is used.");
    AllRowsQuery allRowsQuery = keyspace.prepareQuery(columnFamily).getAllRows();
    if (sliceQuery != null) {
        allRowsQuery.withColumnRange(sliceQuery.getSliceStart().asByteBuffer(), sliceQuery.getSliceEnd().asByteBuffer(), false, sliceQuery.getLimit());
    }
    Rows<ByteBuffer, ByteBuffer> result;
    try {
        /* Note: we need to fetch columns for each row as well to remove "range ghosts" */
        OperationResult op = allRowsQuery
            // pre-fetch that many rows at a time
            .setRowLimit(storeManager.getPageSize())
            // one execution thread for fetching a portion of rows
            .setConcurrencyLevel(1)
            .setExceptionCallback(new ExceptionCallback() {

                private int retries = 0;

                @Override
                public boolean onException(ConnectionException e) {
                    try {
                        // make 3 retries
                        return retries > 2;
                    } finally {
                        retries++;
                    }
                }
            }).execute();
        result = ((OperationResult<Rows<ByteBuffer, ByteBuffer>>) op).getResult();
    } catch (ConnectionException e) {
        throw new PermanentBackendException(e);
    }
    return new RowIterator(result.iterator(), sliceQuery);
}
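A sketch of how the returned KeyIterator might be consumed, assuming its documented contract (hasNext/next over row keys plus a per-row getEntries(), with both iterators Closeable); the countEntries helper is illustrative, not JanusGraph code:

import org.janusgraph.diskstorage.Entry;
import org.janusgraph.diskstorage.StaticBuffer;
import org.janusgraph.diskstorage.keycolumnvalue.KeyIterator;
import org.janusgraph.diskstorage.util.RecordIterator;

// Hypothetical consumer of the iterator returned by getKeys():
// for every row key, walk the entries selected by the slice query.
static int countEntries(KeyIterator keys) throws java.io.IOException {
    int total = 0;
    try {
        while (keys.hasNext()) {
            StaticBuffer key = keys.next(); // advance to the next row
            try (RecordIterator<Entry> entries = keys.getEntries()) {
                while (entries.hasNext()) {
                    entries.next();
                    total++; // count columns in this row
                }
            }
        }
    } finally {
        keys.close(); // always release backend resources
    }
    return total;
}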
Use of org.janusgraph.diskstorage.PermanentBackendException in project janusgraph by JanusGraph.
In class AstyanaxKeyColumnValueStore, the method getNamesSlice:
public Map<StaticBuffer, EntryList> getNamesSlice(List<StaticBuffer> keys, SliceQuery query, StoreTransaction txh) throws BackendException {
    /*
     * RowQuery<K,C> should be parameterized as
     * RowQuery<ByteBuffer,ByteBuffer>. However, this causes the following
     * compilation error when attempting to call withColumnRange on a
     * RowQuery<ByteBuffer,ByteBuffer> instance:
     *
     * java.lang.Error: Unresolved compilation problem: The method
     * withColumnRange(ByteBuffer, ByteBuffer, boolean, int) is ambiguous
     * for the type RowQuery<ByteBuffer,ByteBuffer>
     *
     * The compiler substitutes ByteBuffer=C for both startColumn and
     * endColumn, compares it to its identical twin with that type
     * hard-coded, and dies.
     */
    // Add one for the last column, which may be removed in CassandraHelper.makeEntryList
    final int queryLimit = query.getLimit() + (query.hasLimit() ? 1 : 0);
    final int pageLimit = Math.min(this.readPageSize, queryLimit);
    ByteBuffer sliceStart = query.getSliceStart().asByteBuffer();
    final ByteBuffer sliceEnd = query.getSliceEnd().asByteBuffer();
    final RowSliceQuery rq = keyspace.prepareQuery(columnFamily).setConsistencyLevel(getTx(txh).getReadConsistencyLevel().getAstyanax()).withRetryPolicy(retryPolicy.duplicate()).getKeySlice(CassandraHelper.convert(keys));
    // Don't directly chain due to ambiguity resolution; see the comment at the top of this method
    rq.withColumnRange(sliceStart, sliceEnd, false, pageLimit);
    final OperationResult<Rows<ByteBuffer, ByteBuffer>> r;
    try {
        r = (OperationResult<Rows<ByteBuffer, ByteBuffer>>) rq.execute();
    } catch (ConnectionException e) {
        throw new TemporaryBackendException(e);
    }
    final Rows<ByteBuffer, ByteBuffer> rows = r.getResult();
    final Map<StaticBuffer, EntryList> result = new HashMap<>(rows.size());
    for (Row<ByteBuffer, ByteBuffer> row : rows) {
        assert !result.containsKey(row.getKey());
        final ByteBuffer key = row.getKey();
        ColumnList<ByteBuffer> pageColumns = row.getColumns();
        final List<Column<ByteBuffer>> queryColumns = new ArrayList<>();
        Iterables.addAll(queryColumns, pageColumns);
        // A full page means there may be more columns; keep paging until a
        // page comes back short or queryLimit columns have been collected.
        while (pageColumns.size() == pageLimit && queryColumns.size() < queryLimit) {
            final Column<ByteBuffer> lastColumn = queryColumns.get(queryColumns.size() - 1);
            sliceStart = lastColumn.getName();
            // No two values can share a column name, so start the next slice
            // just past the last column found by the previous query: scan from
            // the last byte toward the first for one that can be incremented.
            Integer position = null;
            for (int i = sliceStart.array().length - 1; i >= 0; i--) {
                if (sliceStart.array()[i] < Byte.MAX_VALUE) {
                    position = i;
                    sliceStart.array()[i]++;
                    break;
                }
            }
            if (null == position) {
                throw new PermanentBackendException("Column was not incrementable");
            }
            final RowQuery pageQuery = keyspace.prepareQuery(columnFamily).setConsistencyLevel(getTx(txh).getReadConsistencyLevel().getAstyanax()).withRetryPolicy(retryPolicy.duplicate()).getKey(row.getKey());
            // Don't directly chain due to ambiguity resolution; see the comment at the top of this method
            pageQuery.withColumnRange(sliceStart, sliceEnd, false, pageLimit);
            final OperationResult<ColumnList<ByteBuffer>> pageResult;
            try {
                pageResult = (OperationResult<ColumnList<ByteBuffer>>) pageQuery.execute();
            } catch (ConnectionException e) {
                throw new TemporaryBackendException(e);
            }
            if (Thread.interrupted()) {
                throw new TraversalInterruptedException();
            }
            // Undo the increment so the mutation does not leak up the stack:
            // sliceStart.array() backs a column name that will later be read
            // to deserialize an edge (it was obtained by de-referencing a
            // column from the previous query).
            sliceStart.array()[position]--;
            pageColumns = pageResult.getResult();
            Iterables.addAll(queryColumns, pageColumns);
        }
        result.put(StaticArrayBuffer.of(key), CassandraHelper.makeEntryList(queryColumns, entryGetter, query.getSliceEnd(), query.getLimit()));
    }
    return result;
}
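The paging loop's core trick is computing a byte-wise successor of the last column name so the next page starts strictly after it, then restoring the byte because the array also backs a column name read later. A self-contained sketch of that increment-then-restore dance; the class and method names are illustrative, and it mirrors the signed-byte comparison above, including its limitation that bytes already at Byte.MAX_VALUE are skipped:

import java.util.Arrays;

public final class SuccessorDemo {

    // Increment the last byte still below Byte.MAX_VALUE and return its
    // index, or -1 if no byte could be incremented. Mirrors the paging
    // logic in getNamesSlice (illustrative, not JanusGraph API).
    static int incrementInPlace(byte[] name) {
        for (int i = name.length - 1; i >= 0; i--) {
            if (name[i] < Byte.MAX_VALUE) {
                name[i]++;
                return i;
            }
        }
        return -1;
    }

    public static void main(String[] args) {
        byte[] column = {10, 20, 30};
        int position = incrementInPlace(column);
        System.out.println(Arrays.toString(column)); // [10, 20, 31] -> next slice starts here
        // Restore the original bytes afterwards, exactly as getNamesSlice does,
        // because the array also backs a column name deserialized later.
        column[position]--;
        System.out.println(Arrays.toString(column)); // [10, 20, 30]
    }
}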