Use of org.janusgraph.diskstorage.SimpleScanJobRunner in project janusgraph by JanusGraph.
From the class CQLScanJobIT, method testSimpleScan:
@Test
public void testSimpleScan() throws InterruptedException, ExecutionException, IOException, BackendException {
    int keys = 1000;
    int cols = 40;

    String[][] values = KeyValueStoreUtil.generateData(keys, cols);
    // Make it only half the number of columns for every 2nd key
    for (int i = 0; i < values.length; i++) {
        if (i % 2 == 0)
            values[i] = Arrays.copyOf(values[i], cols / 2);
    }
    log.debug("Loading values: " + keys + "x" + cols);

    // Load the generated data into the CQL-backed edgestore
    KeyColumnValueStoreManager mgr = new CQLStoreManager(GraphDatabaseConfiguration.buildGraphConfiguration());
    KeyColumnValueStore store = mgr.openDatabase("edgestore");
    StoreTransaction tx = mgr.beginTransaction(StandardBaseTransactionConfig.of(TimestampProviders.MICRO));
    KeyColumnValueStoreUtil.loadValues(store, tx, values);
    // noop on Cassandra, but harmless
    tx.commit();

    // Delegate each scan job to a Hadoop-based runner against the CQL backend
    SimpleScanJobRunner runner = (ScanJob job, Configuration jobConf, String rootNSName) -> {
        try {
            return new CQLHadoopScanRunner(job)
                .scanJobConf(jobConf)
                .scanJobConfRoot(rootNSName)
                .partitionerOverride("org.apache.cassandra.dht.Murmur3Partitioner")
                .run();
        } catch (ClassNotFoundException e) {
            throw new RuntimeException(e);
        }
    };

    SimpleScanJob.runBasicTests(keys, cols, runner);
}
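For context, the shape of the SimpleScanJobRunner contract can be inferred from the lambda above: it accepts a ScanJob, the job's Configuration, and the root configuration namespace, and returns the metrics produced by running the scan. Below is a minimal sketch of such a functional interface, with the return type and throws clause assumed from this usage; the actual declaration in JanusGraph's test sources may differ.

import java.io.IOException;
import java.util.concurrent.ExecutionException;

import org.janusgraph.diskstorage.BackendException;
import org.janusgraph.diskstorage.configuration.Configuration;
import org.janusgraph.diskstorage.keycolumnvalue.scan.ScanJob;
import org.janusgraph.diskstorage.keycolumnvalue.scan.ScanMetrics;

// Sketch of the runner contract as exercised in testSimpleScan above;
// the exact signature in JanusGraph's test sources may differ.
@FunctionalInterface
public interface SimpleScanJobRunner {
    ScanMetrics run(ScanJob job, Configuration jobConf, String rootNSName)
        throws InterruptedException, ExecutionException, IOException, BackendException;
}

Structured this way, the same SimpleScanJob.runBasicTests harness can be pointed at different backends simply by swapping the lambda, as the test does here with CQLHadoopScanRunner.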