Use of org.janusgraph.diskstorage.configuration.WriteConfiguration in project janusgraph by JanusGraph.
From class JanusGraphIndexTest, method testIndexDataRetrievalWithLimitLessThenBatch:
@Test
public void testIndexDataRetrievalWithLimitLessThenBatch() throws Exception {
    WriteConfiguration config = getConfiguration();
    // Cap the index backend's result batch size at 10; the query below uses a smaller limit.
    config.set("index.search.max-result-set-size", 10);
    JanusGraph customGraph = getForceIndexGraph(config);
    final JanusGraphManagement management = customGraph.openManagement();
    final PropertyKey num = management.makePropertyKey("num").dataType(Integer.class).cardinality(Cardinality.SINGLE).make();
    // Mixed index on "num" so the has() predicate below is answered by the index backend.
    management.buildIndex("oridx", Vertex.class).addKey(num).buildMixedIndex(INDEX);
    management.commit();
    customGraph.tx().commit();
    final GraphTraversalSource g = customGraph.traversal();
    g.addV().property("num", 1).next();
    g.addV().property("num", 2).next();
    customGraph.tx().commit();
    assertEquals(2, customGraph.traversal().V().has("num", P.lt(3)).limit(4).toList().size());
    JanusGraphFactory.close(customGraph);
}
Use of org.janusgraph.diskstorage.configuration.WriteConfiguration in project janusgraph by JanusGraph.
From class HBaseStoreManagerConfigTest, method testHBaseStoragePort:
@Test
public void testHBaseStoragePort() throws BackendException {
    WriteConfiguration config = hBaseContainer.getWriteConfiguration();
    config.set(ConfigElement.getPath(GraphDatabaseConfiguration.STORAGE_PORT), 2000);
    HBaseStoreManager manager = new HBaseStoreManager(new BasicConfiguration(GraphDatabaseConfiguration.ROOT_NS, config, BasicConfiguration.Restriction.NONE));
    // Check that storage.port was propagated to the native property in the HBase conf.
    String port = manager.getHBaseConf().get("hbase.zookeeper.property.clientPort");
    assertEquals("2000", port);
}
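ConfigElement.getPath is what bridges the typed ConfigOption constants and the raw key-value API of WriteConfiguration. A minimal sketch, assuming the same hBaseContainer harness as the test:

// ConfigElement.getPath resolves a typed ConfigOption to its dotted string path.
String path = ConfigElement.getPath(GraphDatabaseConfiguration.STORAGE_PORT);  // "storage.port"
WriteConfiguration conf = hBaseContainer.getWriteConfiguration();
conf.set(path, 2000);
// HBaseStoreManager translates storage.port into the native HBase client key
// "hbase.zookeeper.property.clientPort", which the assertion above checks.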
Use of org.janusgraph.diskstorage.configuration.WriteConfiguration in project janusgraph by JanusGraph.
From class HBaseStoreManagerConfigTest, method testHBaseSkipSchemaCheck:
// Test the HBase skip-schema-check config.
@Test
public void testHBaseSkipSchemaCheck() throws Exception {
    org.apache.logging.log4j.core.Logger log = (org.apache.logging.log4j.core.Logger) LogManager.getLogger(HBaseStoreManager.class);
    Level savedLevel = log.getLevel();
    log.setLevel(Level.DEBUG);
    // Capture DEBUG output from HBaseStoreManager in a StringWriter.
    StringWriter writer = new StringWriter();
    Appender appender = WriterAppender.createAppender(PatternLayout.newBuilder().withPattern("%p: %m%n").build(), LevelMatchFilter.newBuilder().setLevel(Level.DEBUG).build(), writer, "test", false, false);
    appender.start();
    log.addAppender(appender);
    // Open the HBaseStoreManager with the default skip-schema-check (false).
    WriteConfiguration config = hBaseContainer.getWriteConfiguration();
    HBaseStoreManager manager = new HBaseStoreManager(new BasicConfiguration(GraphDatabaseConfiguration.ROOT_NS, config, BasicConfiguration.Restriction.NONE));
    assertEquals(DistributedStoreManager.Deployment.REMOTE, manager.getDeployment());
    // Verify we get "Performing schema check".
    assertTrue(writer.toString().contains("Performing schema check"), writer.toString());
    manager.close();
    // Open the HBaseStoreManager with skip-schema-check true.
    config.set(ConfigElement.getPath(HBaseStoreManager.SKIP_SCHEMA_CHECK), true);
    manager = new HBaseStoreManager(new BasicConfiguration(GraphDatabaseConfiguration.ROOT_NS, config, BasicConfiguration.Restriction.NONE));
    writer.getBuffer().setLength(0);
    assertEquals(DistributedStoreManager.Deployment.REMOTE, manager.getDeployment());
    // Verify we get "Skipping schema check".
    assertTrue(writer.toString().contains("Skipping schema check"), writer.toString());
    log.removeAppender(appender);
    log.setLevel(savedLevel);
    // Test that an unknown HBase table fails fast even with skip-schema-check true.
    config.set(ConfigElement.getPath(HBaseStoreManager.HBASE_TABLE), "unknown_table");
    HBaseStoreManager skipSchemaManager = new HBaseStoreManager(new BasicConfiguration(GraphDatabaseConfiguration.ROOT_NS, config, BasicConfiguration.Restriction.NONE));
    Exception ex = assertThrows(PermanentBackendException.class, () -> skipSchemaManager.getLocalKeyPartition());
    assertEquals("Table unknown_table doesn't exist in HBase!", ex.getMessage());
    manager.close();
}
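One caveat: the test restores the logger only on the success path, so a failed assertion leaves the appender attached. A hedged sketch of the same log-capture setup with guaranteed cleanup (the names sink and capture are illustrative, not from the test):

StringWriter sink = new StringWriter();
Appender capture = WriterAppender.createAppender(
        PatternLayout.newBuilder().withPattern("%p: %m%n").build(),
        null, sink, "capture", false, false);
capture.start();
log.addAppender(capture);
try {
    // ... open managers and assert on sink.toString() as above ...
} finally {
    // Runs even when an assertion fails, so later tests see an unmodified logger.
    log.removeAppender(capture);
    log.setLevel(savedLevel);
}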
Use of org.janusgraph.diskstorage.configuration.WriteConfiguration in project janusgraph by JanusGraph.
From class CQLScanJobIT, method testPartitionedVertexScan:
@Test
public void testPartitionedVertexScan() throws Exception {
    tearDown();
    clearGraph(getConfiguration());
    WriteConfiguration partConf = getConfiguration();
    open(partConf);
    mgmt.makeVertexLabel("part").partition().make();
    finishSchema();
    JanusGraphVertex supernode = graph.addVertex("part");
    for (int i = 0; i < 128; i++) {
        JanusGraphVertex v = graph.addVertex("part");
        v.addEdge("default", supernode);
        // Commit every fourth vertex to spread the writes across transactions.
        if (0 < i && 0 == i % 4) {
            graph.tx().commit();
        }
    }
    graph.tx().commit();
    org.apache.hadoop.conf.Configuration c = new org.apache.hadoop.conf.Configuration();
    c.set(ConfigElement.getPath(JanusGraphHadoopConfiguration.GRAPH_CONFIG_KEYS, true) + "." + "storage.cql.keyspace", getClass().getSimpleName().toLowerCase());
    c.set(ConfigElement.getPath(JanusGraphHadoopConfiguration.GRAPH_CONFIG_KEYS, true) + "." + "storage.backend", "cql");
    c.set(ConfigElement.getPath(JanusGraphHadoopConfiguration.GRAPH_CONFIG_KEYS, true) + "." + "storage.port", String.valueOf(cql.getMappedCQLPort()));
    c.set("cassandra.input.partitioner.class", "org.apache.cassandra.dht.Murmur3Partitioner");
    Job job = getVertexJobWithDefaultMapper(c);
    // Should fail since filter-partitioned-vertices wasn't enabled.
    assertFalse(job.waitForCompletion(true));
}
Use of org.janusgraph.diskstorage.configuration.WriteConfiguration in project janusgraph by JanusGraph.
From class BerkeleyLuceneIndexMetricTest, method getConfiguration:
@Override
public WriteConfiguration getConfiguration() {
    WriteConfiguration config = BerkeleyStorageSetup.getBerkeleyJEGraphConfiguration();
    ModifiableConfiguration modifiableConfiguration = new ModifiableConfiguration(GraphDatabaseConfiguration.ROOT_NS, config, BasicConfiguration.Restriction.NONE);
    modifiableConfiguration.set(BASIC_METRICS, true);
    // Configure a Lucene mixed-index backend under the index name "search".
    modifiableConfiguration.set(INDEX_BACKEND, "lucene", "search");
    modifiableConfiguration.set(INDEX_DIRECTORY, getHomeDir("lucene"), "search");
    // The ModifiableConfiguration writes through to the backing WriteConfiguration.
    return config;
}
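Note that the method mutates the ModifiableConfiguration yet returns the original config: the typed wrapper writes through to its backing WriteConfiguration. A minimal sketch of that relationship (the string path "metrics.enabled" for BASIC_METRICS is an assumption here):

WriteConfiguration raw = BerkeleyStorageSetup.getBerkeleyJEGraphConfiguration();
ModifiableConfiguration typed = new ModifiableConfiguration(
        GraphDatabaseConfiguration.ROOT_NS, raw, BasicConfiguration.Restriction.NONE);
typed.set(BASIC_METRICS, true);
// The typed setter mutated the backing store, so the raw view sees the value
// under the option's dotted path (assumed to be "metrics.enabled").
assertTrue(raw.get("metrics.enabled", Boolean.class));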