Use of org.apache.accumulo.minicluster.MiniAccumuloCluster in project Gaffer by gchq.
In the class RFileReaderRddIT, the method createAccumuloCluster:
private MiniAccumuloCluster createAccumuloCluster(final String tableName, final Configuration configuration, final List<String> data)
        throws InterruptedException, AccumuloException, AccumuloSecurityException, IOException, TableExistsException, TableNotFoundException {
    final MiniAccumuloCluster cluster = MiniAccumuloClusterProvider.getMiniAccumuloCluster();
    final Connector connector = cluster.getConnector(MiniAccumuloClusterProvider.USER, MiniAccumuloClusterProvider.PASSWORD);
    connector.tableOperations().create(tableName);
    // Add data
    final BatchWriter bw = connector.createBatchWriter(tableName, new BatchWriterConfig());
    for (int i = 0; i < data.size(); i++) {
        final Mutation m = new Mutation("row" + i);
        m.put("CF", "CQ", data.get(i));
        bw.addMutation(m);
    }
    bw.close();
    // Compact to ensure an RFile is created, sleep to give it a little time to do it
    connector.tableOperations().compact(tableName, new CompactionConfig());
    Thread.sleep(1000L);
    InputConfigurator.fetchColumns(AccumuloInputFormat.class, configuration, Sets.newHashSet(new Pair<>(new Text("CF"), new Text("CQ"))));
    return cluster;
}
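For context, a caller could read the rows back with a plain Scanner to confirm the mutations were flushed before exercising the RDD. This is a minimal sketch, not part of the Gaffer test; it assumes org.apache.accumulo.core.client.Scanner, org.apache.accumulo.core.security.Authorizations, and the Key/Value entry types are available.

// Hypothetical verification, run after createAccumuloCluster(tableName, configuration, data) returns:
final Connector connector = cluster.getConnector(MiniAccumuloClusterProvider.USER, MiniAccumuloClusterProvider.PASSWORD);
final Scanner scanner = connector.createScanner(tableName, Authorizations.EMPTY);
scanner.fetchColumn(new Text("CF"), new Text("CQ"));
int count = 0;
for (final Map.Entry<Key, Value> entry : scanner) {
    count++;
}
assertThat(count).isEqualTo(data.size());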
Use of org.apache.accumulo.minicluster.MiniAccumuloCluster in project Gaffer by gchq.
In the class RFileReaderRddIT, the method testRFileReaderRDDCanBeCreatedWith2TableInputs:
@Test
public void testRFileReaderRDDCanBeCreatedWith2TableInputs() throws IOException, InterruptedException,
        AccumuloSecurityException, AccumuloException, TableNotFoundException, TableExistsException {
    // Given
    final List<String> dataInput = Arrays.asList("apples", "bananas");
    final MiniAccumuloCluster cluster = createAccumuloCluster(tableName, config, dataInput);
    // When
    final RFileReaderRDD rdd = new RFileReaderRDD(sparkSession.sparkContext(), cluster.getInstanceName(),
            cluster.getZooKeepers(), MiniAccumuloClusterProvider.USER, MiniAccumuloClusterProvider.PASSWORD,
            tableName, new HashSet<>(), serialiseConfiguration(config));
    // Then
    assertThat(rdd.count()).isEqualTo(dataInput.size());
    assertThat(rdd.getPartitions()).hasSize(1);
}
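The serialiseConfiguration helper used above is not shown in this excerpt. A plausible minimal sketch, assuming the RDD expects the Hadoop Configuration serialised to a byte array via its Writable contract (the body below is an assumption, not the Gaffer source; it uses java.io.ByteArrayOutputStream and DataOutputStream):

private byte[] serialiseConfiguration(final Configuration configuration) throws IOException {
    // Write the Configuration using Hadoop's Writable serialisation and capture the bytes.
    final ByteArrayOutputStream baos = new ByteArrayOutputStream();
    final DataOutputStream dos = new DataOutputStream(baos);
    configuration.write(dos);
    dos.flush();
    return baos.toByteArray();
}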
Use of org.apache.accumulo.minicluster.MiniAccumuloCluster in project YCSB by brianfrankcooper.
In the class AccumuloTest, the method setup:
@BeforeClass
public static void setup() throws Exception {
    // Minicluster setup fails on Windows with an UnsatisfiedLinkError.
    // Skip if windows.
    assumeTrue(!isWindows());
    cluster = new MiniAccumuloCluster(workingDir.newFolder("accumulo").getAbsoluteFile(), "protectyaneck");
    LOG.debug("starting minicluster");
    cluster.start();
    LOG.debug("creating connection for admin operations.");
    // set up the table and user
    final Connector admin = cluster.getConnector("root", "protectyaneck");
    admin.tableOperations().create(CoreWorkload.TABLENAME_PROPERTY_DEFAULT);
    admin.securityOperations().createLocalUser("ycsb", new PasswordToken("protectyaneck"));
    admin.securityOperations().grantTablePermission("ycsb", CoreWorkload.TABLENAME_PROPERTY_DEFAULT, TablePermission.READ);
    admin.securityOperations().grantTablePermission("ycsb", CoreWorkload.TABLENAME_PROPERTY_DEFAULT, TablePermission.WRITE);
    // set properties the binding will read
    properties = new Properties();
    properties.setProperty("accumulo.zooKeepers", cluster.getZooKeepers());
    properties.setProperty("accumulo.instanceName", cluster.getInstanceName());
    properties.setProperty("accumulo.columnFamily", "family");
    properties.setProperty("accumulo.username", "ycsb");
    properties.setProperty("accumulo.password", "protectyaneck");
    // cut down the batch writer timeout so that writes will push through.
    properties.setProperty("accumulo.batchWriterMaxLatency", "4");
    // set these explicitly to the defaults at the time we're compiled, since they'll be inlined in our class.
    properties.setProperty(CoreWorkload.TABLENAME_PROPERTY, CoreWorkload.TABLENAME_PROPERTY_DEFAULT);
    properties.setProperty(CoreWorkload.FIELD_COUNT_PROPERTY, CoreWorkload.FIELD_COUNT_PROPERTY_DEFAULT);
    properties.setProperty(CoreWorkload.INSERT_ORDER_PROPERTY, "ordered");
}
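A setup like this is normally paired with an @AfterClass that stops the minicluster and releases its working directory. A minimal sketch (the method name and log message are assumptions, not taken from the YCSB source):

@AfterClass
public static void clusterCleanup() throws Exception {
    // Stop the minicluster if it was actually started (setup is skipped on Windows).
    if (cluster != null) {
        LOG.debug("stopping minicluster");
        cluster.stop();
    }
}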
Use of org.apache.accumulo.minicluster.MiniAccumuloCluster in project incubator-rya by apache.
In the class AccumuloRyaConnectionCommandsIT, the method printConnectionDetails_connectedToAccumulo:
@Test
public void printConnectionDetails_connectedToAccumulo() throws IOException {
    final MiniAccumuloCluster cluster = getCluster();
    final Bootstrap bootstrap = getTestBootstrap();
    final JLineShellComponent shell = getTestShell();
    // Mock the user entering the correct password.
    final ApplicationContext context = bootstrap.getApplicationContext();
    final PasswordPrompt mockPrompt = context.getBean(PasswordPrompt.class);
    when(mockPrompt.getPassword()).thenReturn("password".toCharArray());
    // Connect to the mini accumulo instance.
    final String cmd = RyaConnectionCommands.CONNECT_ACCUMULO_CMD + " "
            + "--username root "
            + "--instanceName " + cluster.getInstanceName() + " "
            + "--zookeepers " + cluster.getZooKeepers();
    shell.executeCommand(cmd);
    // Run the print connection details command.
    final CommandResult printResult = shell.executeCommand(RyaConnectionCommands.PRINT_CONNECTION_DETAILS_CMD);
    final String msg = (String) printResult.getResult();
    final String expected = "The shell is connected to an instance of Accumulo using the following parameters:\n"
            + " Username: root\n"
            + " Instance Name: " + cluster.getInstanceName() + "\n"
            + " Zookeepers: " + cluster.getZooKeepers();
    assertEquals(expected, msg);
}
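The getCluster(), getTestBootstrap(), and getTestShell() helpers come from the test's base class and are not shown here. A rough sketch of how such a harness might provide the MiniAccumuloCluster behind getCluster(), consistent with the mocked "password" prompt above (names and structure are assumptions, not the Rya source):

private static MiniAccumuloCluster cluster;

@BeforeClass
public static void startMiniAccumulo() throws Exception {
    // Stand up a throwaway cluster whose root password matches the mocked prompt.
    final File tempDir = com.google.common.io.Files.createTempDir();
    cluster = new MiniAccumuloCluster(new MiniAccumuloConfig(tempDir, "password"));
    cluster.start();
}

protected static MiniAccumuloCluster getCluster() {
    return cluster;
}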
Use of org.apache.accumulo.minicluster.MiniAccumuloCluster in project incubator-rya by apache.
In the class QueryBenchmarkRunIT, the method setup:
@BeforeClass
public static void setup() throws Exception {
    // Squash loud logs.
    Logger.getLogger(ClientCnxn.class).setLevel(Level.ERROR);
    // Setup the Mini Accumulo Cluster.
    final File miniDataDir = com.google.common.io.Files.createTempDir();
    final MiniAccumuloConfig cfg = new MiniAccumuloConfig(miniDataDir, ACCUMULO_PASSWORD);
    cluster = new MiniAccumuloCluster(cfg);
    cluster.start();
    // Create a Rya Client connected to the Mini Accumulo Cluster.
    final AccumuloConnectionDetails connDetails = new AccumuloConnectionDetails(ACCUMULO_USER,
            ACCUMULO_PASSWORD.toCharArray(), cluster.getInstanceName(), cluster.getZooKeepers());
    final Connector connector = cluster.getConnector(ACCUMULO_USER, ACCUMULO_PASSWORD);
    final RyaClient ryaClient = AccumuloRyaClientFactory.build(connDetails, connector);
    // Install an instance of Rya on the mini cluster.
    installRya(ryaClient);
    // Get a Sail object that is backed by the Rya store that is on the mini cluster.
    final AccumuloRdfConfiguration ryaConf = new AccumuloRdfConfiguration();
    ryaConf.setTablePrefix(RYA_INSTANCE_NAME);
    ryaConf.set(ConfigUtils.CLOUDBASE_USER, ACCUMULO_USER);
    ryaConf.set(ConfigUtils.CLOUDBASE_PASSWORD, ACCUMULO_PASSWORD);
    ryaConf.set(ConfigUtils.CLOUDBASE_ZOOKEEPERS, cluster.getZooKeepers());
    ryaConf.set(ConfigUtils.CLOUDBASE_INSTANCE, cluster.getInstanceName());
    ryaConf.set(ConfigUtils.USE_PCJ, "true");
    ryaConf.set(ConfigUtils.PCJ_STORAGE_TYPE, PrecomputedJoinStorageType.ACCUMULO.toString());
    ryaConf.set(ConfigUtils.PCJ_UPDATER_TYPE, PrecomputedJoinUpdaterType.NO_UPDATE.toString());
    sail = RyaSailFactory.getInstance(ryaConf);
    // Load some data into the cluster that will match the query we're testing against.
    loadTestStatements();
    // Add a PCJ to the application that summarizes the query.
    createTestPCJ(ryaClient);
}
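The matching teardown is not shown in this excerpt; typically the Sail is shut down and the minicluster stopped so the temporary data directory can be reclaimed. A minimal sketch under that assumption:

@AfterClass
public static void tearDown() throws Exception {
    // Release the Rya-backed Sail first, then stop the Mini Accumulo Cluster.
    if (sail != null) {
        sail.shutDown();
    }
    if (cluster != null) {
        cluster.stop();
    }
}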