Use of org.apache.accumulo.core.cli.BatchWriterOpts in project accumulo by apache.
The class FateStarvationIT, method run:
@Test
public void run() throws Exception {
  String tableName = getUniqueNames(1)[0];
  Connector c = getConnector();
  c.tableOperations().create(tableName);
  c.tableOperations().addSplits(tableName, TestIngest.getSplitPoints(0, 100000, 50));
  TestIngest.Opts opts = new TestIngest.Opts();
  opts.random = 89;
  opts.timestamp = 7;
  opts.dataSize = 50;
  opts.rows = 100000;
  opts.cols = 1;
  opts.setTableName(tableName);
  ClientConfiguration clientConf = cluster.getClientConfig();
  if (clientConf.hasSasl()) {
    opts.updateKerberosCredentials(clientConf);
  } else {
    opts.setPrincipal(getAdminPrincipal());
  }
  TestIngest.ingest(c, opts, new BatchWriterOpts());
  c.tableOperations().flush(tableName, null, null, true);
  List<Text> splits = new ArrayList<>(TestIngest.getSplitPoints(0, 100000, 67));
  Random rand = new Random();
  // queue many overlapping range compactions; each one is a separate FATE operation
  for (int i = 0; i < 100; i++) {
    int idx1 = rand.nextInt(splits.size() - 1);
    int idx2 = rand.nextInt(splits.size() - (idx1 + 1)) + idx1 + 1;
    c.tableOperations().compact(tableName, splits.get(idx1), splits.get(idx2), false, false);
  }
  // taking the table offline must not starve behind the queued compactions
  c.tableOperations().offline(tableName);
  FunctionalTestUtils.assertNoDanglingFateLocks(getConnector().getInstance(), getCluster());
}
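Every snippet on this page hands TestIngest.ingest a default-constructed BatchWriterOpts. A minimal sketch of how those options become a live writer, assuming the Accumulo 1.x client API (getBatchWriterConfig() is the conversion method BatchWriterOpts provides; BatchWriter, BatchWriterConfig, Mutation, and Value come from the standard client and data packages, and the table name and cell contents below are placeholders):

  // Sketch: turn the parsed CLI options into a BatchWriterConfig and write one cell.
  static void writeOneCell(Connector c, String table) throws Exception {
    BatchWriterOpts bwOpts = new BatchWriterOpts(); // defaults mirror new BatchWriterConfig()
    BatchWriterConfig config = bwOpts.getBatchWriterConfig(); // max memory, latency, write threads, timeout
    BatchWriter writer = c.createBatchWriter(table, config);
    Mutation m = new Mutation("row_0001");
    m.put("colf", "colq", new Value("payload".getBytes()));
    writer.addMutation(m);
    writer.close(); // flushes buffered mutations and surfaces any rejected ones
  }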
Use of org.apache.accumulo.core.cli.BatchWriterOpts in project accumulo by apache.
The class DeleteIT, method deleteTest:
public static void deleteTest(Connector c, AccumuloCluster cluster, String user, String password, String tableName, String keytab) throws Exception {
  VerifyIngest.Opts vopts = new VerifyIngest.Opts();
  TestIngest.Opts opts = new TestIngest.Opts();
  vopts.setTableName(tableName);
  opts.setTableName(tableName);
  vopts.rows = opts.rows = 1000;
  vopts.cols = opts.cols = 1;
  vopts.random = opts.random = 56;
  assertTrue("Expected one of password or keytab", null != password || null != keytab);
  if (null != password) {
    assertNull("Given password, expected null keytab", keytab);
    Password passwd = new Password(password);
    opts.setPassword(passwd);
    opts.setPrincipal(user);
    vopts.setPassword(passwd);
    vopts.setPrincipal(user);
  }
  if (null != keytab) {
    assertNull("Given keytab, expected null password", password);
    ClientConfiguration clientConfig = cluster.getClientConfig();
    opts.updateKerberosCredentials(clientConfig);
    vopts.updateKerberosCredentials(clientConfig);
  }
  BatchWriterOpts bwOpts = new BatchWriterOpts();
  TestIngest.ingest(c, opts, bwOpts);
  // build the CLI arguments for TestRandomDeletes with the matching credentials
  String[] args = null;
  assertTrue("Expected one of password or keytab", null != password || null != keytab);
  if (null != password) {
    assertNull("Given password, expected null keytab", keytab);
    args = new String[] { "-u", user, "-p", password, "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "--table", tableName };
  }
  if (null != keytab) {
    assertNull("Given keytab, expected null password", password);
    args = new String[] { "-u", user, "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "--table", tableName, "--keytab", keytab };
  }
  assertEquals(0, cluster.getClusterControl().exec(TestRandomDeletes.class, args));
  TestIngest.ingest(c, opts, bwOpts);
  VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
}
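Both ingest passes above reuse one default bwOpts. When the defaults are too conservative, the options can be tuned before ingest; a sketch, assuming BatchWriterOpts exposes public JCommander-bound fields matching its --batch* command-line flags (the field names here are an assumption, not confirmed by the snippets):

  BatchWriterOpts bwOpts = new BatchWriterOpts();
  // field names assumed from the --batchThreads/--batchMemory/--batchLatency flags
  bwOpts.batchThreads = 4;                // more parallel sends to tablet servers
  bwOpts.batchMemory = 64L * 1024 * 1024; // buffer up to 64 MB before flushing
  bwOpts.batchLatency = 2000L;            // flush at least every 2 seconds (milliseconds)
  TestIngest.ingest(c, opts, bwOpts);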
Use of org.apache.accumulo.core.cli.BatchWriterOpts in project accumulo by apache.
The class MasterFailoverIT, method test:
@Test
public void test() throws Exception {
  Connector c = getConnector();
  String[] names = getUniqueNames(2);
  c.tableOperations().create(names[0]);
  TestIngest.Opts opts = new TestIngest.Opts();
  opts.setTableName(names[0]);
  ClientConfiguration clientConf = cluster.getClientConfig();
  if (clientConf.hasSasl()) {
    opts.updateKerberosCredentials(clientConf);
  } else {
    opts.setPrincipal(getAdminPrincipal());
  }
  TestIngest.ingest(c, opts, new BatchWriterOpts());
  ClusterControl control = cluster.getClusterControl();
  control.stopAllServers(ServerType.MASTER);
  // start up a new one
  control.startAllServers(ServerType.MASTER);
  // talk to it
  c.tableOperations().rename(names[0], names[1]);
  VerifyIngest.Opts vopts = new VerifyIngest.Opts();
  vopts.setTableName(names[1]);
  if (clientConf.hasSasl()) {
    vopts.updateKerberosCredentials(clientConf);
  } else {
    vopts.setPrincipal(getAdminPrincipal());
  }
  VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
}
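The stop/start/rename sequence is the heart of this test. A compact sketch of the same failover probe, assuming the harness types used above; rename is routed through the master, so its success proves a replacement master is serving:

  static void bounceMaster(AccumuloCluster cluster, Connector c, String oldName, String newName) throws Exception {
    ClusterControl control = cluster.getClusterControl();
    control.stopAllServers(ServerType.MASTER);  // take down the active master
    control.startAllServers(ServerType.MASTER); // bring up a replacement
    c.tableOperations().rename(oldName, newName); // blocks until a master handles the FATE op
  }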
Use of org.apache.accumulo.core.cli.BatchWriterOpts in project accumulo by apache.
The class MaxOpenIT, method run:
@Test
public void run() throws Exception {
  final Connector c = getConnector();
  final String tableName = getUniqueNames(1)[0];
  final ClientConfiguration clientConf = cluster.getClientConfig();
  c.tableOperations().create(tableName);
  c.tableOperations().setProperty(tableName, Property.TABLE_MAJC_RATIO.getKey(), "10");
  c.tableOperations().addSplits(tableName, TestIngest.getSplitPoints(0, NUM_TO_INGEST, NUM_TABLETS));
  // each pass of the following loop flushes one more map file into every tablet
  for (int i = 0; i < 3; i++) {
    TestIngest.Opts opts = new TestIngest.Opts();
    opts.timestamp = i;
    opts.dataSize = 50;
    opts.rows = NUM_TO_INGEST;
    opts.cols = 1;
    opts.random = i;
    opts.setTableName(tableName);
    if (clientConf.hasSasl()) {
      opts.updateKerberosCredentials(clientConf);
    } else {
      opts.setPrincipal(getAdminPrincipal());
    }
    TestIngest.ingest(c, opts, new BatchWriterOpts());
    c.tableOperations().flush(tableName, null, null, true);
    FunctionalTestUtils.checkRFiles(c, tableName, NUM_TABLETS, NUM_TABLETS, i + 1, i + 1);
  }
  List<Range> ranges = new ArrayList<>(NUM_TO_INGEST);
  for (int i = 0; i < NUM_TO_INGEST; i++) {
    ranges.add(new Range(TestIngest.generateRow(i, 0)));
  }
  long time1 = batchScan(c, tableName, ranges, 1);
  // run it again, now that stuff is cached on the client and server
  time1 = batchScan(c, tableName, ranges, 1);
  long time2 = batchScan(c, tableName, ranges, NUM_TABLETS);
  System.out.printf("Single thread scan time %6.2f %n", time1 / 1000.0);
  System.out.printf("Multiple thread scan time %6.2f %n", time2 / 1000.0);
}
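The batchScan helper is not shown in the snippet. A plausible sketch, assuming it drains every range with the requested number of query threads and returns elapsed milliseconds (the real helper may also verify the values it reads):

  private long batchScan(Connector c, String tableName, List<Range> ranges, int threads) throws Exception {
    BatchScanner bs = c.createBatchScanner(tableName, Authorizations.EMPTY, threads);
    try {
      bs.setRanges(ranges);
      long start = System.currentTimeMillis();
      for (Map.Entry<Key,Value> entry : bs) {
        // drain every entry so all ranges are actually fetched
      }
      return System.currentTimeMillis() - start;
    } finally {
      bs.close();
    }
  }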
Use of org.apache.accumulo.core.cli.BatchWriterOpts in project accumulo by apache.
The class TableIT, method test:
@Test
public void test() throws Exception {
  Assume.assumeThat(getClusterType(), CoreMatchers.is(ClusterType.MINI));
  AccumuloCluster cluster = getCluster();
  MiniAccumuloClusterImpl mac = (MiniAccumuloClusterImpl) cluster;
  String rootPath = mac.getConfig().getDir().getAbsolutePath();
  Connector c = getConnector();
  TableOperations to = c.tableOperations();
  String tableName = getUniqueNames(1)[0];
  to.create(tableName);
  TestIngest.Opts opts = new TestIngest.Opts();
  VerifyIngest.Opts vopts = new VerifyIngest.Opts();
  ClientConfiguration clientConfig = getCluster().getClientConfig();
  if (clientConfig.hasSasl()) {
    opts.updateKerberosCredentials(clientConfig);
    vopts.updateKerberosCredentials(clientConfig);
  } else {
    opts.setPrincipal(getAdminPrincipal());
    vopts.setPrincipal(getAdminPrincipal());
  }
  opts.setTableName(tableName);
  TestIngest.ingest(c, opts, new BatchWriterOpts());
  to.flush(tableName, null, null, true);
  vopts.setTableName(tableName);
  VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
  Table.ID id = Table.ID.of(to.tableIdMap().get(tableName));
  try (Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
    s.setRange(new KeyExtent(id, null, null).toMetadataRange());
    s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
    assertTrue(Iterators.size(s.iterator()) > 0);
    FileSystem fs = getCluster().getFileSystem();
    assertTrue(fs.listStatus(new Path(rootPath + "/accumulo/tables/" + id)).length > 0);
    to.delete(tableName);
    assertEquals(0, Iterators.size(s.iterator()));
    try {
      assertEquals(0, fs.listStatus(new Path(rootPath + "/accumulo/tables/" + id)).length);
    } catch (FileNotFoundException ex) {
      // that's fine, too
    }
    assertNull(to.tableIdMap().get(tableName));
    to.create(tableName);
    TestIngest.ingest(c, opts, new BatchWriterOpts());
    VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
    to.delete(tableName);
  }
}
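Across all five tests, the BatchWriterOpts only matters inside TestIngest.ingest. A simplified sketch of what that call presumably does with it; this is an assumption for illustration, since the real ingester also handles timestamps, visibilities, and checksummed values:

  static void ingestSketch(Connector c, TestIngest.Opts opts, BatchWriterOpts bwOpts) throws Exception {
    BatchWriter bw = c.createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig());
    for (int row = 0; row < opts.rows; row++) {
      Mutation m = new Mutation(TestIngest.generateRow(row, 0));
      for (int col = 0; col < opts.cols; col++) {
        // opts.dataSize zero-filled bytes stand in for the real generated value
        m.put(new Text("colf"), new Text(String.format("col_%07d", col)), new Value(new byte[opts.dataSize]));
      }
      bw.addMutation(m);
    }
    bw.close();
  }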