Use of org.apache.accumulo.core.cli.BatchWriterOpts in project accumulo by apache.
The class BinaryIT, method runTest:
public static void runTest(Connector c, String tableName) throws Exception {
  BatchWriterOpts bwOpts = new BatchWriterOpts();
  ScannerOpts scanOpts = new ScannerOpts();
  TestBinaryRows.Opts opts = new TestBinaryRows.Opts();
  opts.setTableName(tableName);
  opts.start = 0;
  opts.num = 100000;
  opts.mode = "ingest";
  TestBinaryRows.runTest(c, opts, bwOpts, scanOpts);
  opts.mode = "verify";
  TestBinaryRows.runTest(c, opts, bwOpts, scanOpts);
  opts.start = 25000;
  opts.num = 50000;
  opts.mode = "delete";
  TestBinaryRows.runTest(c, opts, bwOpts, scanOpts);
  opts.start = 0;
  opts.num = 25000;
  opts.mode = "verify";
  TestBinaryRows.runTest(c, opts, bwOpts, scanOpts);
  opts.start = 75000;
  opts.num = 25000;
  opts.mode = "randomLookups";
  TestBinaryRows.runTest(c, opts, bwOpts, scanOpts);
  opts.start = 25000;
  opts.num = 50000;
  opts.mode = "verifyDeleted";
  TestBinaryRows.runTest(c, opts, bwOpts, scanOpts);
}
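For context, a minimal sketch (not the Accumulo source) of how an ingest helper like TestBinaryRows.runTest typically consumes the BatchWriterOpts built above: the parsed options are converted into a BatchWriterConfig and used to create a BatchWriter. The helper name, row format, and column layout here are illustrative, and the usual org.apache.accumulo.core.client / core.data imports plus a static import of UTF_8 are assumed.
static void ingestSketch(Connector c, String tableName, BatchWriterOpts bwOpts, long start, long num) throws Exception {
  // translate the parsed CLI options (memory, latency, threads, timeout) into a client config
  BatchWriterConfig config = bwOpts.getBatchWriterConfig();
  BatchWriter bw = c.createBatchWriter(tableName, config);
  try {
    for (long i = start; i < start + num; i++) {
      // illustrative row/column layout; TestBinaryRows encodes its rows differently
      Mutation m = new Mutation(String.format("row_%016d", i));
      m.put("cf", "cq", new Value(Long.toString(i).getBytes(UTF_8)));
      bw.addMutation(m);
    }
  } finally {
    bw.close(); // flushes any buffered mutations
  }
}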
Use of org.apache.accumulo.core.cli.BatchWriterOpts in project accumulo by apache.
The class TestMultiTableIngest, method main:
public static void main(String[] args) throws Exception {
  ArrayList<String> tableNames = new ArrayList<>();
  Opts opts = new Opts();
  ScannerOpts scanOpts = new ScannerOpts();
  BatchWriterOpts bwOpts = new BatchWriterOpts();
  opts.parseArgs(TestMultiTableIngest.class.getName(), args, scanOpts, bwOpts);
  // create the test table within accumulo
  Connector connector;
  try {
    connector = opts.getConnector();
  } catch (AccumuloException | AccumuloSecurityException e) {
    throw new RuntimeException(e);
  }
  for (int i = 0; i < opts.tables; i++) {
    tableNames.add(String.format(opts.prefix + "%04d", i));
  }
  if (!opts.readonly) {
    for (String table : tableNames)
      connector.tableOperations().create(table);
    MultiTableBatchWriter b;
    try {
      b = connector.createMultiTableBatchWriter(bwOpts.getBatchWriterConfig());
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
    // populate
    for (int i = 0; i < opts.count; i++) {
      Mutation m = new Mutation(new Text(String.format("%06d", i)));
      m.put(new Text("col" + Integer.toString((i % 3) + 1)), new Text("qual"), new Value("junk".getBytes(UTF_8)));
      b.getBatchWriter(tableNames.get(i % tableNames.size())).addMutation(m);
    }
    try {
      b.close();
    } catch (MutationsRejectedException e) {
      throw new RuntimeException(e);
    }
  }
  try {
    readBack(opts, scanOpts, connector, tableNames);
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
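A minimal sketch of what a verification pass such as readBack might do with the ScannerOpts parsed above: open a Scanner per table, apply the configured batch size, and count entries. This is illustrative rather than the project's readBack implementation, and the scanBatchSize field name on ScannerOpts is an assumption.
static void readBackSketch(Connector connector, ScannerOpts scanOpts, List<String> tableNames, int expectedPerTable) throws Exception {
  for (String table : tableNames) {
    try (Scanner scanner = connector.createScanner(table, Authorizations.EMPTY)) {
      scanner.setBatchSize(scanOpts.scanBatchSize); // assumed field name on ScannerOpts
      int count = 0;
      for (Entry<Key, Value> entry : scanner)
        count++;
      if (count != expectedPerTable)
        throw new RuntimeException("Table " + table + " had " + count + " entries, expected " + expectedPerTable);
    }
  }
}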
Use of org.apache.accumulo.core.cli.BatchWriterOpts in project accumulo by apache.
The class ReadWriteIT, method ingest:
public static void ingest(Connector connector, ClientConfiguration clientConfig, String principal, int rows, int cols, int width, int offset, String colf, String tableName) throws Exception {
  TestIngest.Opts opts = new TestIngest.Opts();
  opts.rows = rows;
  opts.cols = cols;
  opts.dataSize = width;
  opts.startRow = offset;
  opts.columnFamily = colf;
  opts.createTable = true;
  opts.setTableName(tableName);
  if (clientConfig.hasSasl()) {
    opts.updateKerberosCredentials(clientConfig);
  } else {
    opts.setPrincipal(principal);
  }
  TestIngest.ingest(connector, opts, new BatchWriterOpts());
}
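A hypothetical call site for the helper above, ingesting 10,000 rows of one 50-byte column each into column family "colf" starting at row offset 0; all argument values here are illustrative.
ingest(connector, clientConfig, "root", 10000, 1, 50, 0, "colf", tableName);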
Use of org.apache.accumulo.core.cli.BatchWriterOpts in project accumulo by apache.
The class SimpleBalancerFairnessIT, method simpleBalancerFairness:
@Test
public void simpleBalancerFairness() throws Exception {
  Connector c = getConnector();
  c.tableOperations().create("test_ingest");
  c.tableOperations().setProperty("test_ingest", Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
  c.tableOperations().create("unused");
  TreeSet<Text> splits = TestIngest.getSplitPoints(0, 10000000, 500);
  log.info("Creating {} splits", splits.size());
  c.tableOperations().addSplits("unused", splits);
  List<String> tservers = c.instanceOperations().getTabletServers();
  TestIngest.Opts opts = new TestIngest.Opts();
  opts.rows = 50000;
  opts.setPrincipal("root");
  TestIngest.ingest(c, opts, new BatchWriterOpts());
  c.tableOperations().flush("test_ingest", null, null, false);
  sleepUninterruptibly(45, TimeUnit.SECONDS);
  Credentials creds = new Credentials("root", new PasswordToken(ROOT_PASSWORD));
  ClientContext context = new ClientContext(c.getInstance(), creds, getClientConfig());
  MasterMonitorInfo stats = null;
  int unassignedTablets = 1;
  for (int i = 0; unassignedTablets > 0 && i < 10; i++) {
    MasterClientService.Iface client = null;
    while (true) {
      try {
        client = MasterClient.getConnectionWithRetry(context);
        stats = client.getMasterStats(Tracer.traceInfo(), creds.toThrift(c.getInstance()));
        break;
      } catch (ThriftNotActiveServiceException e) {
        // Let it loop, fetching a new location
        sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
      } finally {
        if (client != null)
          MasterClient.close(client);
      }
    }
    unassignedTablets = stats.getUnassignedTablets();
    if (unassignedTablets > 0) {
      log.info("Found {} unassigned tablets, sleeping 3 seconds for tablet assignment", unassignedTablets);
      Thread.sleep(3000);
    }
  }
  assertEquals("Unassigned tablets were not assigned within 30 seconds", 0, unassignedTablets);
  // Compute online tablets per tserver
  List<Integer> counts = new ArrayList<>();
  for (TabletServerStatus server : stats.tServerInfo) {
    int count = 0;
    for (TableInfo table : server.tableMap.values()) {
      count += table.onlineTablets;
    }
    counts.add(count);
  }
  assertTrue("Expected to have at least two TabletServers", counts.size() > 1);
  for (int i = 1; i < counts.size(); i++) {
    int diff = Math.abs(counts.get(0) - counts.get(i));
    assertTrue("Expected difference in tablets to be less than or equal to " + counts.size() + " but was " + diff + ". Counts " + counts, diff <= tservers.size());
  }
}
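The master-stats retry loop in the test above can be read as a small helper of its own; the sketch below uses the same client calls as the test, but the helper itself is illustrative and not part of the Accumulo API.
static MasterMonitorInfo fetchMasterStats(ClientContext context, Credentials creds, Instance instance) throws Exception {
  while (true) {
    MasterClientService.Iface client = null;
    try {
      client = MasterClient.getConnectionWithRetry(context);
      // ask the active master for its monitoring snapshot (unassigned tablets, tserver info, ...)
      return client.getMasterStats(Tracer.traceInfo(), creds.toThrift(instance));
    } catch (ThriftNotActiveServiceException e) {
      // the located master is no longer active; sleep briefly and look it up again
      sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
    } finally {
      if (client != null)
        MasterClient.close(client);
    }
  }
}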
Use of org.apache.accumulo.core.cli.BatchWriterOpts in project accumulo by apache.
The class SplitIT, method tabletShouldSplit:
@Test
public void tabletShouldSplit() throws Exception {
  Connector c = getConnector();
  String table = getUniqueNames(1)[0];
  c.tableOperations().create(table);
  c.tableOperations().setProperty(table, Property.TABLE_SPLIT_THRESHOLD.getKey(), "256K");
  c.tableOperations().setProperty(table, Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "1K");
  TestIngest.Opts opts = new TestIngest.Opts();
  VerifyIngest.Opts vopts = new VerifyIngest.Opts();
  opts.rows = 100000;
  opts.setTableName(table);
  ClientConfiguration clientConfig = cluster.getClientConfig();
  if (clientConfig.hasSasl()) {
    opts.updateKerberosCredentials(clientConfig);
    vopts.updateKerberosCredentials(clientConfig);
  } else {
    opts.setPrincipal(getAdminPrincipal());
    vopts.setPrincipal(getAdminPrincipal());
  }
  TestIngest.ingest(c, opts, new BatchWriterOpts());
  vopts.rows = opts.rows;
  vopts.setTableName(table);
  VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
  while (c.tableOperations().listSplits(table).size() < 10) {
    sleepUninterruptibly(15, TimeUnit.SECONDS);
  }
  Table.ID id = Table.ID.of(c.tableOperations().tableIdMap().get(table));
  try (Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
    KeyExtent extent = new KeyExtent(id, null, null);
    s.setRange(extent.toMetadataRange());
    MetadataSchema.TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(s);
    int count = 0;
    int shortened = 0;
    for (Entry<Key, Value> entry : s) {
      extent = new KeyExtent(entry.getKey().getRow(), entry.getValue());
      if (extent.getEndRow() != null && extent.getEndRow().toString().length() < 14)
        shortened++;
      count++;
    }
    assertTrue("Shortened should be greater than zero: " + shortened, shortened > 0);
    assertTrue("Count should be greater than 10: " + count, count > 10);
  }
  String[] args;
  if (clientConfig.hasSasl()) {
    ClusterUser rootUser = getAdminUser();
    args = new String[] { "-i", cluster.getInstanceName(), "-u", rootUser.getPrincipal(), "--keytab", rootUser.getKeytab().getAbsolutePath(), "-z", cluster.getZooKeepers() };
  } else {
    PasswordToken token = (PasswordToken) getAdminToken();
    args = new String[] { "-i", cluster.getInstanceName(), "-u", "root", "-p", new String(token.getPassword(), UTF_8), "-z", cluster.getZooKeepers() };
  }
  assertEquals(0, getCluster().getClusterControl().exec(CheckForMetadataProblems.class, args));
}
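The wait-for-splits loop in the test above runs without a bound; the sketch below shows the same step with an explicit timeout. The minSplits target and the timeout value are illustrative, and the helper is not part of the Accumulo API.
static void waitForSplits(Connector c, String table, int minSplits, long timeoutMillis) throws Exception {
  long deadline = System.currentTimeMillis() + timeoutMillis;
  // poll the table's split points until the target count is reached or the deadline passes
  while (c.tableOperations().listSplits(table).size() < minSplits) {
    if (System.currentTimeMillis() > deadline)
      throw new IllegalStateException(table + " did not reach " + minSplits + " splits within " + timeoutMillis + " ms");
    sleepUninterruptibly(15, TimeUnit.SECONDS);
  }
}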