Use of org.apache.accumulo.core.cli.ScannerOpts in the Apache Accumulo project.
Example: the writeLots method of the WriteLotsIT class.
/**
 * Concurrently ingests 50,000 rows (five worker threads, 10,000 rows each at
 * disjoint start offsets) into a freshly created table, then verifies that
 * every row is readable.
 *
 * @throws Exception the first ingest failure captured from any worker, or any
 *         setup/verification failure
 */
@Test
public void writeLots() throws Exception {
  final Connector c = getConnector();
  final String tableName = getUniqueNames(1)[0];
  c.tableOperations().create(tableName);
  // Holds the first exception thrown by any ingest worker; rethrown below.
  final AtomicReference<Exception> ref = new AtomicReference<>();
  final ClientConfiguration clientConfig = getCluster().getClientConfig();
  final int THREADS = 5;
  // Bounded queue sized to THREADS so all tasks can be queued without rejection.
  ThreadPoolExecutor tpe = new ThreadPoolExecutor(0, THREADS, 0, TimeUnit.SECONDS, new ArrayBlockingQueue<>(THREADS));
  for (int i = 0; i < THREADS; i++) {
    final int index = i;
    Runnable r = new Runnable() {
      @Override
      public void run() {
        try {
          TestIngest.Opts opts = new TestIngest.Opts();
          opts.startRow = index * 10000;
          opts.rows = 10000;
          opts.setTableName(tableName);
          if (clientConfig.hasSasl()) {
            opts.updateKerberosCredentials(clientConfig);
          } else {
            opts.setPrincipal(getAdminPrincipal());
          }
          BatchWriterOpts bwOpts = new BatchWriterOpts();
          bwOpts.batchMemory = 1024L * 1024;
          bwOpts.batchThreads = 2;
          // Bug fix: previously passed `new BatchWriterOpts()` here, which
          // silently discarded the batchMemory/batchThreads configured above.
          TestIngest.ingest(c, opts, bwOpts);
        } catch (Exception ex) {
          ref.set(ex);
        }
      }
    };
    tpe.execute(r);
  }
  tpe.shutdown();
  tpe.awaitTermination(90, TimeUnit.SECONDS);
  if (ref.get() != null) {
    throw ref.get();
  }
  VerifyIngest.Opts vopts = new VerifyIngest.Opts();
  vopts.rows = 10000 * THREADS;
  vopts.setTableName(tableName);
  if (clientConfig.hasSasl()) {
    vopts.updateKerberosCredentials(clientConfig);
  } else {
    vopts.setPrincipal(getAdminPrincipal());
  }
  VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
}
Use of org.apache.accumulo.core.cli.ScannerOpts in the Apache Accumulo project.
Example: the renameTest method of the RenameIT class.
/**
 * Exercises table rename: data written before a rename must remain readable
 * under the new name, and renaming back must restore the original view.
 */
@Test
public void renameTest() throws Exception {
  String[] names = getUniqueNames(2);
  String originalName = names[0];
  String newName = names[1];
  BatchWriterOpts writerOpts = new BatchWriterOpts();
  ScannerOpts scannerOpts = new ScannerOpts();
  TestIngest.Opts ingestOpts = new TestIngest.Opts();
  ingestOpts.createTable = true;
  ingestOpts.setTableName(originalName);
  final ClientConfiguration clientConfig = cluster.getClientConfig();
  if (clientConfig.hasSasl()) {
    ingestOpts.updateKerberosCredentials(clientConfig);
  } else {
    ingestOpts.setPrincipal(getAdminPrincipal());
  }
  Connector conn = getConnector();
  // Ingest, rename, then ingest again under the original name
  // (createTable=true — presumably recreates the table; confirm in TestIngest).
  TestIngest.ingest(conn, ingestOpts, writerOpts);
  conn.tableOperations().rename(originalName, newName);
  TestIngest.ingest(conn, ingestOpts, writerOpts);
  VerifyIngest.Opts verifyOpts = new VerifyIngest.Opts();
  if (clientConfig.hasSasl()) {
    verifyOpts.updateKerberosCredentials(clientConfig);
  } else {
    verifyOpts.setPrincipal(getAdminPrincipal());
  }
  // The renamed table must still hold the first batch of data.
  verifyOpts.setTableName(newName);
  VerifyIngest.verifyIngest(conn, verifyOpts, scannerOpts);
  // Drop the recreated table and rename back; data must follow the rename.
  conn.tableOperations().delete(originalName);
  conn.tableOperations().rename(newName, originalName);
  verifyOpts.setTableName(originalName);
  VerifyIngest.verifyIngest(conn, verifyOpts, scannerOpts);
  FunctionalTestUtils.assertNoDanglingFateLocks(getConnector().getInstance(), getCluster());
}
Use of org.apache.accumulo.core.cli.ScannerOpts in the Apache Accumulo project.
Example: the main method of the RemoveEntriesForMissingFiles class.
/**
 * Command-line entry point: parses scanner/batch-writer options, builds a
 * client context from the parsed credentials, and checks all tables for
 * metadata entries referencing missing files (repairing them when opts.fix
 * is set).
 *
 * @param args command-line arguments forwarded to the options parser
 */
public static void main(String[] args) throws Exception {
  Opts opts = new Opts();
  ScannerOpts scanOpts = new ScannerOpts();
  BatchWriterOpts bwOpts = new BatchWriterOpts();
  opts.parseArgs(RemoveEntriesForMissingFiles.class.getName(), args, scanOpts, bwOpts);
  Credentials credentials = new Credentials(opts.getPrincipal(), opts.getToken());
  ClientContext context = new ClientContext(opts.getInstance(), credentials, ClientConfiguration.loadDefault());
  checkAllTables(context, opts.fix);
}
Use of org.apache.accumulo.core.cli.ScannerOpts in the Apache Accumulo project.
Example: the gcTest method of the GarbageCollectorIT class.
/**
 * Verifies that the garbage collector reclaims obsolete files: with GC
 * stopped, ingest plus a full compaction leave extra files behind; after
 * restarting the cluster's GC the file count must drop while the ingested
 * data remains fully readable.
 */
@Test
public void gcTest() throws Exception {
  killMacGc();
  final String table = "test_ingest";
  Connector conn = getConnector();
  conn.tableOperations().create(table);
  // Small split threshold so the ingest produces many files.
  conn.tableOperations().setProperty(table, Property.TABLE_SPLIT_THRESHOLD.getKey(), "5K");
  TestIngest.Opts ingestOpts = new TestIngest.Opts();
  VerifyIngest.Opts verifyOpts = new VerifyIngest.Opts();
  verifyOpts.rows = ingestOpts.rows = 10000;
  verifyOpts.cols = ingestOpts.cols = 1;
  ingestOpts.setPrincipal("root");
  verifyOpts.setPrincipal("root");
  TestIngest.ingest(conn, cluster.getFileSystem(), ingestOpts, new BatchWriterOpts());
  conn.tableOperations().compact(table, null, null, true, true);
  // Poll until the file count stops growing (no GC is running yet).
  int before = countFiles();
  while (true) {
    sleepUninterruptibly(1, TimeUnit.SECONDS);
    int current = countFiles();
    if (current <= before) {
      break;
    }
    before = current;
  }
  // Restart the garbage collector and give it time to collect.
  getCluster().start();
  sleepUninterruptibly(15, TimeUnit.SECONDS);
  int after = countFiles();
  VerifyIngest.verifyIngest(conn, verifyOpts, new ScannerOpts());
  assertTrue(after < before);
}
Use of org.apache.accumulo.core.cli.ScannerOpts in the Apache Accumulo project.
Example: the main method of the QueryMetadataTable class.
/**
 * Scans the metadata table (printing a progress dot per entry and a running
 * count every 72 entries), records the current tablet-location value for the
 * metadata tablet's own row, collects all non-metadata rows, then issues
 * opts.numQueries random lookups against those rows from a fixed-size thread
 * pool and reports queries-per-second.
 *
 * @param args command-line arguments forwarded to the options parser
 */
public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
  Opts opts = new Opts();
  ScannerOpts scanOpts = new ScannerOpts();
  opts.parseArgs(QueryMetadataTable.class.getName(), args, scanOpts);
  Connector connector = opts.getConnector();
  HashSet<Text> rowSet = new HashSet<>();
  int count = 0;
  try (Scanner scanner = connector.createScanner(MetadataTable.NAME, opts.auths)) {
    scanner.setBatchSize(scanOpts.scanBatchSize);
    Text mdrow = new Text(KeyExtent.getMetadataEntry(MetadataTable.ID, null));
    for (Entry<Key, Value> entry : scanner) {
      System.out.print(".");
      if (count % 72 == 0) {
        System.out.printf(" %,d%n", count);
      }
      // Remember the current location of the metadata tablet itself.
      if (entry.getKey().compareRow(mdrow) == 0 && entry.getKey().getColumnFamily().compareTo(TabletsSection.CurrentLocationColumnFamily.NAME) == 0) {
        System.out.println(entry.getKey() + " " + entry.getValue());
        location = entry.getValue().toString();
      }
      // Collect query candidates, skipping rows belonging to the metadata table.
      if (!entry.getKey().getRow().toString().startsWith(MetadataTable.ID.canonicalID()))
        rowSet.add(entry.getKey().getRow());
      count++;
    }
  }
  System.out.printf(" %,d%n", count);
  ArrayList<Text> rows = new ArrayList<>(rowSet);
  Random r = new Random();
  ExecutorService tp = Executors.newFixedThreadPool(opts.numThreads);
  long t1 = System.currentTimeMillis();
  for (int i = 0; i < opts.numQueries; i++) {
    int index = r.nextInt(rows.size());
    MDTQuery mdtq = new MDTQuery(rows.get(index));
    tp.submit(mdtq);
  }
  tp.shutdown();
  try {
    tp.awaitTermination(1, TimeUnit.HOURS);
  } catch (InterruptedException e) {
    // Restore the interrupt status before propagating (best practice:
    // never swallow an interrupt). Also fixes the "Excecutor" typo.
    Thread.currentThread().interrupt();
    log.error("Failed while awaiting the ExecutorService to terminate.", e);
    throw new RuntimeException(e);
  }
  long t2 = System.currentTimeMillis();
  double delta = (t2 - t1) / 1000.0;
  System.out.println("time : " + delta + " queries per sec : " + (opts.numQueries / delta));
}
Aggregations