Use of org.apache.accumulo.core.client.AccumuloClient in project accumulo by apache.
The class TabletStateChangeIteratorIT, method test().
@Test
public void test() throws AccumuloException, AccumuloSecurityException, TableExistsException,
    TableNotFoundException {
  try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
    String[] tables = getUniqueNames(6);
    final String t1 = tables[0];
    final String t2 = tables[1];
    final String t3 = tables[2];
    final String metaCopy1 = tables[3];
    final String metaCopy2 = tables[4];
    final String metaCopy3 = tables[5];

    // create some metadata
    createTable(client, t1, true);
    createTable(client, t2, false);
    createTable(client, t3, true);

    // examine a clone of the metadata table, so we can manipulate it
    copyTable(client, MetadataTable.NAME, metaCopy1);

    State state = new State(client);
    int tabletsInFlux = findTabletsNeedingAttention(client, metaCopy1, state);
    while (tabletsInFlux > 0) {
      log.debug("Waiting for {} tablets for {}", tabletsInFlux, metaCopy1);
      UtilWaitThread.sleep(500);
      copyTable(client, MetadataTable.NAME, metaCopy1);
      tabletsInFlux = findTabletsNeedingAttention(client, metaCopy1, state);
    }
    assertEquals("No tables should need attention", 0,
        findTabletsNeedingAttention(client, metaCopy1, state));

    // The metadata table stabilized and metaCopy1 contains a copy suitable for testing. Before
    // metaCopy1 is modified, copy it for the subsequent tests.
    copyTable(client, metaCopy1, metaCopy2);
    copyTable(client, metaCopy1, metaCopy3);

    // test the assigned case (no location)
    removeLocation(client, metaCopy1, t3);
    assertEquals("Should have two tablets without a loc", 2,
        findTabletsNeedingAttention(client, metaCopy1, state));

    // test the case where the assignment is to a dead tserver
    reassignLocation(client, metaCopy2, t3);
    assertEquals("Should have one tablet that needs to be unassigned", 1,
        findTabletsNeedingAttention(client, metaCopy2, state));

    // test the case where there is an ongoing merge
    state = new State(client) {
      @Override
      public Collection<MergeInfo> merges() {
        TableId tableIdToModify = TableId.of(client.tableOperations().tableIdMap().get(t3));
        return Collections.singletonList(
            new MergeInfo(new KeyExtent(tableIdToModify, null, null), MergeInfo.Operation.MERGE));
      }
    };
    assertEquals("Should have one tablet that needs to be chopped or unassigned", 1,
        findTabletsNeedingAttention(client, metaCopy2, state));

    // test the bad tablet location state case (inconsistent metadata)
    state = new State(client);
    addDuplicateLocation(client, metaCopy3, t3);
    assertEquals("Should have 1 tablet that needs a metadata repair", 1,
        findTabletsNeedingAttention(client, metaCopy3, state));

    // clean up
    dropTables(client, t1, t2, t3, metaCopy1, metaCopy2, metaCopy3);
  }
}
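The createTable(...), copyTable(...), findTabletsNeedingAttention(...), removeLocation(...), reassignLocation(...), addDuplicateLocation(...), and dropTables(...) helpers are private to the test class and are not part of this snippet. As a rough, hypothetical sketch of what a plain scan-and-rewrite table copy could look like against the public AccumuloClient API (the real copyTable(...) may also carry over iterator and locality group settings), something along these lines would work:

// Hypothetical sketch only; not the test's real copyTable(...) helper.
private static void copyTableSketch(AccumuloClient client, String source, String target)
    throws Exception {
  if (!client.tableOperations().exists(target)) {
    client.tableOperations().create(target);
  }
  try (Scanner scanner = client.createScanner(source, Authorizations.EMPTY);
      BatchWriter writer = client.createBatchWriter(target)) {
    for (Map.Entry<Key,Value> entry : scanner) {
      Key k = entry.getKey();
      Mutation m = new Mutation(k.getRow());
      // preserve visibility and timestamp so the copy mirrors the source entries
      m.put(k.getColumnFamily(), k.getColumnQualifier(), k.getColumnVisibilityParsed(),
          k.getTimestamp(), entry.getValue());
      writer.addMutation(m);
    }
  }
}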
Use of org.apache.accumulo.core.client.AccumuloClient in project accumulo by apache.
The class ScanIdIT, method testScanId().
/**
 * @throws Exception
 *           any exception is a test failure.
 */
@Test
public void testScanId() throws Exception {
  final String tableName = getUniqueNames(1)[0];
  try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
    client.tableOperations().create(tableName);

    addSplits(client, tableName);
    log.info("Splits added");

    generateSampleData(client, tableName);
    log.info("Generated data for {}", tableName);

    attachSlowIterator(client, tableName);

    CountDownLatch latch = new CountDownLatch(NUM_SCANNERS);
    List<ScannerThread> scanThreadsToClose = new ArrayList<>(NUM_SCANNERS);
    for (int scannerIndex = 0; scannerIndex < NUM_SCANNERS; scannerIndex++) {
      ScannerThread st = new ScannerThread(client, scannerIndex, tableName, latch);
      scanThreadsToClose.add(st);
      pool.submit(st);
    }

    // wait for scanners to report a result.
    while (testInProgress.get()) {
      if (resultsByWorker.size() < NUM_SCANNERS) {
        log.trace("Results reported {}", resultsByWorker.size());
        sleepUninterruptibly(750, TimeUnit.MILLISECONDS);
      } else {
        // each worker has reported at least one result.
        testInProgress.set(false);
        log.debug("Final result count {}", resultsByWorker.size());
        // delay to allow scanners to react to end of test and cleanly close.
        sleepUninterruptibly(1, TimeUnit.SECONDS);
      }
    }

    Set<Long> scanIds = getScanIds(client);
    assertTrue("Expected at least " + NUM_SCANNERS + " scanIds, but saw " + scanIds.size(),
        scanIds.size() >= NUM_SCANNERS);

    scanThreadsToClose.forEach(st -> {
      if (st.scanner != null) {
        st.scanner.close();
      }
    });

    while (!(scanIds = getScanIds(client)).isEmpty()) {
      log.debug("Waiting for active scans to stop...");
      Thread.sleep(200);
    }
    assertEquals("Expected no scanIds after closing scanners", 0, scanIds.size());
  }
}
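The getScanIds(...) helper used above is not shown in this snippet. Assuming it simply collects the ids of all active scans reported by each tablet server through InstanceOperations, a minimal sketch (hypothetical name and shape) could be:

// Hypothetical sketch only; not the real getScanIds(...) helper from ScanIdIT.
private static Set<Long> getScanIdsSketch(AccumuloClient client) throws Exception {
  Set<Long> scanIds = new HashSet<>();
  for (String tserver : client.instanceOperations().getTabletServers()) {
    for (ActiveScan activeScan : client.instanceOperations().getActiveScans(tserver)) {
      scanIds.add(activeScan.getScanid());
    }
  }
  return scanIds;
}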
Use of org.apache.accumulo.core.client.AccumuloClient in project accumulo by apache.
The class ShutdownIT, method shutdownDuringDeleteTable().
@Test
public void shutdownDuringDeleteTable() throws Exception {
  try (AccumuloClient c = Accumulo.newClient().from(getClientProperties()).build()) {
    for (int i = 0; i < 10; i++) {
      c.tableOperations().create("table" + i);
    }
    final AtomicReference<Exception> ref = new AtomicReference<>();
    Thread async = new Thread(() -> {
      try {
        for (int i = 0; i < 10; i++) {
          c.tableOperations().delete("table" + i);
        }
      } catch (Exception ex) {
        ref.set(ex);
      }
    });
    async.start();
    sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
    assertEquals(0, cluster.exec(Admin.class, "stopAll").getProcess().waitFor());
    if (ref.get() != null) {
      throw ref.get();
    }
  }
}
Use of org.apache.accumulo.core.client.AccumuloClient in project accumulo by apache.
The class SparseColumnFamilyIT, method sparceColumnFamily().
@Test
public void sparceColumnFamily() throws Exception {
  String scftt = getUniqueNames(1)[0];
  try (AccumuloClient c = Accumulo.newClient().from(getClientProps()).build()) {
    c.tableOperations().create(scftt);

    try (BatchWriter bw = c.createBatchWriter(scftt)) {
      // create file in the tablet that has mostly column family 0, with a few entries for column
      // family 1
      bw.addMutation(nm(0, 1, 0));
      for (int i = 1; i < 99999; i++) {
        bw.addMutation(nm(i * 2, 0, i));
      }
      bw.addMutation(nm(99999 * 2, 1, 99999));
      bw.flush();

      c.tableOperations().flush(scftt, null, null, true);

      // create a file that has column family 1 and 0 interleaved
      for (int i = 0; i < 100000; i++) {
        bw.addMutation(nm(i * 2 + 1, i % 2 == 0 ? 0 : 1, i));
      }
    }
    c.tableOperations().flush(scftt, null, null, true);

    try (Scanner scanner = c.createScanner(scftt, Authorizations.EMPTY)) {
      for (int i = 0; i < 200; i++) {
        // every time we search for column family 1, it will scan the entire file
        // that has mostly column family 0 until the bug is fixed
        scanner.setRange(new Range(String.format("%06d", i), null));
        scanner.clearColumns();
        scanner.setBatchSize(3);
        scanner.fetchColumnFamily(new Text(String.format("%03d", 1)));

        Iterator<Entry<Key,Value>> iter = scanner.iterator();
        if (iter.hasNext()) {
          Entry<Key,Value> entry = iter.next();
          if (!"001".equals(entry.getKey().getColumnFamilyData().toString())) {
            throw new Exception();
          }
        }
      }
    }
  }
}
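The nm(row, fam, value) mutation factory is not shown in this snippet. Since the scanner above builds ranges with String.format("%06d", i) and fetches the family String.format("%03d", 1), a sketch consistent with those formats (hypothetical; the real helper may encode the qualifier and value differently) could be:

// Hypothetical sketch only; not the real nm(...) helper from SparseColumnFamilyIT.
private static Mutation nm(int row, int cf, int val) {
  Mutation m = new Mutation(String.format("%06d", row));
  m.put(String.format("%03d", cf), "", Integer.toString(val));
  return m;
}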
Use of org.apache.accumulo.core.client.AccumuloClient in project accumulo by apache.
The class SummaryIT, method basicSummaryTest().
@Test
public void basicSummaryTest() throws Exception {
  final String table = getUniqueNames(1)[0];
  try (AccumuloClient c = Accumulo.newClient().from(getClientProps()).build()) {
    NewTableConfiguration ntc = new NewTableConfiguration();
    SummarizerConfiguration sc1 =
        SummarizerConfiguration.builder(BasicSummarizer.class.getName()).build();
    ntc.enableSummarization(sc1);
    c.tableOperations().create(table, ntc);

    BatchWriter bw = writeData(table, c);

    Collection<Summary> summaries = c.tableOperations().summaries(table).flush(false).retrieve();
    assertEquals(0, summaries.size());

    LongSummaryStatistics stats = getTimestampStats(table, c);

    summaries = c.tableOperations().summaries(table).flush(true).retrieve();
    checkSummaries(summaries, sc1, 1, 0, 0, TOTAL_STAT, 100_000L, MIN_TIMESTAMP_STAT,
        stats.getMin(), MAX_TIMESTAMP_STAT, stats.getMax(), DELETES_STAT, 0L);

    Mutation m = new Mutation(String.format("r%09x", 999));
    m.put("f1", "q1", "999-0");
    m.putDelete("f1", "q2");
    bw.addMutation(m);
    bw.flush();

    c.tableOperations().flush(table, null, null, true);

    stats = getTimestampStats(table, c);
    summaries = c.tableOperations().summaries(table).retrieve();
    checkSummaries(summaries, sc1, 2, 0, 0, TOTAL_STAT, 100_002L, MIN_TIMESTAMP_STAT,
        stats.getMin(), MAX_TIMESTAMP_STAT, stats.getMax(), DELETES_STAT, 1L);

    bw.close();

    c.tableOperations().compact(table, new CompactionConfig().setWait(true));
    summaries = c.tableOperations().summaries(table).retrieve();
    checkSummaries(summaries, sc1, 1, 0, 0, TOTAL_STAT, 100_000L, MIN_TIMESTAMP_STAT,
        stats.getMin(), MAX_TIMESTAMP_STAT, stats.getMax(), DELETES_STAT, 0L);

    // split tablet into two
    String sp1 = String.format("r%09x", 50_000);
    addSplits(table, c, sp1);

    summaries = c.tableOperations().summaries(table).retrieve();
    checkSummaries(summaries, sc1, 1, 0, 0, TOTAL_STAT, 100_000L, MIN_TIMESTAMP_STAT,
        stats.getMin(), MAX_TIMESTAMP_STAT, stats.getMax(), DELETES_STAT, 0L);

    // compact 2nd tablet
    c.tableOperations().compact(table,
        new CompactionConfig().setStartRow(new Text(sp1)).setWait(true));
    summaries = c.tableOperations().summaries(table).retrieve();
    checkSummaries(summaries, sc1, 2, 0, 1, TOTAL_STAT, 113_999L, MIN_TIMESTAMP_STAT,
        stats.getMin(), MAX_TIMESTAMP_STAT, stats.getMax(), DELETES_STAT, 0L);

    // get summaries for first tablet
    stats = getTimestampStats(table, c, sp1, null);
    summaries = c.tableOperations().summaries(table).startRow(sp1).retrieve();
    checkSummaries(summaries, sc1, 1, 0, 0, TOTAL_STAT, 49_999L, MIN_TIMESTAMP_STAT,
        stats.getMin(), MAX_TIMESTAMP_STAT, stats.getMax(), DELETES_STAT, 0L);

    // compact all tablets and regenerate all summaries
    c.tableOperations().compact(table, new CompactionConfig());
    summaries = c.tableOperations().summaries(table).retrieve();
    stats = getTimestampStats(table, c);
    checkSummaries(summaries, sc1, 2, 0, 0, TOTAL_STAT, 100_000L, MIN_TIMESTAMP_STAT,
        stats.getMin(), MAX_TIMESTAMP_STAT, stats.getMax(), DELETES_STAT, 0L);

    summaries = c.tableOperations().summaries(table).startRow(String.format("r%09x", 75_000))
        .endRow(String.format("r%09x", 80_000)).retrieve();
    Summary summary = Iterables.getOnlyElement(summaries);
    assertEquals(1, summary.getFileStatistics().getTotal());
    assertEquals(1, summary.getFileStatistics().getExtra());
    long total = summary.getStatistics().get(TOTAL_STAT);
    assertTrue("Total " + total + " out of expected range", total > 0 && total <= 10_000);

    // test adding and removing
    c.tableOperations().removeSummarizers(table, sc -> sc.getClassName().contains("foo"));
    List<SummarizerConfiguration> summarizers = c.tableOperations().listSummarizers(table);
    assertEquals(1, summarizers.size());
    assertTrue(summarizers.contains(sc1));

    c.tableOperations().removeSummarizers(table,
        sc -> sc.getClassName().equals(BasicSummarizer.class.getName()));
    summarizers = c.tableOperations().listSummarizers(table);
    assertEquals(0, summarizers.size());

    c.tableOperations().compact(table, new CompactionConfig().setWait(true));
    summaries = c.tableOperations().summaries(table).retrieve();
    assertEquals(0, summaries.size());

    c.tableOperations().addSummarizers(table, sc1);
    c.tableOperations().compact(table, new CompactionConfig().setWait(true));
    summaries = c.tableOperations().summaries(table).retrieve();
    checkSummaries(summaries, sc1, 2, 0, 0, TOTAL_STAT, 100_000L, MIN_TIMESTAMP_STAT,
        stats.getMin(), MAX_TIMESTAMP_STAT, stats.getMax(), DELETES_STAT, 0L);
  }
}
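The writeData(...), getTimestampStats(...), addSplits(...), and checkSummaries(...) helpers are not shown in this snippet. The timestamp statistics that the assertions compare against can be gathered with a plain scan; a minimal sketch of the two-argument getTimestampStats(...) (hypothetical; the four-argument overload presumably also restricts the scan to a row range) could be:

// Hypothetical sketch only; not the real getTimestampStats(...) helper from SummaryIT.
private static LongSummaryStatistics getTimestampStats(String table, AccumuloClient c)
    throws TableNotFoundException {
  try (Scanner scanner = c.createScanner(table, Authorizations.EMPTY)) {
    LongSummaryStatistics stats = new LongSummaryStatistics();
    for (Map.Entry<Key,Value> entry : scanner) {
      stats.accept(entry.getKey().getTimestamp());
    }
    return stats;
  }
}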