Use of org.apache.accumulo.core.client.Connector in project accumulo by apache.
From the class ConcurrentDeleteTableIT, method testConcurrentDeleteTablesOps.
@Test
public void testConcurrentDeleteTablesOps() throws Exception {
  final Connector c = getConnector();
  String[] tables = getUniqueNames(2);
  TreeSet<Text> splits = createSplits();
  ExecutorService es = Executors.newFixedThreadPool(20);
  int count = 0;
  for (final String table : tables) {
    c.tableOperations().create(table);
    c.tableOperations().addSplits(table, splits);
    writeData(c, table);
    if (count == 1) {
      c.tableOperations().flush(table, null, null, true);
    }
    count++;
    int numDeleteOps = 20;
    final CountDownLatch cdl = new CountDownLatch(numDeleteOps);
    List<Future<?>> futures = new ArrayList<>();
    for (int i = 0; i < numDeleteOps; i++) {
      Future<?> future = es.submit(new Runnable() {

        @Override
        public void run() {
          try {
            cdl.countDown();
            cdl.await();
            c.tableOperations().delete(table);
          } catch (TableNotFoundException e) {
            // expected
          } catch (InterruptedException | AccumuloException | AccumuloSecurityException e) {
            throw new RuntimeException(e);
          }
        }
      });
      futures.add(future);
    }
    for (Future<?> future : futures) {
      future.get();
    }
    try {
      c.createScanner(table, Authorizations.EMPTY);
      Assert.fail("Expected table " + table + " to be gone.");
    } catch (TableNotFoundException tnfe) {
      // expected
    }
    FunctionalTestUtils.assertNoDanglingFateLocks(getConnector().getInstance(), getCluster());
  }
  es.shutdown();
}
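The createSplits() and writeData() helpers referenced above are defined elsewhere in ConcurrentDeleteTableIT and are not shown here. A minimal sketch of what they might look like, assuming a simple numeric row layout (the split points and data below are assumptions, not the test's actual implementation):

// Hypothetical sketch of the helpers used by testConcurrentDeleteTablesOps.
private TreeSet<Text> createSplits() {
  TreeSet<Text> splits = new TreeSet<>();
  for (int i = 1; i < 10; i++) {
    // evenly spaced split points: 0100, 0200, ..., 0900 (assumed layout)
    splits.add(new Text(String.format("%04d", i * 100)));
  }
  return splits;
}

private void writeData(Connector c, String table) throws Exception {
  BatchWriter bw = c.createBatchWriter(table, new BatchWriterConfig());
  for (int i = 0; i < 1000; i++) {
    Mutation m = new Mutation(String.format("%04d", i));
    m.put("cf", "cq", "v" + i);
    bw.addMutation(m);
  }
  // close() flushes any buffered mutations to the tablet servers
  bw.close();
}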
Use of org.apache.accumulo.core.client.Connector in project accumulo by apache.
From the class ConfigurableCompactionIT, method test.
@Test
public void test() throws Exception {
  final Connector c = getConnector();
  final String tableName = getUniqueNames(1)[0];
  c.tableOperations().create(tableName);
  c.tableOperations().setProperty(tableName, Property.TABLE_COMPACTION_STRATEGY.getKey(), SimpleCompactionStrategy.class.getName());
  runTest(c, tableName, 3);
  c.tableOperations().setProperty(tableName, Property.TABLE_COMPACTION_STRATEGY_PREFIX.getKey() + "count", "5");
  runTest(c, tableName, 5);
}
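runTest(c, tableName, n) is the test's own helper and its body is not shown above. Below is a hedged sketch of what it plausibly does, assuming SimpleCompactionStrategy (a test-only strategy) reads the "count" option and triggers a major compaction once a tablet accumulates more files than that threshold; the file count is checked by scanning the metadata table, the same technique the VolumeIT methods further down use. The real helper may verify different behavior.

// Hypothetical sketch of runTest; the threshold semantics are assumed.
private void runTest(Connector c, String tableName, int count) throws Exception {
  for (int i = 0; i < count + 1; i++) {
    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
    Mutation m = new Mutation("row" + i);
    m.put("cf", "cq", "v" + i);
    bw.addMutation(m);
    bw.close();
    // each flush writes a new RFile, eventually exceeding the strategy's threshold
    c.tableOperations().flush(tableName, null, null, true);
  }
  // poll the metadata table until the strategy compacts the tablet back to one file
  Table.ID tableId = Table.ID.of(c.tableOperations().tableIdMap().get(tableName));
  while (true) {
    int files = 0;
    try (Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
      s.setRange(new KeyExtent(tableId, null, null).toMetadataRange());
      s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
      for (Entry<Key, Value> entry : s) {
        files++;
      }
    }
    if (files <= 1)
      break;
    Thread.sleep(250);
  }
}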
Use of org.apache.accumulo.core.client.Connector in project accumulo by apache.
From the class VolumeChooserIT, method twoTablesRandomVolumeChooser.
// Test that uses two tables with 10 split points each. They each use the RandomVolumeChooser to choose volumes.
@Test
public void twoTablesRandomVolumeChooser() throws Exception {
  log.info("Starting twoTablesRandomVolumeChooser()");
  // Create namespace
  Connector connector = getConnector();
  connector.namespaceOperations().create(namespace1);
  // Set properties on the namespace
  connector.namespaceOperations().setProperty(namespace1, PerTableVolumeChooser.TABLE_VOLUME_CHOOSER, RandomVolumeChooser.class.getName());
  // Create table1 on namespace1
  String tableName = namespace1 + ".1";
  connector.tableOperations().create(tableName);
  Table.ID tableID = Table.ID.of(connector.tableOperations().tableIdMap().get(tableName));
  // Add 10 splits to the table
  addSplits(connector, tableName);
  // Write some data to the table
  writeAndReadData(connector, tableName);
  // Verify the new files are written to the Volumes specified
  verifyVolumes(connector, tableName, TabletsSection.getRange(tableID), v1.toString() + "," + v2.toString() + "," + v4.toString());
  connector.namespaceOperations().create(namespace2);
  // Set properties on the namespace
  connector.namespaceOperations().setProperty(namespace2, PerTableVolumeChooser.TABLE_VOLUME_CHOOSER, RandomVolumeChooser.class.getName());
  // Create table2 on namespace2
  String tableName2 = namespace2 + ".1";
  connector.tableOperations().create(tableName2);
  Table.ID tableID2 = Table.ID.of(connector.tableOperations().tableIdMap().get(tableName2));
  // Add 10 splits to the table
  addSplits(connector, tableName2);
  // Write some data to the table
  writeAndReadData(connector, tableName2);
  // Verify the new files are written to the Volumes specified
  verifyVolumes(connector, tableName2, TabletsSection.getRange(tableID2), v1.toString() + "," + v2.toString() + "," + v4.toString());
}
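addSplits() and writeAndReadData() are helpers defined elsewhere in VolumeChooserIT. A rough sketch under assumed names and row values (the actual split points and data differ):

// Hypothetical sketch of the VolumeChooserIT helpers; the split points and
// row values here are assumptions.
private void addSplits(Connector connector, String tableName) throws Exception {
  SortedSet<Text> splits = new TreeSet<>();
  for (String s : "b,e,g,j,l,o,q,t,v,y".split(",")) {
    splits.add(new Text(s));
  }
  connector.tableOperations().addSplits(tableName, splits);
}

private void writeAndReadData(Connector connector, String tableName) throws Exception {
  BatchWriter bw = connector.createBatchWriter(tableName, new BatchWriterConfig());
  for (String row : "a,c,f,h,k,m,p,r,u,w,z".split(",")) {
    Mutation m = new Mutation(row);
    m.put("cf1", "cq1", row);
    bw.addMutation(m);
  }
  bw.close();
  // flush so verifyVolumes has RFiles on disk to inspect
  connector.tableOperations().flush(tableName, null, null, true);
  int seen = 0;
  try (Scanner scanner = connector.createScanner(tableName, Authorizations.EMPTY)) {
    for (Entry<Key, Value> entry : scanner) {
      seen++;
    }
  }
  Assert.assertEquals(11, seen);
}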
Use of org.apache.accumulo.core.client.Connector in project accumulo by apache.
From the class VolumeIT, method testRelativePaths.
@Test
public void testRelativePaths() throws Exception {
  List<String> expected = new ArrayList<>();
  Connector connector = getConnector();
  String tableName = getUniqueNames(1)[0];
  connector.tableOperations().create(tableName, new NewTableConfiguration().withoutDefaultIterators());
  Table.ID tableId = Table.ID.of(connector.tableOperations().tableIdMap().get(tableName));
  SortedSet<Text> partitions = new TreeSet<>();
  // with some splits
  for (String s : "c,g,k,p,s,v".split(",")) {
    partitions.add(new Text(s));
  }
  connector.tableOperations().addSplits(tableName, partitions);
  BatchWriter bw = connector.createBatchWriter(tableName, new BatchWriterConfig());
  // create two files in each tablet
  String[] rows = "a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z".split(",");
  for (String s : rows) {
    Mutation m = new Mutation(s);
    m.put("cf1", "cq1", "1");
    bw.addMutation(m);
    expected.add(s + ":cf1:cq1:1");
  }
  bw.flush();
  connector.tableOperations().flush(tableName, null, null, true);
  for (String s : rows) {
    Mutation m = new Mutation(s);
    m.put("cf1", "cq1", "2");
    bw.addMutation(m);
    expected.add(s + ":cf1:cq1:2");
  }
  bw.close();
  connector.tableOperations().flush(tableName, null, null, true);
  verifyData(expected, connector.createScanner(tableName, Authorizations.EMPTY));
  connector.tableOperations().offline(tableName, true);
  connector.securityOperations().grantTablePermission("root", MetadataTable.NAME, TablePermission.WRITE);
  try (Scanner metaScanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
    metaScanner.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
    metaScanner.setRange(new KeyExtent(tableId, null, null).toMetadataRange());
    BatchWriter mbw = connector.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    for (Entry<Key, Value> entry : metaScanner) {
      String cq = entry.getKey().getColumnQualifier().toString();
      if (cq.startsWith(v1.toString())) {
        Path path = new Path(cq);
        String relPath = "/" + path.getParent().getName() + "/" + path.getName();
        Mutation fileMut = new Mutation(entry.getKey().getRow());
        fileMut.putDelete(entry.getKey().getColumnFamily(), entry.getKey().getColumnQualifier());
        fileMut.put(entry.getKey().getColumnFamily().toString(), relPath, entry.getValue().toString());
        mbw.addMutation(fileMut);
      }
    }
    mbw.close();
    connector.tableOperations().online(tableName, true);
    verifyData(expected, connector.createScanner(tableName, Authorizations.EMPTY));
    connector.tableOperations().compact(tableName, null, null, true, true);
    verifyData(expected, connector.createScanner(tableName, Authorizations.EMPTY));
    for (Entry<Key, Value> entry : metaScanner) {
      String cq = entry.getKey().getColumnQualifier().toString();
      Path path = new Path(cq);
      Assert.assertTrue("relative path not deleted " + path.toString(), path.depth() > 2);
    }
  }
}
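verifyData() compares the expected "row:cf:cq:value" strings against what a scanner returns. A sketch consistent with how the tests above build their expected lists (the exact implementation is assumed):

// Sketch of verifyData, matching the "row:cf:cq:value" format the tests use;
// the real helper may differ in detail.
private void verifyData(List<String> expected, Scanner scanner) {
  List<String> actual = new ArrayList<>();
  for (Entry<Key, Value> entry : scanner) {
    Key k = entry.getKey();
    actual.add(k.getRow() + ":" + k.getColumnFamily() + ":" + k.getColumnQualifier() + ":" + entry.getValue());
  }
  scanner.close();
  Collections.sort(expected);
  Collections.sort(actual);
  Assert.assertEquals(expected, actual);
}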
Use of org.apache.accumulo.core.client.Connector in project accumulo by apache.
From the class VolumeIT, method verifyVolumesUsed.
private void verifyVolumesUsed(String tableName, boolean shouldExist, Path... paths) throws Exception {
  Connector conn = getConnector();
  List<String> expected = new ArrayList<>();
  for (int i = 0; i < 100; i++) {
    String row = String.format("%06d", i * 100 + 3);
    expected.add(row + ":cf1:cq1:1");
  }
  if (!conn.tableOperations().exists(tableName)) {
    Assert.assertFalse(shouldExist);
    writeData(tableName, conn);
    verifyData(expected, conn.createScanner(tableName, Authorizations.EMPTY));
    conn.tableOperations().flush(tableName, null, null, true);
  }
  verifyData(expected, conn.createScanner(tableName, Authorizations.EMPTY));
  Table.ID tableId = Table.ID.of(conn.tableOperations().tableIdMap().get(tableName));
  try (Scanner metaScanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
    MetadataSchema.TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(metaScanner);
    metaScanner.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
    metaScanner.setRange(new KeyExtent(tableId, null, null).toMetadataRange());
    int[] counts = new int[paths.length];
    outer: for (Entry<Key, Value> entry : metaScanner) {
      String cf = entry.getKey().getColumnFamily().toString();
      String cq = entry.getKey().getColumnQualifier().toString();
      String path;
      if (cf.equals(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME.toString()))
        path = cq;
      else
        path = entry.getValue().toString();
      for (int i = 0; i < paths.length; i++) {
        if (path.startsWith(paths[i].toString())) {
          counts[i]++;
          continue outer;
        }
      }
      Assert.fail("Unexpected volume " + path);
    }
    // keep retrying until WAL state information in ZooKeeper stabilizes or until test times out
    retry: while (true) {
      Instance i = conn.getInstance();
      ZooReaderWriter zk = new ZooReaderWriter(i.getZooKeepers(), i.getZooKeepersSessionTimeOut(), "");
      WalStateManager wals = new WalStateManager(i, zk);
      try {
        outer: for (Entry<Path, WalState> entry : wals.getAllState().entrySet()) {
          for (Path path : paths) {
            if (entry.getKey().toString().startsWith(path.toString())) {
              continue outer;
            }
          }
          log.warn("Unexpected volume " + entry.getKey() + " (" + entry.getValue() + ")");
          continue retry;
        }
      } catch (WalMarkerException e) {
        Throwable cause = e.getCause();
        if (cause instanceof NoNodeException) {
          // ignore WALs being cleaned up
          continue retry;
        }
        throw e;
      }
      break;
    }
    // if a volume is chosen randomly for each tablet, then the probability that a volume will not be
    // chosen for any tablet is ((num_volumes - 1)/num_volumes)^num_tablets. For 100 tablets and 3
    // volumes, the probability that only 2 volumes would be chosen is 2.46e-18.
    int sum = 0;
    for (int count : counts) {
      Assert.assertTrue(count > 0);
      sum += count;
    }
    Assert.assertEquals(200, sum);
  }
}
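The probability comment near the end of verifyVolumesUsed can be checked directly: for each volume, the chance that 100 independently random tablets all avoid it is ((3 - 1)/3)^100. A standalone one-liner confirms the quoted figure:

// Standalone check of the figure quoted in the comment above.
public class ChooserProbabilityCheck {
  public static void main(String[] args) {
    // P(a given volume is never chosen by any of 100 tablets, with 3 volumes)
    double p = Math.pow(2.0 / 3.0, 100);
    System.out.println(p); // prints ~2.46e-18, matching the comment
  }
}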