Use of org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl in project accumulo by apache.
The class SimpleProxyBase, method importExportTable.
@Test
public void importExportTable() throws Exception {
  // Write some data
  String[][] expected = new String[10][];
  for (int i = 0; i < 10; i++) {
    client.updateAndFlush(creds, tableName, mutation("row" + i, "cf", "cq", "" + i));
    expected[i] = new String[] { "row" + i, "cf", "cq", "" + i };
    client.flushTable(creds, tableName, null, null, true);
  }
  assertScan(expected, tableName);
  // export/import
  MiniAccumuloClusterImpl cluster = SharedMiniClusterBase.getCluster();
  FileSystem fs = cluster.getFileSystem();
  Path base = cluster.getTemporaryPath();
  Path dir = new Path(base, "test");
  assertTrue(fs.mkdirs(dir));
  Path destDir = new Path(base, "test_dest");
  assertTrue(fs.mkdirs(destDir));
  client.offlineTable(creds, tableName, false);
  client.exportTable(creds, tableName, dir.toString());
  // copy files to a new location
  FSDataInputStream is = fs.open(new Path(dir, "distcp.txt"));
  try (BufferedReader r = new BufferedReader(new InputStreamReader(is, UTF_8))) {
    while (true) {
      String line = r.readLine();
      if (line == null)
        break;
      Path srcPath = new Path(line);
      FileUtil.copy(fs, srcPath, fs, destDir, false, fs.getConf());
    }
  }
  client.deleteTable(creds, tableName);
  client.importTable(creds, "testify", destDir.toString());
  assertScan(expected, "testify");
  client.deleteTable(creds, "testify");
  try {
    // ACCUMULO-1558 a second import from the same dir should fail, the first import moved the files
    client.importTable(creds, "testify2", destDir.toString());
    fail();
  } catch (Exception e) {}
  assertFalse(client.listTables(creds).contains("testify2"));
}
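The mutation(...) and assertScan(...) helpers are defined elsewhere in SimpleProxyBase and are not part of this excerpt. A minimal sketch of what mutation(...) might look like, assuming the Thrift-generated org.apache.accumulo.proxy.thrift.ColumnUpdate and a hypothetical s2bb String-to-ByteBuffer helper (names and shape are illustrative, not the project's exact code):

// Sketch only: builds the Map<ByteBuffer,List<ColumnUpdate>> that updateAndFlush expects.
// The helper names (mutation, s2bb) are assumptions; the real test class may differ.
private Map<ByteBuffer, List<ColumnUpdate>> mutation(String row, String cf, String cq, String value) {
  ColumnUpdate upd = new ColumnUpdate(s2bb(cf), s2bb(cq));
  upd.setValue(value.getBytes(UTF_8));
  return Collections.singletonMap(s2bb(row), Collections.singletonList(upd));
}

private ByteBuffer s2bb(String s) {
  return ByteBuffer.wrap(s.getBytes(UTF_8));
}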
Use of org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl in project accumulo by apache.
The class SimpleProxyBase, method tableNotFound.
@Test
public void tableNotFound() throws Exception {
  final String doesNotExist = "doesNotExists";
  try {
    client.addConstraint(creds, doesNotExist, NumericValueConstraint.class.getName());
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.addSplits(creds, doesNotExist, Collections.<ByteBuffer>emptySet());
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  final IteratorSetting setting = new IteratorSetting(100, "slow", SlowIterator.class.getName(), Collections.singletonMap("sleepTime", "200"));
  try {
    client.attachIterator(creds, doesNotExist, setting, EnumSet.allOf(IteratorScope.class));
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.cancelCompaction(creds, doesNotExist);
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.checkIteratorConflicts(creds, doesNotExist, setting, EnumSet.allOf(IteratorScope.class));
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.clearLocatorCache(creds, doesNotExist);
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    final String TABLE_TEST = getUniqueNames(1)[0];
    client.cloneTable(creds, doesNotExist, TABLE_TEST, false, null, null);
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.compactTable(creds, doesNotExist, null, null, null, true, false, null);
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.createBatchScanner(creds, doesNotExist, new BatchScanOptions());
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.createScanner(creds, doesNotExist, new ScanOptions());
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.createWriter(creds, doesNotExist, new WriterOptions());
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.deleteRows(creds, doesNotExist, null, null);
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.deleteTable(creds, doesNotExist);
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.exportTable(creds, doesNotExist, "/tmp");
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.flushTable(creds, doesNotExist, null, null, false);
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.getIteratorSetting(creds, doesNotExist, "foo", IteratorScope.SCAN);
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.getLocalityGroups(creds, doesNotExist);
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.getMaxRow(creds, doesNotExist, Collections.<ByteBuffer>emptySet(), null, false, null, false);
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.getTableProperties(creds, doesNotExist);
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.grantTablePermission(creds, "root", doesNotExist, TablePermission.WRITE);
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.hasTablePermission(creds, "root", doesNotExist, TablePermission.WRITE);
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    MiniAccumuloClusterImpl cluster = SharedMiniClusterBase.getCluster();
    Path base = cluster.getTemporaryPath();
    Path importDir = new Path(base, "importDir");
    Path failuresDir = new Path(base, "failuresDir");
    assertTrue(cluster.getFileSystem().mkdirs(importDir));
    assertTrue(cluster.getFileSystem().mkdirs(failuresDir));
    client.importDirectory(creds, doesNotExist, importDir.toString(), failuresDir.toString(), true);
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.listConstraints(creds, doesNotExist);
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.listSplits(creds, doesNotExist, 10000);
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.mergeTablets(creds, doesNotExist, null, null);
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.offlineTable(creds, doesNotExist, false);
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.onlineTable(creds, doesNotExist, false);
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.removeConstraint(creds, doesNotExist, 0);
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.removeIterator(creds, doesNotExist, "name", EnumSet.allOf(IteratorScope.class));
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.removeTableProperty(creds, doesNotExist, Property.TABLE_FILE_MAX.getKey());
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.renameTable(creds, doesNotExist, "someTableName");
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.revokeTablePermission(creds, "root", doesNotExist, TablePermission.ALTER_TABLE);
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.setTableProperty(creds, doesNotExist, Property.TABLE_FILE_MAX.getKey(), "0");
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.splitRangeByTablets(creds, doesNotExist, client.getRowRange(ByteBuffer.wrap("row".getBytes(UTF_8))), 10);
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.updateAndFlush(creds, doesNotExist, new HashMap<ByteBuffer, List<ColumnUpdate>>());
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.getDiskUsage(creds, Collections.singleton(doesNotExist));
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.testTableClassLoad(creds, doesNotExist, VersioningIterator.class.getName(), SortedKeyValueIterator.class.getName());
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.createConditionalWriter(creds, doesNotExist, new ConditionalWriterOptions());
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
}
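Every call above follows the same try/fail/catch pattern. As a design note, the repetition could be condensed with a small helper that takes the proxy call as a lambda; the sketch below is illustrative only (the ProxyCall interface and assertTableNotFound helper are assumptions, not part of the original test class):

// Sketch only: factor the repeated try/fail/catch pattern into one helper.
@FunctionalInterface
interface ProxyCall {
  void run() throws Exception;
}

private static void assertTableNotFound(ProxyCall call) throws Exception {
  try {
    call.run();
    fail("exception not thrown");
  } catch (TableNotFoundException expected) {
    // expected: the table does not exist
  }
}

// usage, for example:
// assertTableNotFound(() -> client.deleteTable(creds, doesNotExist));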
Use of org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl in project accumulo by apache.
The class SimpleProxyBase, method testSiteConfiguration.
@Test
public void testSiteConfiguration() throws Exception {
  // get something we know is in the site config
  MiniAccumuloClusterImpl cluster = SharedMiniClusterBase.getCluster();
  Map<String, String> cfg = client.getSiteConfiguration(creds);
  assertTrue(cfg.get("instance.dfs.dir").startsWith(cluster.getConfig().getAccumuloDir().getAbsolutePath()));
}
Use of org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl in project accumulo by apache.
The class TableIT, method test.
@Test
public void test() throws Exception {
  Assume.assumeThat(getClusterType(), CoreMatchers.is(ClusterType.MINI));
  AccumuloCluster cluster = getCluster();
  MiniAccumuloClusterImpl mac = (MiniAccumuloClusterImpl) cluster;
  String rootPath = mac.getConfig().getDir().getAbsolutePath();
  Connector c = getConnector();
  TableOperations to = c.tableOperations();
  String tableName = getUniqueNames(1)[0];
  to.create(tableName);
  TestIngest.Opts opts = new TestIngest.Opts();
  VerifyIngest.Opts vopts = new VerifyIngest.Opts();
  ClientConfiguration clientConfig = getCluster().getClientConfig();
  if (clientConfig.hasSasl()) {
    opts.updateKerberosCredentials(clientConfig);
    vopts.updateKerberosCredentials(clientConfig);
  } else {
    opts.setPrincipal(getAdminPrincipal());
    vopts.setPrincipal(getAdminPrincipal());
  }
  opts.setTableName(tableName);
  TestIngest.ingest(c, opts, new BatchWriterOpts());
  to.flush(tableName, null, null, true);
  vopts.setTableName(tableName);
  VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
  Table.ID id = Table.ID.of(to.tableIdMap().get(tableName));
  try (Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
    s.setRange(new KeyExtent(id, null, null).toMetadataRange());
    s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
    assertTrue(Iterators.size(s.iterator()) > 0);
    FileSystem fs = getCluster().getFileSystem();
    assertTrue(fs.listStatus(new Path(rootPath + "/accumulo/tables/" + id)).length > 0);
    to.delete(tableName);
    assertEquals(0, Iterators.size(s.iterator()));
    try {
      assertEquals(0, fs.listStatus(new Path(rootPath + "/accumulo/tables/" + id)).length);
    } catch (FileNotFoundException ex) {
      // that's fine, too
    }
    assertNull(to.tableIdMap().get(tableName));
    to.create(tableName);
    TestIngest.ingest(c, opts, new BatchWriterOpts());
    VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
    to.delete(tableName);
  }
}
Use of org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl in project accumulo by apache.
The class WALSunnyDayIT, method test.
@Test
public void test() throws Exception {
  MiniAccumuloClusterImpl mac = getCluster();
  MiniAccumuloClusterControl control = mac.getClusterControl();
  control.stop(GARBAGE_COLLECTOR);
  Connector c = getConnector();
  String tableName = getUniqueNames(1)[0];
  c.tableOperations().create(tableName);
  writeSomeData(c, tableName, 1, 1);
  // wal markers are added lazily
  Map<String, Boolean> wals = getWALsAndAssertCount(c, 2);
  for (Boolean b : wals.values()) {
    assertTrue("logs should be in use", b);
  }
  // roll log, get a new next
  writeSomeData(c, tableName, 1001, 50);
  Map<String, Boolean> walsAfterRoll = getWALsAndAssertCount(c, 3);
  assertTrue("new WALs should be a superset of the old WALs", walsAfterRoll.keySet().containsAll(wals.keySet()));
  assertEquals("all WALs should be in use", 3, countTrue(walsAfterRoll.values()));
  // flush the tables
  for (String table : new String[] { tableName, MetadataTable.NAME, RootTable.NAME }) {
    c.tableOperations().flush(table, null, null, true);
  }
  sleepUninterruptibly(1, TimeUnit.SECONDS);
  // rolled WAL is no longer in use, but needs to be GC'd
  Map<String, Boolean> walsAfterflush = getWALsAndAssertCount(c, 3);
  assertEquals("inUse should be 2", 2, countTrue(walsAfterflush.values()));
  // let the GC run for a little bit
  control.start(GARBAGE_COLLECTOR);
  sleepUninterruptibly(5, TimeUnit.SECONDS);
  // make sure the unused WAL goes away
  getWALsAndAssertCount(c, 2);
  control.stop(GARBAGE_COLLECTOR);
  // restart the tserver, but don't run recovery on all tablets
  control.stop(TABLET_SERVER);
  // this delays recovery on the normal tables
  assertEquals(0, cluster.exec(SetGoalState.class, "SAFE_MODE").waitFor());
  control.start(TABLET_SERVER);
  // wait for the metadata table to go back online
  getRecoveryMarkers(c);
  // allow a little time for the master to notice ASSIGNED_TO_DEAD_SERVER tablets
  sleepUninterruptibly(5, TimeUnit.SECONDS);
  Map<KeyExtent, List<String>> markers = getRecoveryMarkers(c);
  // log.debug("markers " + markers);
  assertEquals("one tablet should have markers", 1, markers.keySet().size());
  assertEquals("tableId of the keyExtent should be 1", "1", markers.keySet().iterator().next().getTableId().canonicalID());
  // put some data in the WAL
  assertEquals(0, cluster.exec(SetGoalState.class, "NORMAL").waitFor());
  verifySomeData(c, tableName, 1001 * 50 + 1);
  writeSomeData(c, tableName, 100, 100);
  Map<String, Boolean> walsAfterRestart = getWALsAndAssertCount(c, 4);
  // log.debug("wals after " + walsAfterRestart);
  assertEquals("used WALs after restart should be 4", 4, countTrue(walsAfterRestart.values()));
  control.start(GARBAGE_COLLECTOR);
  sleepUninterruptibly(5, TimeUnit.SECONDS);
  Map<String, Boolean> walsAfterRestartAndGC = getWALsAndAssertCount(c, 2);
  assertEquals("logs in use should be 2", 2, countTrue(walsAfterRestartAndGC.values()));
}
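The writeSomeData(...), verifySomeData(...), getWALsAndAssertCount(...) and getRecoveryMarkers(...) helpers belong to WALSunnyDayIT and are not included in this excerpt. A minimal sketch of a writeSomeData-style helper using the standard BatchWriter API (the row/column layout and cell values below are illustrative assumptions, not the project's implementation):

// Sketch only: writes `rows` rows with `cols` columns each so that WAL entries are produced.
// The cell layout here is an assumption for illustration.
private void writeSomeData(Connector conn, String tableName, int rows, int cols) throws Exception {
  BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
  for (int r = 0; r < rows; r++) {
    Mutation m = new Mutation("row" + r);
    for (int col = 0; col < cols; col++) {
      m.put("cf", "cq" + col, "value");
    }
    bw.addMutation(m);
  }
  bw.close();
}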