Example 11 with MiniAccumuloClusterImpl

Use of org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl in the Apache Accumulo project.

From the class SimpleProxyBase, method importExportTable.

@Test
public void importExportTable() throws Exception {
    // Write some data
    String[][] expected = new String[10][];
    for (int i = 0; i < 10; i++) {
        client.updateAndFlush(creds, tableName, mutation("row" + i, "cf", "cq", "" + i));
        expected[i] = new String[] { "row" + i, "cf", "cq", "" + i };
        client.flushTable(creds, tableName, null, null, true);
    }
    assertScan(expected, tableName);
    // export/import
    MiniAccumuloClusterImpl cluster = SharedMiniClusterBase.getCluster();
    FileSystem fs = cluster.getFileSystem();
    Path base = cluster.getTemporaryPath();
    Path dir = new Path(base, "test");
    assertTrue(fs.mkdirs(dir));
    Path destDir = new Path(base, "test_dest");
    assertTrue(fs.mkdirs(destDir));
    client.offlineTable(creds, tableName, false);
    client.exportTable(creds, tableName, dir.toString());
    // copy files to a new location
    FSDataInputStream is = fs.open(new Path(dir, "distcp.txt"));
    try (BufferedReader r = new BufferedReader(new InputStreamReader(is, UTF_8))) {
        while (true) {
            String line = r.readLine();
            if (line == null)
                break;
            Path srcPath = new Path(line);
            FileUtil.copy(fs, srcPath, fs, destDir, false, fs.getConf());
        }
    }
    client.deleteTable(creds, tableName);
    client.importTable(creds, "testify", destDir.toString());
    assertScan(expected, "testify");
    client.deleteTable(creds, "testify");
    try {
        // ACCUMULO-1558 a second import from the same dir should fail, the first import moved the files
        client.importTable(creds, "testify2", destDir.toString());
        fail("expected the second import to fail");
    } catch (Exception e) {
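        // expected: the first import moved the files out of destDir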
    }
    assertFalse(client.listTables(creds).contains("testify2"));
}
Also used: Path (org.apache.hadoop.fs.Path), InputStreamReader (java.io.InputStreamReader), FileSystem (org.apache.hadoop.fs.FileSystem), BufferedReader (java.io.BufferedReader), FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream), MiniAccumuloClusterImpl (org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl), NumericValueConstraint (org.apache.accumulo.test.constraints.NumericValueConstraint), TableNotFoundException (org.apache.accumulo.proxy.thrift.TableNotFoundException), MutationsRejectedException (org.apache.accumulo.proxy.thrift.MutationsRejectedException), NamespaceNotEmptyException (org.apache.accumulo.proxy.thrift.NamespaceNotEmptyException), AccumuloSecurityException (org.apache.accumulo.proxy.thrift.AccumuloSecurityException), TException (org.apache.thrift.TException), NamespaceExistsException (org.apache.accumulo.proxy.thrift.NamespaceExistsException), NamespaceNotFoundException (org.apache.accumulo.proxy.thrift.NamespaceNotFoundException), TApplicationException (org.apache.thrift.TApplicationException), TableExistsException (org.apache.accumulo.proxy.thrift.TableExistsException), Test (org.junit.Test)
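
For contrast, a minimal sketch of the same export/import flow through the core client API (TableOperations) rather than the proxy. It assumes a live Connector conn to the same mini cluster; tableName, dir, and destDir mirror the test above.

TableOperations ops = conn.tableOperations();
// a table must be offline before it can be exported
ops.offline(tableName, true);
ops.exportTable(tableName, dir.toString());
// copy the files listed in distcp.txt to destDir, as the test does above
ops.importTable("testify", destDir.toString());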

Example 12 with MiniAccumuloClusterImpl

Use of org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl in the Apache Accumulo project.

From the class SimpleProxyBase, method tableNotFound.

@Test
public void tableNotFound() throws Exception {
    final String doesNotExist = "doesNotExists";
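    // every operation below should throw TableNotFoundException for this nonexistent table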
    try {
        client.addConstraint(creds, doesNotExist, NumericValueConstraint.class.getName());
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.addSplits(creds, doesNotExist, Collections.<ByteBuffer>emptySet());
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    final IteratorSetting setting = new IteratorSetting(100, "slow", SlowIterator.class.getName(), Collections.singletonMap("sleepTime", "200"));
    try {
        client.attachIterator(creds, doesNotExist, setting, EnumSet.allOf(IteratorScope.class));
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.cancelCompaction(creds, doesNotExist);
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.checkIteratorConflicts(creds, doesNotExist, setting, EnumSet.allOf(IteratorScope.class));
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.clearLocatorCache(creds, doesNotExist);
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        final String TABLE_TEST = getUniqueNames(1)[0];
        client.cloneTable(creds, doesNotExist, TABLE_TEST, false, null, null);
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.compactTable(creds, doesNotExist, null, null, null, true, false, null);
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.createBatchScanner(creds, doesNotExist, new BatchScanOptions());
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.createScanner(creds, doesNotExist, new ScanOptions());
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.createWriter(creds, doesNotExist, new WriterOptions());
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.deleteRows(creds, doesNotExist, null, null);
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.deleteTable(creds, doesNotExist);
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.exportTable(creds, doesNotExist, "/tmp");
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.flushTable(creds, doesNotExist, null, null, false);
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.getIteratorSetting(creds, doesNotExist, "foo", IteratorScope.SCAN);
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.getLocalityGroups(creds, doesNotExist);
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.getMaxRow(creds, doesNotExist, Collections.<ByteBuffer>emptySet(), null, false, null, false);
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.getTableProperties(creds, doesNotExist);
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.grantTablePermission(creds, "root", doesNotExist, TablePermission.WRITE);
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.hasTablePermission(creds, "root", doesNotExist, TablePermission.WRITE);
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        MiniAccumuloClusterImpl cluster = SharedMiniClusterBase.getCluster();
        Path base = cluster.getTemporaryPath();
        Path importDir = new Path(base, "importDir");
        Path failuresDir = new Path(base, "failuresDir");
        assertTrue(cluster.getFileSystem().mkdirs(importDir));
        assertTrue(cluster.getFileSystem().mkdirs(failuresDir));
        client.importDirectory(creds, doesNotExist, importDir.toString(), failuresDir.toString(), true);
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.listConstraints(creds, doesNotExist);
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.listSplits(creds, doesNotExist, 10000);
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.mergeTablets(creds, doesNotExist, null, null);
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.offlineTable(creds, doesNotExist, false);
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.onlineTable(creds, doesNotExist, false);
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.removeConstraint(creds, doesNotExist, 0);
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.removeIterator(creds, doesNotExist, "name", EnumSet.allOf(IteratorScope.class));
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.removeTableProperty(creds, doesNotExist, Property.TABLE_FILE_MAX.getKey());
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.renameTable(creds, doesNotExist, "someTableName");
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.revokeTablePermission(creds, "root", doesNotExist, TablePermission.ALTER_TABLE);
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.setTableProperty(creds, doesNotExist, Property.TABLE_FILE_MAX.getKey(), "0");
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.splitRangeByTablets(creds, doesNotExist, client.getRowRange(ByteBuffer.wrap("row".getBytes(UTF_8))), 10);
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.updateAndFlush(creds, doesNotExist, new HashMap<ByteBuffer, List<ColumnUpdate>>());
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.getDiskUsage(creds, Collections.singleton(doesNotExist));
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.testTableClassLoad(creds, doesNotExist, VersioningIterator.class.getName(), SortedKeyValueIterator.class.getName());
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.createConditionalWriter(creds, doesNotExist, new ConditionalWriterOptions());
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
}
Also used: Path (org.apache.hadoop.fs.Path), BatchScanOptions (org.apache.accumulo.proxy.thrift.BatchScanOptions), SortedKeyValueIterator (org.apache.accumulo.core.iterators.SortedKeyValueIterator), ByteBuffer (java.nio.ByteBuffer), TableNotFoundException (org.apache.accumulo.proxy.thrift.TableNotFoundException), IteratorSetting (org.apache.accumulo.proxy.thrift.IteratorSetting), ConditionalWriterOptions (org.apache.accumulo.proxy.thrift.ConditionalWriterOptions), WriterOptions (org.apache.accumulo.proxy.thrift.WriterOptions), ScanOptions (org.apache.accumulo.proxy.thrift.ScanOptions), ArrayList (java.util.ArrayList), List (java.util.List), IteratorScope (org.apache.accumulo.proxy.thrift.IteratorScope), VersioningIterator (org.apache.accumulo.core.iterators.user.VersioningIterator), NumericValueConstraint (org.apache.accumulo.test.constraints.NumericValueConstraint), MiniAccumuloClusterImpl (org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl), SlowIterator (org.apache.accumulo.test.functional.SlowIterator), Test (org.junit.Test)
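
The repeated try/fail/catch pattern above predates JUnit's assertThrows. A hedged sketch of a small helper that would collapse it; the ThrowingCall interface and the expectTableNotFound name are illustrative, not part of the Accumulo test code.

// fail() comes from org.junit.Assert; TableNotFoundException from org.apache.accumulo.proxy.thrift
interface ThrowingCall {
    void run() throws Exception;
}

private static void expectTableNotFound(ThrowingCall call) throws Exception {
    try {
        call.run();
        fail("exception not thrown");
    } catch (TableNotFoundException expected) {
        // expected for a table that does not exist
    }
}

// usage: expectTableNotFound(() -> client.deleteTable(creds, doesNotExist));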

Example 13 with MiniAccumuloClusterImpl

Use of org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl in the Apache Accumulo project.

From the class SimpleProxyBase, method testSiteConfiguration.

@Test
public void testSiteConfiguration() throws Exception {
    // get something we know is in the site config
    MiniAccumuloClusterImpl cluster = SharedMiniClusterBase.getCluster();
    Map<String, String> cfg = client.getSiteConfiguration(creds);
    assertTrue(cfg.get("instance.dfs.dir").startsWith(cluster.getConfig().getAccumuloDir().getAbsolutePath()));
}
Also used: MiniAccumuloClusterImpl (org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl), Test (org.junit.Test)
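
The same check is available without the proxy. A minimal sketch, assuming a live Connector conn to the mini cluster; InstanceOperations.getSiteConfiguration should return the same map the proxy call exposes.

Map<String, String> siteCfg = conn.instanceOperations().getSiteConfiguration();
// the configured Accumulo directory should sit under the cluster's temp dir
assertTrue(siteCfg.get("instance.dfs.dir")
        .startsWith(cluster.getConfig().getAccumuloDir().getAbsolutePath()));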

Example 14 with MiniAccumuloClusterImpl

Use of org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl in the Apache Accumulo project.

From the class TableIT, method test.

@Test
public void test() throws Exception {
    Assume.assumeThat(getClusterType(), CoreMatchers.is(ClusterType.MINI));
    AccumuloCluster cluster = getCluster();
    MiniAccumuloClusterImpl mac = (MiniAccumuloClusterImpl) cluster;
    String rootPath = mac.getConfig().getDir().getAbsolutePath();
    Connector c = getConnector();
    TableOperations to = c.tableOperations();
    String tableName = getUniqueNames(1)[0];
    to.create(tableName);
    TestIngest.Opts opts = new TestIngest.Opts();
    VerifyIngest.Opts vopts = new VerifyIngest.Opts();
    ClientConfiguration clientConfig = getCluster().getClientConfig();
    if (clientConfig.hasSasl()) {
        opts.updateKerberosCredentials(clientConfig);
        vopts.updateKerberosCredentials(clientConfig);
    } else {
        opts.setPrincipal(getAdminPrincipal());
        vopts.setPrincipal(getAdminPrincipal());
    }
    opts.setTableName(tableName);
    TestIngest.ingest(c, opts, new BatchWriterOpts());
    to.flush(tableName, null, null, true);
    vopts.setTableName(tableName);
    VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
    Table.ID id = Table.ID.of(to.tableIdMap().get(tableName));
    try (Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
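        // limit the metadata scan to this table's tablets and fetch only its file entries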
        s.setRange(new KeyExtent(id, null, null).toMetadataRange());
        s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
        assertTrue(Iterators.size(s.iterator()) > 0);
        FileSystem fs = getCluster().getFileSystem();
        assertTrue(fs.listStatus(new Path(rootPath + "/accumulo/tables/" + id)).length > 0);
        to.delete(tableName);
        assertEquals(0, Iterators.size(s.iterator()));
        try {
            assertEquals(0, fs.listStatus(new Path(rootPath + "/accumulo/tables/" + id)).length);
        } catch (FileNotFoundException ex) {
            // that's fine, too: the table directory may already be gone
        }
        assertNull(to.tableIdMap().get(tableName));
        to.create(tableName);
        TestIngest.ingest(c, opts, new BatchWriterOpts());
        VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
        to.delete(tableName);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Connector (org.apache.accumulo.core.client.Connector), Scanner (org.apache.accumulo.core.client.Scanner), MetadataTable (org.apache.accumulo.core.metadata.MetadataTable), Table (org.apache.accumulo.core.client.impl.Table), ScannerOpts (org.apache.accumulo.core.cli.ScannerOpts), BatchWriterOpts (org.apache.accumulo.core.cli.BatchWriterOpts), AccumuloCluster (org.apache.accumulo.cluster.AccumuloCluster), FileNotFoundException (java.io.FileNotFoundException), KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent), TableOperations (org.apache.accumulo.core.client.admin.TableOperations), TestIngest (org.apache.accumulo.test.TestIngest), VerifyIngest (org.apache.accumulo.test.VerifyIngest), FileSystem (org.apache.hadoop.fs.FileSystem), MiniAccumuloClusterImpl (org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl), ClientConfiguration (org.apache.accumulo.core.client.ClientConfiguration), Test (org.junit.Test)

Example 15 with MiniAccumuloClusterImpl

Use of org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl in the Apache Accumulo project.

From the class WALSunnyDayIT, method test.

@Test
public void test() throws Exception {
    MiniAccumuloClusterImpl mac = getCluster();
    MiniAccumuloClusterControl control = mac.getClusterControl();
    control.stop(GARBAGE_COLLECTOR);
    Connector c = getConnector();
    String tableName = getUniqueNames(1)[0];
    c.tableOperations().create(tableName);
    writeSomeData(c, tableName, 1, 1);
    // wal markers are added lazily
    Map<String, Boolean> wals = getWALsAndAssertCount(c, 2);
    for (Boolean b : wals.values()) {
        assertTrue("logs should be in use", b);
    }
    // roll log, get a new next
    writeSomeData(c, tableName, 1001, 50);
    Map<String, Boolean> walsAfterRoll = getWALsAndAssertCount(c, 3);
    assertTrue("new WALs should be a superset of the old WALs", walsAfterRoll.keySet().containsAll(wals.keySet()));
    assertEquals("all WALs should be in use", 3, countTrue(walsAfterRoll.values()));
    // flush the tables
    for (String table : new String[] { tableName, MetadataTable.NAME, RootTable.NAME }) {
        c.tableOperations().flush(table, null, null, true);
    }
    sleepUninterruptibly(1, TimeUnit.SECONDS);
    // rolled WAL is no longer in use, but needs to be GC'd
    Map<String, Boolean> walsAfterflush = getWALsAndAssertCount(c, 3);
    assertEquals("inUse should be 2", 2, countTrue(walsAfterflush.values()));
    // let the GC run for a little bit
    control.start(GARBAGE_COLLECTOR);
    sleepUninterruptibly(5, TimeUnit.SECONDS);
    // make sure the unused WAL goes away
    getWALsAndAssertCount(c, 2);
    control.stop(GARBAGE_COLLECTOR);
    // restart the tserver, but don't run recovery on all tablets
    control.stop(TABLET_SERVER);
    // this delays recovery on the normal tables
    assertEquals(0, cluster.exec(SetGoalState.class, "SAFE_MODE").waitFor());
    control.start(TABLET_SERVER);
    // wait for the metadata table to go back online
    getRecoveryMarkers(c);
    // allow a little time for the master to notice ASSIGNED_TO_DEAD_SERVER tablets
    sleepUninterruptibly(5, TimeUnit.SECONDS);
    Map<KeyExtent, List<String>> markers = getRecoveryMarkers(c);
    // log.debug("markers " + markers);
    assertEquals("one tablet should have markers", 1, markers.keySet().size());
    assertEquals("tableId of the keyExtent should be 1", "1", markers.keySet().iterator().next().getTableId().canonicalID());
    // put some data in the WAL
    assertEquals(0, cluster.exec(SetGoalState.class, "NORMAL").waitFor());
    verifySomeData(c, tableName, 1001 * 50 + 1);
    writeSomeData(c, tableName, 100, 100);
    Map<String, Boolean> walsAfterRestart = getWALsAndAssertCount(c, 4);
    // log.debug("wals after " + walsAfterRestart);
    assertEquals("used WALs after restart should be 4", 4, countTrue(walsAfterRestart.values()));
    control.start(GARBAGE_COLLECTOR);
    sleepUninterruptibly(5, TimeUnit.SECONDS);
    Map<String, Boolean> walsAfterRestartAndGC = getWALsAndAssertCount(c, 2);
    assertEquals("logs in use should be 2", 2, countTrue(walsAfterRestartAndGC.values()));
}
Also used: MiniAccumuloClusterControl (org.apache.accumulo.minicluster.impl.MiniAccumuloClusterControl), Connector (org.apache.accumulo.core.client.Connector), ArrayList (java.util.ArrayList), List (java.util.List), MiniAccumuloClusterImpl (org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl), KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent), Test (org.junit.Test)
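
The snippet calls several private helpers of WALSunnyDayIT that are not shown (writeSomeData, getWALsAndAssertCount, getRecoveryMarkers, verifySomeData, countTrue). As one example, countTrue presumably just tallies true values; a minimal sketch:

// requires java.util.Collection
private static int countTrue(Collection<Boolean> values) {
    int count = 0;
    for (Boolean b : values) {
        if (b) {
            count++;
        }
    }
    return count;
}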

Aggregations

Types used alongside MiniAccumuloClusterImpl in the indexed examples, with usage counts:

MiniAccumuloClusterImpl (org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl): 34
Test (org.junit.Test): 29
Connector (org.apache.accumulo.core.client.Connector): 19
BatchWriter (org.apache.accumulo.core.client.BatchWriter): 15
Mutation (org.apache.accumulo.core.data.Mutation): 15
MiniAccumuloConfigImpl (org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl): 14
BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig): 13
Scanner (org.apache.accumulo.core.client.Scanner): 13
PasswordToken (org.apache.accumulo.core.client.security.tokens.PasswordToken): 12
Value (org.apache.accumulo.core.data.Value): 11
ProcessReference (org.apache.accumulo.minicluster.impl.ProcessReference): 11
File (java.io.File): 10
Key (org.apache.accumulo.core.data.Key): 10
AccumuloReplicaSystem (org.apache.accumulo.tserver.replication.AccumuloReplicaSystem): 9
Path (org.apache.hadoop.fs.Path): 9
PartialKey (org.apache.accumulo.core.data.PartialKey): 8
IOException (java.io.IOException): 5
Entry (java.util.Map.Entry): 5
FileSystem (org.apache.hadoop.fs.FileSystem): 5
Text (org.apache.hadoop.io.Text): 4
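
All of the examples above assume an already-running cluster, usually managed by a shared test base class such as SharedMiniClusterBase. For orientation, a hedged sketch of the bare MiniAccumuloClusterImpl lifecycle, assuming Accumulo 1.7/1.8-era minicluster APIs; the directory and password are placeholders, and the exact getConnector signature varies across versions. Fully-qualified names for the types used here appear in the aggregation list above.

File dir = new File("/tmp/mac");  // placeholder scratch directory
MiniAccumuloConfigImpl cfg = new MiniAccumuloConfigImpl(dir, "rootPassword");
cfg.setNumTservers(1);  // keep the test cluster small
MiniAccumuloClusterImpl mac = new MiniAccumuloClusterImpl(cfg);
mac.start();
try {
    Connector conn = mac.getConnector("root", new PasswordToken("rootPassword"));
    // exercise the cluster: create tables, write mutations, scan, ...
} finally {
    mac.stop();
}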