
Example 76 with TableId

Use of org.apache.accumulo.core.data.TableId in project accumulo by apache.

From the class SplitRecoveryIT, method test.

@Test
public void test() throws Exception {
    String tableName = getUniqueNames(1)[0];
    try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
        for (int tn = 0; tn < 2; tn++) {
            // create a table and put some data in it
            client.tableOperations().create(tableName);
            try (BatchWriter bw = client.createBatchWriter(tableName)) {
                bw.addMutation(m("a"));
                bw.addMutation(m("b"));
                bw.addMutation(m("c"));
            }
            // take the table offline
            client.tableOperations().offline(tableName);
            while (!isOffline(tableName, client)) sleepUninterruptibly(200, MILLISECONDS);
            // poke a partial split into the metadata table
            client.securityOperations().grantTablePermission(getAdminPrincipal(), MetadataTable.NAME, TablePermission.WRITE);
            TableId tableId = TableId.of(client.tableOperations().tableIdMap().get(tableName));
            KeyExtent extent = new KeyExtent(tableId, null, new Text("b"));
            Mutation m = TabletColumnFamily.createPrevRowMutation(extent);
            TabletColumnFamily.SPLIT_RATIO_COLUMN.put(m, new Value(Double.toString(0.5)));
            TabletColumnFamily.OLD_PREV_ROW_COLUMN.put(m, TabletColumnFamily.encodePrevEndRow(null));
            try (BatchWriter bw = client.createBatchWriter(MetadataTable.NAME)) {
                bw.addMutation(m);
                if (tn == 1) {
                    bw.flush();
                    try (Scanner scanner = client.createScanner(MetadataTable.NAME)) {
                        scanner.setRange(extent.toMetaRange());
                        scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
                        KeyExtent extent2 = new KeyExtent(tableId, new Text("b"), null);
                        m = TabletColumnFamily.createPrevRowMutation(extent2);
                        ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value("t2"));
                        ServerColumnFamily.TIME_COLUMN.put(m, new Value("M0"));
                        for (Entry<Key, Value> entry : scanner) {
                            m.put(DataFileColumnFamily.NAME, entry.getKey().getColumnQualifier(), entry.getValue());
                        }
                        bw.addMutation(m);
                    }
                }
            }
            // bring the table online
            client.tableOperations().online(tableName);
            // verify the tablets went online
            try (Scanner scanner = client.createScanner(tableName)) {
                int i = 0;
                String[] expected = { "a", "b", "c" };
                for (Entry<Key, Value> entry : scanner) {
                    assertEquals(expected[i], entry.getKey().getRow().toString());
                    i++;
                }
                assertEquals(3, i);
                client.tableOperations().delete(tableName);
            }
        }
    }
}
Also used: AccumuloClient (org.apache.accumulo.core.client.AccumuloClient), TableId (org.apache.accumulo.core.data.TableId), Scanner (org.apache.accumulo.core.client.Scanner), Text (org.apache.hadoop.io.Text), KeyExtent (org.apache.accumulo.core.dataImpl.KeyExtent), Value (org.apache.accumulo.core.data.Value), BatchWriter (org.apache.accumulo.core.client.BatchWriter), Mutation (org.apache.accumulo.core.data.Mutation), Key (org.apache.accumulo.core.data.Key), Test (org.junit.Test)
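
The heart of this test is resolving a table name to its internal TableId and then addressing a tablet's metadata rows. A minimal sketch of that lookup, assuming an open AccumuloClient and an existing table; the helper name is hypothetical, but every call appears in the test above:

import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.hadoop.io.Text;

public class TableIdLookupSketch {
    // Resolve tableName to its TableId and return the metadata range for the
    // tablet spanning ("b", +infinity), i.e. endRow == null, prevEndRow == "b".
    static Range metaRangeAfterB(AccumuloClient client, String tableName) {
        // tableIdMap() maps table names to canonical id strings; TableId.of
        // wraps such a string.
        TableId tableId = TableId.of(client.tableOperations().tableIdMap().get(tableName));
        KeyExtent extent = new KeyExtent(tableId, null, new Text("b"));
        return extent.toMetaRange();
    }
}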

Example 77 with TableId

Use of org.apache.accumulo.core.data.TableId in project accumulo by apache.

From the class MissingWalHeaderCompletesRecoveryIT, method testPartialHeaderWalRecoveryCompletes.

@Test
public void testPartialHeaderWalRecoveryCompletes() throws Exception {
    try (AccumuloClient client = Accumulo.newClient().from(getClientProperties()).build()) {
        MiniAccumuloClusterImpl cluster = getCluster();
        FileSystem fs = getCluster().getFileSystem();
        // Fake out something that looks like host:port; the actual value is irrelevant
        String fakeServer = "127.0.0.1:12345";
        File walogs = new File(cluster.getConfig().getAccumuloDir(), Constants.WAL_DIR);
        File walogServerDir = new File(walogs, fakeServer.replace(':', '+'));
        File partialHeaderWalog = new File(walogServerDir, UUID.randomUUID().toString());
        log.info("Created WAL with malformed header at {}", partialHeaderWalog.toURI());
        // Write half of the header
        FSDataOutputStream wal = fs.create(new Path(partialHeaderWalog.toURI()));
        wal.write(DfsLogger.LOG_FILE_HEADER_V4.getBytes(UTF_8), 0, DfsLogger.LOG_FILE_HEADER_V4.length() / 2);
        wal.close();
        assertTrue("root user did not have write permission to metadata table", client.securityOperations().hasTablePermission("root", MetadataTable.NAME, TablePermission.WRITE));
        String tableName = getUniqueNames(1)[0];
        client.tableOperations().create(tableName);
        TableId tableId = TableId.of(client.tableOperations().tableIdMap().get(tableName));
        assertNotNull("Table ID was null", tableId);
        LogEntry logEntry = new LogEntry(null, 0, partialHeaderWalog.toURI().toString());
        log.info("Taking {} offline", tableName);
        client.tableOperations().offline(tableName, true);
        log.info("{} is offline", tableName);
        Text row = TabletsSection.encodeRow(tableId, null);
        Mutation m = new Mutation(row);
        m.put(logEntry.getColumnFamily(), logEntry.getColumnQualifier(), logEntry.getValue());
        try (BatchWriter bw = client.createBatchWriter(MetadataTable.NAME)) {
            bw.addMutation(m);
        }
        log.info("Bringing {} online", tableName);
        client.tableOperations().online(tableName, true);
        log.info("{} is online", tableName);
        // Reading the table implies recovery completed successfully;
        // otherwise the tablet will never come online and we won't be able to read it.
        try (Scanner s = client.createScanner(tableName, Authorizations.EMPTY)) {
            assertEquals(0, Iterables.size(s));
        }
    }
}
Also used: AccumuloClient (org.apache.accumulo.core.client.AccumuloClient), Path (org.apache.hadoop.fs.Path), TableId (org.apache.accumulo.core.data.TableId), Scanner (org.apache.accumulo.core.client.Scanner), Text (org.apache.hadoop.io.Text), FileSystem (org.apache.hadoop.fs.FileSystem), RawLocalFileSystem (org.apache.hadoop.fs.RawLocalFileSystem), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Mutation (org.apache.accumulo.core.data.Mutation), BatchWriter (org.apache.accumulo.core.client.BatchWriter), MiniAccumuloClusterImpl (org.apache.accumulo.miniclusterImpl.MiniAccumuloClusterImpl), File (java.io.File), LogEntry (org.apache.accumulo.core.tabletserver.log.LogEntry), Test (org.junit.Test)
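
The mutation at the center of this test can be factored into a small helper. A hedged sketch, assuming Accumulo 2.x import paths for MetadataTable and TabletsSection; the helper name is hypothetical, while the individual calls all appear above:

import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.metadata.MetadataTable;
import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
import org.apache.accumulo.core.tabletserver.log.LogEntry;
import org.apache.hadoop.io.Text;

public class WalReferenceSketch {
    // Attach a WAL reference to the metadata row of tableId's default tablet,
    // mirroring the mutation the test writes while the table is offline.
    static void addWalReference(AccumuloClient client, TableId tableId, LogEntry logEntry) throws Exception {
        // encodeRow(tableId, null) addresses the table's last (default) tablet.
        Text row = TabletsSection.encodeRow(tableId, null);
        Mutation m = new Mutation(row);
        m.put(logEntry.getColumnFamily(), logEntry.getColumnQualifier(), logEntry.getValue());
        try (BatchWriter bw = client.createBatchWriter(MetadataTable.NAME)) {
            bw.addMutation(m);
        }
    }
}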

Example 78 with TableId

Use of org.apache.accumulo.core.data.TableId in project accumulo by apache.

From the class MissingWalHeaderCompletesRecoveryIT, method testEmptyWalRecoveryCompletes.

@Test
public void testEmptyWalRecoveryCompletes() throws Exception {
    try (AccumuloClient client = Accumulo.newClient().from(getClientProperties()).build()) {
        MiniAccumuloClusterImpl cluster = getCluster();
        FileSystem fs = cluster.getFileSystem();
        // Fake out something that looks like host:port; the actual value is irrelevant
        String fakeServer = "127.0.0.1:12345";
        File walogs = new File(cluster.getConfig().getAccumuloDir(), Constants.WAL_DIR);
        File walogServerDir = new File(walogs, fakeServer.replace(':', '+'));
        File emptyWalog = new File(walogServerDir, UUID.randomUUID().toString());
        log.info("Created empty WAL at {}", emptyWalog.toURI());
        fs.create(new Path(emptyWalog.toURI())).close();
        assertTrue("root user did not have write permission to metadata table", client.securityOperations().hasTablePermission("root", MetadataTable.NAME, TablePermission.WRITE));
        String tableName = getUniqueNames(1)[0];
        client.tableOperations().create(tableName);
        TableId tableId = TableId.of(client.tableOperations().tableIdMap().get(tableName));
        assertNotNull("Table ID was null", tableId);
        LogEntry logEntry = new LogEntry(new KeyExtent(tableId, null, null), 0, emptyWalog.toURI().toString());
        log.info("Taking {} offline", tableName);
        client.tableOperations().offline(tableName, true);
        log.info("{} is offline", tableName);
        Text row = TabletsSection.encodeRow(tableId, null);
        Mutation m = new Mutation(row);
        m.put(logEntry.getColumnFamily(), logEntry.getColumnQualifier(), logEntry.getValue());
        try (BatchWriter bw = client.createBatchWriter(MetadataTable.NAME)) {
            bw.addMutation(m);
        }
        log.info("Bringing {} online", tableName);
        client.tableOperations().online(tableName, true);
        log.info("{} is online", tableName);
        // Reading the table implies recovery completed successfully;
        // otherwise the tablet will never come online and we won't be able to read it.
        try (Scanner s = client.createScanner(tableName, Authorizations.EMPTY)) {
            assertEquals(0, Iterables.size(s));
        }
    }
}
Also used: AccumuloClient (org.apache.accumulo.core.client.AccumuloClient), Path (org.apache.hadoop.fs.Path), TableId (org.apache.accumulo.core.data.TableId), Scanner (org.apache.accumulo.core.client.Scanner), Text (org.apache.hadoop.io.Text), KeyExtent (org.apache.accumulo.core.dataImpl.KeyExtent), FileSystem (org.apache.hadoop.fs.FileSystem), RawLocalFileSystem (org.apache.hadoop.fs.RawLocalFileSystem), Mutation (org.apache.accumulo.core.data.Mutation), BatchWriter (org.apache.accumulo.core.client.BatchWriter), MiniAccumuloClusterImpl (org.apache.accumulo.miniclusterImpl.MiniAccumuloClusterImpl), File (java.io.File), LogEntry (org.apache.accumulo.core.tabletserver.log.LogEntry), Test (org.junit.Test)
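
Both recovery tests rely on the two-argument offline/online overloads, whose boolean makes the call block until the table reaches the requested state. A minimal sketch of that pattern (the helper name is hypothetical):

import org.apache.accumulo.core.client.AccumuloClient;

public class OfflineEditSketch {
    // Take the table fully offline, run metadata edits while no tablet is
    // hosted, then block until every tablet is hosted again.
    static void withTableOffline(AccumuloClient client, String tableName, Runnable metadataEdits) throws Exception {
        client.tableOperations().offline(tableName, true);  // wait = true
        metadataEdits.run();                                // safe: no tablets hosted
        client.tableOperations().online(tableName, true);   // wait = true
    }
}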

Example 79 with TableId

Use of org.apache.accumulo.core.data.TableId in project accumulo by apache.

From the class VolumeIT, method verifyVolumesUsed.

private void verifyVolumesUsed(AccumuloClient client, String tableName, boolean shouldExist, Path... paths) throws Exception {
    if (!client.tableOperations().exists(tableName)) {
        assertFalse(shouldExist);
        writeData(tableName, client);
        verifyData(expected, client.createScanner(tableName, Authorizations.EMPTY));
        client.tableOperations().flush(tableName, null, null, true);
    }
    verifyData(expected, client.createScanner(tableName, Authorizations.EMPTY));
    TableId tableId = TableId.of(client.tableOperations().tableIdMap().get(tableName));
    try (Scanner metaScanner = client.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        metaScanner.fetchColumnFamily(DataFileColumnFamily.NAME);
        metaScanner.setRange(new KeyExtent(tableId, null, null).toMetaRange());
        int[] counts = new int[paths.length];
        outer: for (Entry<Key, Value> entry : metaScanner) {
            String path = entry.getKey().getColumnQualifier().toString();
            for (int i = 0; i < paths.length; i++) {
                if (path.startsWith(paths[i].toString())) {
                    counts[i]++;
                    continue outer;
                }
            }
            fail("Unexpected volume " + path);
        }
        // keep retrying until WAL state information in ZooKeeper stabilizes or until test times out
        retry: while (true) {
            WalStateManager wals = new WalStateManager(getServerContext());
            try {
                outer: for (Entry<Path, WalState> entry : wals.getAllState().entrySet()) {
                    for (Path path : paths) {
                        if (entry.getKey().toString().startsWith(path.toString())) {
                            continue outer;
                        }
                    }
                    log.warn("Unexpected volume " + entry.getKey() + " (" + entry.getValue() + ")");
                    continue retry;
                }
            } catch (WalMarkerException e) {
                Throwable cause = e.getCause();
                if (cause instanceof NoNodeException) {
                    // ignore WALs being cleaned up
                    continue retry;
                }
                throw e;
            }
            break;
        }
        // If a volume is chosen randomly for each tablet, the probability that a
        // given volume is never chosen is ((num_volumes - 1) / num_volumes) ^ num_tablets.
        // For 100 tablets and 3 volumes that is (2/3)^100, roughly 2.46e-18, so it is
        // overwhelmingly likely that every volume received at least one file.
        int sum = 0;
        for (int count : counts) {
            assertTrue(count > 0);
            sum += count;
        }
        assertEquals(100, sum);
    }
}
Also used: TableId (org.apache.accumulo.core.data.TableId), Path (org.apache.hadoop.fs.Path), Scanner (org.apache.accumulo.core.client.Scanner), NoNodeException (org.apache.zookeeper.KeeperException.NoNodeException), KeyExtent (org.apache.accumulo.core.dataImpl.KeyExtent), Entry (java.util.Map.Entry), WalStateManager (org.apache.accumulo.server.log.WalStateManager), WalState (org.apache.accumulo.server.log.WalStateManager.WalState), WalMarkerException (org.apache.accumulo.server.log.WalStateManager.WalMarkerException)
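
verifyVolumesUsed leans on Java's labeled continue to restart an outer loop from inside a nested one. Since the control flow is easy to misread, here is a tiny self-contained illustration of the same pattern (all names are illustrative only):

public class LabeledContinueSketch {
    // Every entry must start with at least one accepted prefix. "continue outer"
    // jumps to the next entry as soon as a match is found; falling out of the
    // inner loop without matching is the failure case.
    static void checkPrefixes(String[] entries, String[] prefixes) {
        outer:
        for (String entry : entries) {
            for (String prefix : prefixes) {
                if (entry.startsWith(prefix)) {
                    continue outer;  // matched; move on to the next entry
                }
            }
            throw new IllegalStateException("Unexpected entry " + entry);
        }
    }
}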

Example 80 with TableId

Use of org.apache.accumulo.core.data.TableId in project accumulo by apache.

From the class InMemoryMapIT, method assertEquivalentMutate.

private void assertEquivalentMutate(List<Mutation> mutations) {
    InMemoryMap defaultMap = null;
    InMemoryMap nativeMapWrapper = null;
    InMemoryMap localityGroupMap = null;
    InMemoryMap localityGroupMapWithNative = null;
    try {
        Map<String, String> defaultMapConfig = new HashMap<>();
        defaultMapConfig.put(Property.TSERV_NATIVEMAP_ENABLED.getKey(), "false");
        defaultMapConfig.put(Property.TSERV_MEMDUMP_DIR.getKey(), tempFolder.newFolder().getAbsolutePath());
        defaultMapConfig.put(Property.TABLE_LOCALITY_GROUPS.getKey(), "");
        Map<String, String> nativeMapConfig = new HashMap<>();
        nativeMapConfig.put(Property.TSERV_NATIVEMAP_ENABLED.getKey(), "true");
        nativeMapConfig.put(Property.TSERV_MEMDUMP_DIR.getKey(), tempFolder.newFolder().getAbsolutePath());
        nativeMapConfig.put(Property.TABLE_LOCALITY_GROUPS.getKey(), "");
        Map<String, String> localityGroupConfig = new HashMap<>();
        localityGroupConfig.put(Property.TSERV_NATIVEMAP_ENABLED.getKey(), "false");
        localityGroupConfig.put(Property.TSERV_MEMDUMP_DIR.getKey(), tempFolder.newFolder().getAbsolutePath());
        Map<String, String> localityGroupNativeConfig = new HashMap<>();
        localityGroupNativeConfig.put(Property.TSERV_NATIVEMAP_ENABLED.getKey(), "true");
        localityGroupNativeConfig.put(Property.TSERV_MEMDUMP_DIR.getKey(), tempFolder.newFolder().getAbsolutePath());
        TableId testId = TableId.of("TEST");
        defaultMap = new InMemoryMap(new ConfigurationCopy(defaultMapConfig), getServerContext(), testId);
        nativeMapWrapper = new InMemoryMap(new ConfigurationCopy(nativeMapConfig), getServerContext(), testId);
        localityGroupMap = new InMemoryMap(updateConfigurationForLocalityGroups(new ConfigurationCopy(localityGroupConfig)), getServerContext(), testId);
        localityGroupMapWithNative = new InMemoryMap(updateConfigurationForLocalityGroups(new ConfigurationCopy(localityGroupNativeConfig)), getServerContext(), testId);
    } catch (Exception e) {
        log.error("Error getting new InMemoryMap ", e);
        fail(e.getMessage());
    }
    // ensure each map is of the correct type
    assertEquals("Not a DefaultMap", InMemoryMap.TYPE_DEFAULT_MAP, defaultMap.getMapType());
    assertEquals("Not a NativeMapWrapper", InMemoryMap.TYPE_NATIVE_MAP_WRAPPER, nativeMapWrapper.getMapType());
    assertEquals("Not a LocalityGroupMap", InMemoryMap.TYPE_LOCALITY_GROUP_MAP, localityGroupMap.getMapType());
    assertEquals("Not a LocalityGroupMap with native", InMemoryMap.TYPE_LOCALITY_GROUP_MAP_NATIVE, localityGroupMapWithNative.getMapType());
    int count = 0;
    for (Mutation m : mutations) {
        count += m.size();
    }
    defaultMap.mutate(mutations, count);
    nativeMapWrapper.mutate(mutations, count);
    localityGroupMap.mutate(mutations, count);
    localityGroupMapWithNative.mutate(mutations, count);
    // let's use the transitive property to assert all four are equivalent
    assertMutatesEquivalent(mutations, defaultMap, nativeMapWrapper);
    assertMutatesEquivalent(mutations, defaultMap, localityGroupMap);
    assertMutatesEquivalent(mutations, defaultMap, localityGroupMapWithNative);
}
Also used: TableId (org.apache.accumulo.core.data.TableId), ConfigurationCopy (org.apache.accumulo.core.conf.ConfigurationCopy), HashMap (java.util.HashMap), InMemoryMap (org.apache.accumulo.tserver.InMemoryMap), Mutation (org.apache.accumulo.core.data.Mutation), IOException (java.io.IOException), UncheckedIOException (java.io.UncheckedIOException)
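
Each InMemoryMap above is driven purely by a small property map wrapped in a ConfigurationCopy. A minimal sketch of that configuration pattern, assuming only the properties the test always sets (the helper name and dumpDir parameter are hypothetical):

import java.util.HashMap;
import java.util.Map;
import org.apache.accumulo.core.conf.ConfigurationCopy;
import org.apache.accumulo.core.conf.Property;

public class MapConfigSketch {
    // Build a configuration for an on-heap (non-native) in-memory map with no
    // locality groups; dumpDir stands in for the test's fresh temp folder.
    static ConfigurationCopy onHeapMapConfig(String dumpDir) {
        Map<String, String> conf = new HashMap<>();
        conf.put(Property.TSERV_NATIVEMAP_ENABLED.getKey(), "false");
        conf.put(Property.TSERV_MEMDUMP_DIR.getKey(), dumpDir);
        conf.put(Property.TABLE_LOCALITY_GROUPS.getKey(), "");
        return new ConfigurationCopy(conf);
    }
}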

Aggregations

Usage counts for TableId and commonly co-occurring classes across the project:

TableId (org.apache.accumulo.core.data.TableId): 169
Text (org.apache.hadoop.io.Text): 64
HashMap (java.util.HashMap): 55
KeyExtent (org.apache.accumulo.core.dataImpl.KeyExtent): 55
ArrayList (java.util.ArrayList): 45
Test (org.junit.Test): 43
TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException): 41
Map (java.util.Map): 37
Key (org.apache.accumulo.core.data.Key): 36
AccumuloClient (org.apache.accumulo.core.client.AccumuloClient): 34
HashSet (java.util.HashSet): 31
AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException): 31
Value (org.apache.accumulo.core.data.Value): 31
IOException (java.io.IOException): 28
Scanner (org.apache.accumulo.core.client.Scanner): 28
AccumuloException (org.apache.accumulo.core.client.AccumuloException): 27
Mutation (org.apache.accumulo.core.data.Mutation): 27
List (java.util.List): 26
Range (org.apache.accumulo.core.data.Range): 24
BatchWriter (org.apache.accumulo.core.client.BatchWriter): 23