Example 1 with Table

use of org.apache.accumulo.core.client.impl.Table in project accumulo by apache.

the class SplitRecoveryIT method test.

@Test
public void test() throws Exception {
    String tableName = getUniqueNames(1)[0];
    for (int tn = 0; tn < 2; tn++) {
        Connector connector = getConnector();
        // create a table and put some data in it
        connector.tableOperations().create(tableName);
        BatchWriter bw = connector.createBatchWriter(tableName, new BatchWriterConfig());
        bw.addMutation(m("a"));
        bw.addMutation(m("b"));
        bw.addMutation(m("c"));
        bw.close();
        // take the table offline
        connector.tableOperations().offline(tableName);
        while (!isOffline(tableName, connector)) sleepUninterruptibly(200, TimeUnit.MILLISECONDS);
        // poke a partial split into the metadata table
        connector.securityOperations().grantTablePermission(getAdminPrincipal(), MetadataTable.NAME, TablePermission.WRITE);
        Table.ID tableId = Table.ID.of(connector.tableOperations().tableIdMap().get(tableName));
        KeyExtent extent = new KeyExtent(tableId, null, new Text("b"));
        Mutation m = extent.getPrevRowUpdateMutation();
        TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN.put(m, new Value(Double.toString(0.5).getBytes()));
        TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN.put(m, KeyExtent.encodePrevEndRow(null));
        bw = connector.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
        bw.addMutation(m);
        if (tn == 1) {
            bw.flush();
            try (Scanner scanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
                scanner.setRange(extent.toMetadataRange());
                scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
                KeyExtent extent2 = new KeyExtent(tableId, new Text("b"), null);
                m = extent2.getPrevRowUpdateMutation();
                TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value("/t2".getBytes()));
                TabletsSection.ServerColumnFamily.TIME_COLUMN.put(m, new Value("M0".getBytes()));
                for (Entry<Key, Value> entry : scanner) {
                    m.put(DataFileColumnFamily.NAME, entry.getKey().getColumnQualifier(), entry.getValue());
                }
                bw.addMutation(m);
            }
        }
        bw.close();
        // bring the table online
        connector.tableOperations().online(tableName);
        // verify the tablets went online
        try (Scanner scanner = connector.createScanner(tableName, Authorizations.EMPTY)) {
            int i = 0;
            String[] expected = { "a", "b", "c" };
            for (Entry<Key, Value> entry : scanner) {
                assertEquals(expected[i], entry.getKey().getRow().toString());
                i++;
            }
            assertEquals(3, i);
            connector.tableOperations().delete(tableName);
        }
    }
}
Also used : Connector(org.apache.accumulo.core.client.Connector) Scanner(org.apache.accumulo.core.client.Scanner) Table(org.apache.accumulo.core.client.impl.Table) MetadataTable(org.apache.accumulo.core.metadata.MetadataTable) Text(org.apache.hadoop.io.Text) KeyExtent(org.apache.accumulo.core.data.impl.KeyExtent) Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Key(org.apache.accumulo.core.data.Key) Test(org.junit.Test)
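
The test above relies on two helpers defined elsewhere in SplitRecoveryIT that the snippet does not show. A minimal sketch of what they might look like, assuming a row-only mutation helper and an offline check that waits for tablet locations to disappear from the metadata table (the actual helpers may differ):

private static Mutation m(String row) {
    Mutation result = new Mutation(row);
    // a single placeholder cell is enough to materialize the row
    result.put("cf", "cq", "value");
    return result;
}

private boolean isOffline(String tableName, Connector connector) throws TableNotFoundException {
    Table.ID tableId = Table.ID.of(connector.tableOperations().tableIdMap().get(tableName));
    try (Scanner scanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        // a table is offline once no tablet has a current location in the metadata table
        scanner.setRange(new KeyExtent(tableId, null, null).toMetadataRange());
        scanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
        return !scanner.iterator().hasNext();
    }
}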

Example 2 with Table

use of org.apache.accumulo.core.client.impl.Table in project accumulo by apache.

the class VolumeChooserIT method twoTablesRandomVolumeChooser.

// Test that uses two tables with 10 split points each. They each use the RandomVolumeChooser to choose volumes.
@Test
public void twoTablesRandomVolumeChooser() throws Exception {
    log.info("Starting twoTablesRandomVolumeChooser()");
    // Create namespace
    Connector connector = getConnector();
    connector.namespaceOperations().create(namespace1);
    // Set properties on the namespace
    connector.namespaceOperations().setProperty(namespace1, PerTableVolumeChooser.TABLE_VOLUME_CHOOSER, RandomVolumeChooser.class.getName());
    // Create table1 on namespace1
    String tableName = namespace1 + ".1";
    connector.tableOperations().create(tableName);
    Table.ID tableID = Table.ID.of(connector.tableOperations().tableIdMap().get(tableName));
    // Add 10 splits to the table
    addSplits(connector, tableName);
    // Write some data to the table
    writeAndReadData(connector, tableName);
    // Verify the new files are written to the Volumes specified
    verifyVolumes(connector, tableName, TabletsSection.getRange(tableID), v1.toString() + "," + v2.toString() + "," + v4.toString());
    connector.namespaceOperations().create(namespace2);
    // Set properties on the namespace
    connector.namespaceOperations().setProperty(namespace2, PerTableVolumeChooser.TABLE_VOLUME_CHOOSER, RandomVolumeChooser.class.getName());
    // Create table2 on namespace2
    String tableName2 = namespace2 + ".1";
    connector.tableOperations().create(tableName2);
    Table.ID tableID2 = Table.ID.of(connector.tableOperations().tableIdMap().get(tableName2));
    // Add 10 splits to the table
    addSplits(connector, tableName2);
    // Write some data to the table
    writeAndReadData(connector, tableName2);
    // Verify the new files are written to the Volumes specified
    verifyVolumes(connector, tableName2, TabletsSection.getRange(tableID2), v1.toString() + "," + v2.toString() + "," + v4.toString());
}
Also used : RandomVolumeChooser(org.apache.accumulo.server.fs.RandomVolumeChooser) Connector(org.apache.accumulo.core.client.Connector) MetadataTable(org.apache.accumulo.core.metadata.MetadataTable) Table(org.apache.accumulo.core.client.impl.Table) Test(org.junit.Test)
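
addSplits(connector, tableName) above is another test helper. A plausible sketch, assuming ten single-letter split points (the real VolumeChooserIT helper may pick different rows); it needs java.util.SortedSet and java.util.TreeSet:

private void addSplits(Connector connector, String tableName) throws Exception {
    SortedSet<Text> partitions = new TreeSet<>();
    for (String s : "b,e,g,j,l,o,q,t,v,y".split(",")) {
        partitions.add(new Text(s));
    }
    // ten split points yield eleven tablets for the volume chooser to place
    connector.tableOperations().addSplits(tableName, partitions);
}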

Example 3 with Table

use of org.apache.accumulo.core.client.impl.Table in project accumulo by apache.

the class ReplicationIT method getAllLogs.

private Multimap<String, Table.ID> getAllLogs(Connector conn) throws Exception {
    Multimap<String, Table.ID> logs = getLogs(conn);
    try (Scanner scanner = conn.createScanner(ReplicationTable.NAME, Authorizations.EMPTY)) {
        StatusSection.limit(scanner);
        Text buff = new Text();
        for (Entry<Key, Value> entry : scanner) {
            if (Thread.interrupted()) {
                Thread.currentThread().interrupt();
                return logs;
            }
            StatusSection.getFile(entry.getKey(), buff);
            String file = buff.toString();
            Table.ID tableId = StatusSection.getTableId(entry.getKey());
            logs.put(file, tableId);
        }
    } catch (TableOfflineException e) {
        log.debug("Replication table isn't online yet");
    }
    return logs;
}
Also used : Scanner(org.apache.accumulo.core.client.Scanner) MetadataTable(org.apache.accumulo.core.metadata.MetadataTable) Table(org.apache.accumulo.core.client.impl.Table) ReplicationTable(org.apache.accumulo.core.replication.ReplicationTable) TableOfflineException(org.apache.accumulo.core.client.TableOfflineException) ReplicationTableOfflineException(org.apache.accumulo.core.replication.ReplicationTableOfflineException) Value(org.apache.accumulo.core.data.Value) Text(org.apache.hadoop.io.Text) UUID(java.util.UUID) Key(org.apache.accumulo.core.data.Key)
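
A hypothetical call site for getAllLogs, assuming a Guava Multimap, java.util.Map.Entry, and an SLF4J logger named log as in the surrounding test class:

Multimap<String, Table.ID> walsToTables = getAllLogs(conn);
for (Entry<String, Table.ID> entry : walsToTables.entries()) {
    // each WAL file can be referenced by several tables
    log.info("WAL {} is referenced by table {}", entry.getKey(), entry.getValue());
}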

Example 4 with Table

use of org.apache.accumulo.core.client.impl.Table in project accumulo by apache.

the class ReplicationIT method replicationEntriesPrecludeWalDeletion.

@Test
public void replicationEntriesPrecludeWalDeletion() throws Exception {
    final Connector conn = getConnector();
    String table1 = "table1", table2 = "table2", table3 = "table3";
    final Multimap<String, Table.ID> logs = HashMultimap.create();
    final AtomicBoolean keepRunning = new AtomicBoolean(true);
    Thread t = new Thread(new Runnable() {

        @Override
        public void run() {
            // Should really be able to interrupt here, but the Scanner throws a fit to the logger
            // when that happens
            while (keepRunning.get()) {
                try {
                    logs.putAll(getAllLogs(conn));
                } catch (Exception e) {
                    log.error("Error getting logs", e);
                }
            }
        }
    });
    t.start();
    conn.tableOperations().create(table1);
    conn.tableOperations().setProperty(table1, Property.TABLE_REPLICATION.getKey(), "true");
    conn.tableOperations().setProperty(table1, Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "1");
    Thread.sleep(2000);
    // Write some data to table1
    writeSomeData(conn, table1, 200, 500);
    conn.tableOperations().create(table2);
    conn.tableOperations().setProperty(table2, Property.TABLE_REPLICATION.getKey(), "true");
    conn.tableOperations().setProperty(table2, Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "1");
    Thread.sleep(2000);
    writeSomeData(conn, table2, 200, 500);
    conn.tableOperations().create(table3);
    conn.tableOperations().setProperty(table3, Property.TABLE_REPLICATION.getKey(), "true");
    conn.tableOperations().setProperty(table3, Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "1");
    Thread.sleep(2000);
    writeSomeData(conn, table3, 200, 500);
    // Force a write to metadata for the data written
    for (String table : Arrays.asList(table1, table2, table3)) {
        conn.tableOperations().flush(table, null, null, true);
    }
    keepRunning.set(false);
    t.join(5000);
    // The master is only running every second to create records in the replication table from the metadata table
    // Sleep a sufficient amount of time to ensure that we get the straggling WALs that might have been created at the end
    Thread.sleep(5000);
    Set<String> replFiles = getReferencesToFilesToBeReplicated(conn);
    // We might have a WAL that was used solely for the replication table
    // We want to remove that from our list as it should not appear in the replication table
    String replicationTableId = conn.tableOperations().tableIdMap().get(ReplicationTable.NAME);
    Iterator<Entry<String, Table.ID>> observedLogs = logs.entries().iterator();
    while (observedLogs.hasNext()) {
        Entry<String, Table.ID> observedLog = observedLogs.next();
        if (replicationTableId.equals(observedLog.getValue().canonicalID())) {
            log.info("Removing {} because its tableId is for the replication table", observedLog);
            observedLogs.remove();
        }
    }
    // We should have *some* reference to each log that was seen in the metadata table
    // They might not yet all be closed though (a WAL may still be open as a tserver's current file)
    Assert.assertTrue("Metadata log distribution: " + logs + "replFiles " + replFiles, logs.keySet().containsAll(replFiles));
    Assert.assertTrue("Difference between replication entries and current logs is bigger than one", logs.keySet().size() - replFiles.size() <= 1);
    final Configuration conf = new Configuration();
    for (String replFile : replFiles) {
        Path p = new Path(replFile);
        FileSystem fs = p.getFileSystem(conf);
        if (!fs.exists(p)) {
            // double-check: the garbage collector can be fast
            Set<String> currentSet = getReferencesToFilesToBeReplicated(conn);
            log.info("Current references {}", currentSet);
            log.info("Looking for reference to {}", replFile);
            log.info("Contains? {}", currentSet.contains(replFile));
            Assert.assertTrue("File does not exist anymore, it was likely incorrectly garbage collected: " + p, !currentSet.contains(replFile));
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Connector(org.apache.accumulo.core.client.Connector) MetadataTable(org.apache.accumulo.core.metadata.MetadataTable) Table(org.apache.accumulo.core.client.impl.Table) ReplicationTable(org.apache.accumulo.core.replication.ReplicationTable) Configuration(org.apache.hadoop.conf.Configuration) TableOfflineException(org.apache.accumulo.core.client.TableOfflineException) URISyntaxException(java.net.URISyntaxException) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) ReplicationTableOfflineException(org.apache.accumulo.core.replication.ReplicationTableOfflineException) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) NoSuchElementException(java.util.NoSuchElementException) AccumuloException(org.apache.accumulo.core.client.AccumuloException) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Entry(java.util.Map.Entry) LogEntry(org.apache.accumulo.core.tabletserver.log.LogEntry) FileSystem(org.apache.hadoop.fs.FileSystem) RawLocalFileSystem(org.apache.hadoop.fs.RawLocalFileSystem) UUID(java.util.UUID) Test(org.junit.Test)
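
writeSomeData(conn, table, rows, cols) is a test helper not shown here. A minimal sketch inferred from the call sites above (the parameter names and cell contents are assumptions):

private void writeSomeData(Connector conn, String table, int rows, int cols) throws Exception {
    BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
    byte[] empty = new byte[0];
    for (int row = 0; row < rows; row++) {
        Mutation m = new Mutation(Integer.toString(row));
        for (int col = 0; col < cols; col++) {
            // empty values are enough to generate WAL traffic
            m.put(Integer.toString(col).getBytes(), empty, empty);
        }
        bw.addMutation(m);
    }
    bw.close();
}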

Example 5 with Table

use of org.apache.accumulo.core.client.impl.Table in project accumulo by apache.

the class ReplicationIT method filesClosedAfterUnused.

@Test
public void filesClosedAfterUnused() throws Exception {
    Connector conn = getConnector();
    String table = "table";
    conn.tableOperations().create(table);
    Table.ID tableId = Table.ID.of(conn.tableOperations().tableIdMap().get(table));
    Assert.assertNotNull(tableId);
    conn.tableOperations().setProperty(table, Property.TABLE_REPLICATION.getKey(), "true");
    conn.tableOperations().setProperty(table, Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "1");
    // cluster1 is a mock peer that just sleeps (50000 ms) instead of actually replicating
    conn.instanceOperations().setProperty(Property.REPLICATION_PEERS.getKey() + "cluster1", ReplicaSystemFactory.getPeerConfigurationValue(MockReplicaSystem.class, "50000"));
    // Write a mutation to make a log file
    BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
    Mutation m = new Mutation("one");
    m.put("", "", "");
    bw.addMutation(m);
    bw.close();
    // Write another to make sure the logger rolls itself?
    bw = conn.createBatchWriter(table, new BatchWriterConfig());
    m = new Mutation("three");
    m.put("", "", "");
    bw.addMutation(m);
    bw.close();
    try (Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        s.fetchColumnFamily(TabletsSection.LogColumnFamily.NAME);
        s.setRange(TabletsSection.getRange(tableId));
        Set<String> wals = new HashSet<>();
        for (Entry<Key, Value> entry : s) {
            LogEntry logEntry = LogEntry.fromKeyValue(entry.getKey(), entry.getValue());
            wals.add(new Path(logEntry.filename).toString());
        }
        log.warn("Found wals {}", wals);
        bw = conn.createBatchWriter(table, new BatchWriterConfig());
        m = new Mutation("three");
        byte[] bytes = new byte[1024 * 1024];
        m.put("1".getBytes(), new byte[0], bytes);
        m.put("2".getBytes(), new byte[0], bytes);
        m.put("3".getBytes(), new byte[0], bytes);
        m.put("4".getBytes(), new byte[0], bytes);
        m.put("5".getBytes(), new byte[0], bytes);
        bw.addMutation(m);
        bw.close();
        conn.tableOperations().flush(table, null, null, true);
        while (!ReplicationTable.isOnline(conn)) {
            sleepUninterruptibly(MILLIS_BETWEEN_REPLICATION_TABLE_ONLINE_CHECKS, TimeUnit.MILLISECONDS);
        }
        for (int i = 0; i < 10; i++) {
            try (Scanner s2 = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
                s2.fetchColumnFamily(LogColumnFamily.NAME);
                s2.setRange(TabletsSection.getRange(tableId));
                for (Entry<Key, Value> entry : s2) {
                    log.info("{}={}", entry.getKey().toStringNoTruncate(), entry.getValue());
                }
            }
            try (Scanner s3 = ReplicationTable.getScanner(conn)) {
                StatusSection.limit(s3);
                Text buff = new Text();
                boolean allReferencedLogsClosed = true;
                int recordsFound = 0;
                for (Entry<Key, Value> e : s3) {
                    recordsFound++;
                    // note: do not reset allReferencedLogsClosed per record, or an unclosed
                    // log seen earlier would be masked by a later closed one
                    StatusSection.getFile(e.getKey(), buff);
                    String file = buff.toString();
                    if (wals.contains(file)) {
                        Status stat = Status.parseFrom(e.getValue().get());
                        if (!stat.getClosed()) {
                            log.info("{} wasn't closed", file);
                            allReferencedLogsClosed = false;
                        }
                    }
                }
                if (recordsFound > 0 && allReferencedLogsClosed) {
                    return;
                }
                Thread.sleep(2000);
            } catch (RuntimeException e) {
                Throwable cause = e.getCause();
                if (cause instanceof AccumuloSecurityException) {
                    AccumuloSecurityException ase = (AccumuloSecurityException) cause;
                    switch(ase.getSecurityErrorCode()) {
                        case PERMISSION_DENIED:
                            // We tried to read the replication table before the GRANT went through
                            Thread.sleep(2000);
                            break;
                        default:
                            throw e;
                    }
                }
            }
        }
        Assert.fail("We had a file that was referenced but didn't get closed");
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Status(org.apache.accumulo.server.replication.proto.Replication.Status) Connector(org.apache.accumulo.core.client.Connector) Scanner(org.apache.accumulo.core.client.Scanner) MetadataTable(org.apache.accumulo.core.metadata.MetadataTable) Table(org.apache.accumulo.core.client.impl.Table) ReplicationTable(org.apache.accumulo.core.replication.ReplicationTable) Text(org.apache.hadoop.io.Text) Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Key(org.apache.accumulo.core.data.Key) LogEntry(org.apache.accumulo.core.tabletserver.log.LogEntry) HashSet(java.util.HashSet) Test(org.junit.Test)

Aggregations

Table (org.apache.accumulo.core.client.impl.Table): 55 uses
MetadataTable (org.apache.accumulo.core.metadata.MetadataTable): 34 uses
Value (org.apache.accumulo.core.data.Value): 27 uses
Key (org.apache.accumulo.core.data.Key): 25 uses
Text (org.apache.hadoop.io.Text): 25 uses
Scanner (org.apache.accumulo.core.client.Scanner): 21 uses
KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent): 21 uses
Test (org.junit.Test): 21 uses
Connector (org.apache.accumulo.core.client.Connector): 19 uses
Mutation (org.apache.accumulo.core.data.Mutation): 18 uses
ReplicationTable (org.apache.accumulo.core.replication.ReplicationTable): 18 uses
BatchWriter (org.apache.accumulo.core.client.BatchWriter): 17 uses
TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException): 17 uses
AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException): 15 uses
ArrayList (java.util.ArrayList): 14 uses
AccumuloException (org.apache.accumulo.core.client.AccumuloException): 14 uses
Path (org.apache.hadoop.fs.Path): 14 uses
HashSet (java.util.HashSet): 11 uses
BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig): 11 uses
HashMap (java.util.HashMap): 9 uses
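
Every example above follows the same underlying pattern: resolve a table name to its internal Table.ID, then use that ID with metadata-level APIs. A minimal sketch, assuming a live Connector named connector and a table named "mytable":

String idString = connector.tableOperations().tableIdMap().get("mytable");
Table.ID tableId = Table.ID.of(idString); // strongly typed internal table ID
// the metadata rows for this table, e.g. for scanning tablet state
Range metadataRange = TabletsSection.getRange(tableId);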