Example 6 with Table

Use of org.apache.accumulo.core.client.impl.Table in project accumulo by apache.

From class ReplicationIT, the method correctClusterNameInWorkEntry:

@Test
public void correctClusterNameInWorkEntry() throws Exception {
    Connector conn = getConnector();
    String table1 = "table1";
    // replication shouldn't be online when we begin
    Assert.assertFalse(ReplicationTable.isOnline(conn));
    // Create the table
    conn.tableOperations().create(table1);
    int attempts = 5;
    while (attempts > 0) {
        try {
            // Enable replication on table1
            conn.tableOperations().setProperty(table1, Property.TABLE_REPLICATION.getKey(), "true");
            // Replicate table1 to the table with ID '4' on the peer cluster1
            conn.tableOperations().setProperty(table1, Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "4");
            attempts = 0;
        } catch (Exception e) {
            attempts--;
            if (attempts <= 0) {
                throw e;
            }
            sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
        }
    }
    // Write some data to table1
    writeSomeData(conn, table1, 2000, 50);
    conn.tableOperations().flush(table1, null, null, true);
    Table.ID tableId = Table.ID.of(conn.tableOperations().tableIdMap().get(table1));
    Assert.assertNotNull("Table ID was null", tableId);
    // Wait for the replication table to come online
    while (!ReplicationTable.isOnline(conn)) {
        sleepUninterruptibly(MILLIS_BETWEEN_REPLICATION_TABLE_ONLINE_CHECKS, TimeUnit.MILLISECONDS);
    }
    Assert.assertTrue("Replication table did not exist", ReplicationTable.isOnline(conn));
    for (int i = 0; i < 5 && !conn.securityOperations().hasTablePermission("root", ReplicationTable.NAME, TablePermission.READ); i++) {
        Thread.sleep(1000);
    }
    Assert.assertTrue(conn.securityOperations().hasTablePermission("root", ReplicationTable.NAME, TablePermission.READ));
    boolean notFound = true;
    for (int i = 0; i < 10 && notFound; i++) {
        try (Scanner s = ReplicationTable.getScanner(conn)) {
            WorkSection.limit(s);
            try {
                Entry<Key, Value> e = Iterables.getOnlyElement(s);
                Text expectedColqual = new ReplicationTarget("cluster1", "4", tableId).toText();
                Assert.assertEquals(expectedColqual, e.getKey().getColumnQualifier());
                notFound = false;
            } catch (NoSuchElementException e) {
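                // No work entry yet; fall through to the sleep below and retry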
            } catch (IllegalArgumentException e) {
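                // Multiple entries matched the work section; dump the table for debugging, then fail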
                try (Scanner s2 = ReplicationTable.getScanner(conn)) {
                    for (Entry<Key, Value> content : s2) {
                        log.info("{} => {}", content.getKey().toStringNoTruncate(), content.getValue());
                    }
                    Assert.fail("Found more than one work section entry");
                }
            }
            Thread.sleep(500);
        }
    }
    if (notFound) {
        try (Scanner s = ReplicationTable.getScanner(conn)) {
            for (Entry<Key, Value> content : s) {
                log.info("{} => {}", content.getKey().toStringNoTruncate(), content.getValue());
            }
            Assert.assertFalse("Did not find the work entry for the status entry", notFound);
        }
    }
}
Also used : Connector(org.apache.accumulo.core.client.Connector) Scanner(org.apache.accumulo.core.client.Scanner) MetadataTable(org.apache.accumulo.core.metadata.MetadataTable) Table(org.apache.accumulo.core.client.impl.Table) ReplicationTable(org.apache.accumulo.core.replication.ReplicationTable) Text(org.apache.hadoop.io.Text) TableOfflineException(org.apache.accumulo.core.client.TableOfflineException) URISyntaxException(java.net.URISyntaxException) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) ReplicationTableOfflineException(org.apache.accumulo.core.replication.ReplicationTableOfflineException) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) NoSuchElementException(java.util.NoSuchElementException) AccumuloException(org.apache.accumulo.core.client.AccumuloException) Entry(java.util.Map.Entry) LogEntry(org.apache.accumulo.core.tabletserver.log.LogEntry) ReplicationTarget(org.apache.accumulo.core.replication.ReplicationTarget) Value(org.apache.accumulo.core.data.Value) Key(org.apache.accumulo.core.data.Key) Test(org.junit.Test)
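
A minimal standalone sketch of the same enable-replication-and-retry pattern. The instance name "test", the ZooKeeper quorum "localhost:2181", and the root/secret credentials are hypothetical placeholders; everything else uses the same API calls as the test above.

import java.util.concurrent.TimeUnit;

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.ZooKeeperInstance;
import org.apache.accumulo.core.client.impl.Table;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;
import org.apache.accumulo.core.conf.Property;

public class EnableReplicationSketch {

    public static void main(String[] args) throws Exception {
        // Hypothetical instance name, ZooKeeper quorum, and credentials
        Connector conn = new ZooKeeperInstance("test", "localhost:2181")
            .getConnector("root", new PasswordToken("secret"));
        String table = "table1";
        conn.tableOperations().create(table);
        // Retry the property updates a few times, as the test does, in case
        // the freshly created table is not yet ready to accept them
        for (int attempts = 5; attempts > 0; attempts--) {
            try {
                conn.tableOperations().setProperty(table, Property.TABLE_REPLICATION.getKey(), "true");
                conn.tableOperations().setProperty(table, Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "4");
                break;
            } catch (Exception e) {
                if (attempts == 1)
                    throw e;
                TimeUnit.MILLISECONDS.sleep(500);
            }
        }
        // Resolve the table name to its internal ID, as the tests do
        Table.ID tableId = Table.ID.of(conn.tableOperations().tableIdMap().get(table));
        System.out.println(table + " has ID " + tableId.canonicalID());
    }
}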

Example 7 with Table

Use of org.apache.accumulo.core.client.impl.Table in project accumulo by apache.

From class WorkMakerIT, the method singleUnitMultipleTargets:

@Test
public void singleUnitMultipleTargets() throws Exception {
    String table = testName.getMethodName();
    conn.tableOperations().create(table);
    Table.ID tableId = Table.ID.of(conn.tableOperations().tableIdMap().get(table));
    String file = "hdfs://localhost:8020/accumulo/wal/123456-1234-1234-12345678";
    Mutation m = new Mutation(new Path(file).toString());
    m.put(StatusSection.NAME, new Text(tableId.getUtf8()), StatusUtil.fileCreatedValue(System.currentTimeMillis()));
    BatchWriter bw = ReplicationTable.getBatchWriter(conn);
    bw.addMutation(m);
    bw.flush();
    // Assert that we have one record in the status section
    Set<ReplicationTarget> expectedTargets = new HashSet<>();
    try (Scanner s = ReplicationTable.getScanner(conn)) {
        StatusSection.limit(s);
        Assert.assertEquals(1, Iterables.size(s));
        MockWorkMaker workMaker = new MockWorkMaker(conn);
        Map<String, String> targetClusters = ImmutableMap.of("remote_cluster_1", "4", "remote_cluster_2", "6", "remote_cluster_3", "8");
        for (Entry<String, String> cluster : targetClusters.entrySet()) {
            expectedTargets.add(new ReplicationTarget(cluster.getKey(), cluster.getValue(), tableId));
        }
        workMaker.setBatchWriter(bw);
        workMaker.addWorkRecord(new Text(file), StatusUtil.fileCreatedValue(System.currentTimeMillis()), targetClusters, tableId);
    }
    try (Scanner s = ReplicationTable.getScanner(conn)) {
        WorkSection.limit(s);
        Set<ReplicationTarget> actualTargets = new HashSet<>();
        for (Entry<Key, Value> entry : s) {
            Assert.assertEquals(file, entry.getKey().getRow().toString());
            Assert.assertEquals(WorkSection.NAME, entry.getKey().getColumnFamily());
            ReplicationTarget target = ReplicationTarget.from(entry.getKey().getColumnQualifier());
            actualTargets.add(target);
        }
        for (ReplicationTarget expected : expectedTargets) {
            Assert.assertTrue("Did not find expected target: " + expected, actualTargets.contains(expected));
            actualTargets.remove(expected);
        }
        Assert.assertTrue("Found extra replication work entries: " + actualTargets, actualTargets.isEmpty());
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Scanner(org.apache.accumulo.core.client.Scanner) Table(org.apache.accumulo.core.client.impl.Table) ReplicationTable(org.apache.accumulo.core.replication.ReplicationTable) Text(org.apache.hadoop.io.Text) ReplicationTarget(org.apache.accumulo.core.replication.ReplicationTarget) Value(org.apache.accumulo.core.data.Value) Mutation(org.apache.accumulo.core.data.Mutation) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Key(org.apache.accumulo.core.data.Key) HashSet(java.util.HashSet) Test(org.junit.Test)
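
The round trip above hinges on ReplicationTarget serializing into the work entry's column qualifier. A small sketch of that serialization, using only classes the test already exercises; the source table ID "2" is an arbitrary example value.

import org.apache.accumulo.core.client.impl.Table;
import org.apache.accumulo.core.replication.ReplicationTarget;
import org.apache.hadoop.io.Text;

public class ReplicationTargetRoundTrip {

    public static void main(String[] args) {
        // A target names the peer cluster, the table ID on that peer, and the local source table ID
        ReplicationTarget target = new ReplicationTarget("remote_cluster_1", "4", Table.ID.of("2"));
        // Work entries store the target serialized into the key's column qualifier
        Text columnQualifier = target.toText();
        // Reading a work entry back reverses the serialization
        ReplicationTarget parsed = ReplicationTarget.from(columnQualifier);
        // Value equality is what lets the test collect targets in a HashSet
        System.out.println(parsed.equals(target)); // true
    }
}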

Example 8 with Table

Use of org.apache.accumulo.core.client.impl.Table in project accumulo by apache.

From class MasterClientServiceHandler, the method waitForFlush:

@Override
public void waitForFlush(TInfo tinfo, TCredentials c, String tableIdStr, ByteBuffer startRow, ByteBuffer endRow, long flushID, long maxLoops) throws ThriftSecurityException, ThriftTableOperationException {
    Table.ID tableId = Table.ID.of(tableIdStr);
    Namespace.ID namespaceId = getNamespaceIdFromTableId(TableOperation.FLUSH, tableId);
    master.security.canFlush(c, tableId, namespaceId);
    if (endRow != null && startRow != null && ByteBufferUtil.toText(startRow).compareTo(ByteBufferUtil.toText(endRow)) >= 0)
        throw new ThriftTableOperationException(tableId.canonicalID(), null, TableOperation.FLUSH, TableOperationExceptionType.BAD_RANGE, "start row must be less than end row");
    Set<TServerInstance> serversToFlush = new HashSet<>(master.tserverSet.getCurrentServers());
    for (long l = 0; l < maxLoops; l++) {
        for (TServerInstance instance : serversToFlush) {
            try {
                final TServerConnection server = master.tserverSet.getConnection(instance);
                if (server != null)
                    server.flush(master.masterLock, tableId, ByteBufferUtil.toBytes(startRow), ByteBufferUtil.toBytes(endRow));
            } catch (TException ex) {
                Master.log.error(ex.toString());
            }
        }
        if (l == maxLoops - 1)
            break;
        sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
        serversToFlush.clear();
        try {
            Connector conn = master.getConnector();
            Scanner scanner;
            if (tableId.equals(MetadataTable.ID)) {
                scanner = new IsolatedScanner(conn.createScanner(RootTable.NAME, Authorizations.EMPTY));
                scanner.setRange(MetadataSchema.TabletsSection.getRange());
            } else {
                scanner = new IsolatedScanner(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY));
                Range range = new KeyExtent(tableId, null, ByteBufferUtil.toText(startRow)).toMetadataRange();
                scanner.setRange(range.clip(MetadataSchema.TabletsSection.getRange()));
            }
            TabletsSection.ServerColumnFamily.FLUSH_COLUMN.fetch(scanner);
            TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(scanner);
            scanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
            scanner.fetchColumnFamily(LogColumnFamily.NAME);
            RowIterator ri = new RowIterator(scanner);
            int tabletsToWaitFor = 0;
            int tabletCount = 0;
            Text ert = ByteBufferUtil.toText(endRow);
            while (ri.hasNext()) {
                Iterator<Entry<Key, Value>> row = ri.next();
                long tabletFlushID = -1;
                int logs = 0;
                boolean online = false;
                TServerInstance server = null;
                Entry<Key, Value> entry = null;
                while (row.hasNext()) {
                    entry = row.next();
                    Key key = entry.getKey();
                    if (TabletsSection.ServerColumnFamily.FLUSH_COLUMN.equals(key.getColumnFamily(), key.getColumnQualifier())) {
                        tabletFlushID = Long.parseLong(entry.getValue().toString());
                    }
                    if (LogColumnFamily.NAME.equals(key.getColumnFamily()))
                        logs++;
                    if (TabletsSection.CurrentLocationColumnFamily.NAME.equals(key.getColumnFamily())) {
                        online = true;
                        server = new TServerInstance(entry.getValue(), key.getColumnQualifier());
                    }
                }
                // when tablet is not online and has no logs, there is no reason to wait for it
                if ((online || logs > 0) && tabletFlushID < flushID) {
                    tabletsToWaitFor++;
                    if (server != null)
                        serversToFlush.add(server);
                }
                tabletCount++;
                Text tabletEndRow = new KeyExtent(entry.getKey().getRow(), (Text) null).getEndRow();
                if (tabletEndRow == null || (ert != null && tabletEndRow.compareTo(ert) >= 0))
                    break;
            }
            if (tabletsToWaitFor == 0)
                break;
            if (tabletCount == 0 && !Tables.exists(master.getInstance(), tableId))
                throw new ThriftTableOperationException(tableId.canonicalID(), null, TableOperation.FLUSH, TableOperationExceptionType.NOTFOUND, null);
        } catch (AccumuloException | TabletDeletedException e) {
            Master.log.debug("Failed to scan {} table to wait for flush {}", MetadataTable.NAME, tableId, e);
        } catch (AccumuloSecurityException e) {
            Master.log.warn("{}", e.getMessage(), e);
            throw new ThriftSecurityException();
        } catch (TableNotFoundException e) {
            Master.log.error("{}", e.getMessage(), e);
            throw new ThriftTableOperationException();
        }
    }
}
Also used : TException(org.apache.thrift.TException) Connector(org.apache.accumulo.core.client.Connector) BatchScanner(org.apache.accumulo.core.client.BatchScanner) IsolatedScanner(org.apache.accumulo.core.client.IsolatedScanner) Scanner(org.apache.accumulo.core.client.Scanner) TKeyExtent(org.apache.accumulo.core.data.thrift.TKeyExtent) KeyExtent(org.apache.accumulo.core.data.impl.KeyExtent) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) Entry(java.util.Map.Entry) ThriftTableOperationException(org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) HashSet(java.util.HashSet) AccumuloException(org.apache.accumulo.core.client.AccumuloException) MetadataTable(org.apache.accumulo.core.metadata.MetadataTable) RootTable(org.apache.accumulo.core.metadata.RootTable) Table(org.apache.accumulo.core.client.impl.Table) ReplicationTable(org.apache.accumulo.core.replication.ReplicationTable) Text(org.apache.hadoop.io.Text) Range(org.apache.accumulo.core.data.Range) TabletDeletedException(org.apache.accumulo.server.util.TabletIterator.TabletDeletedException) ThriftSecurityException(org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException) Namespace(org.apache.accumulo.core.client.impl.Namespace) TServerInstance(org.apache.accumulo.server.master.state.TServerInstance) TServerConnection(org.apache.accumulo.server.master.LiveTServerSet.TServerConnection) RowIterator(org.apache.accumulo.core.client.RowIterator) Value(org.apache.accumulo.core.data.Value) Key(org.apache.accumulo.core.data.Key)
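
The heart of waitForFlush is the metadata scan that decides which tablets still lag behind the requested flush ID. A stripped-down sketch of just that scan, assuming the caller already has a Connector and the table's ID; the helper name is hypothetical.

import java.util.Iterator;
import java.util.Map.Entry;

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.IsolatedScanner;
import org.apache.accumulo.core.client.RowIterator;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.client.impl.Table;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.metadata.MetadataTable;
import org.apache.accumulo.core.metadata.schema.MetadataSchema;
import org.apache.accumulo.core.security.Authorizations;

public class FlushColumnSketch {

    // Print the stored flush ID for every tablet of the table that has one
    static void printFlushIds(Connector conn, Table.ID tableId) throws Exception {
        Scanner scanner = new IsolatedScanner(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY));
        scanner.setRange(MetadataSchema.TabletsSection.getRange(tableId));
        MetadataSchema.TabletsSection.ServerColumnFamily.FLUSH_COLUMN.fetch(scanner);
        // RowIterator groups the flat key/value stream into one iterator per tablet row
        RowIterator rows = new RowIterator(scanner);
        while (rows.hasNext()) {
            Iterator<Entry<Key,Value>> row = rows.next();
            while (row.hasNext()) {
                Entry<Key,Value> entry = row.next();
                System.out.println(entry.getKey().getRow() + " flushID=" + entry.getValue());
            }
        }
    }
}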

Example 9 with Table

Use of org.apache.accumulo.core.client.impl.Table in project accumulo by apache.

From class GarbageCollectorCommunicatesWithTServersIT, the method getFilesForTable:

/**
 * Fetch all of the rfiles referenced in the metadata table by this table's tablets
 */
private Set<String> getFilesForTable(String tableName) throws Exception {
    final Connector conn = getConnector();
    final Table.ID tableId = Table.ID.of(conn.tableOperations().tableIdMap().get(tableName));
    Assert.assertNotNull("Could not determine table ID for " + tableName, tableId);
    Set<String> rfiles = new HashSet<>();
    try (Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        Range r = MetadataSchema.TabletsSection.getRange(tableId);
        s.setRange(r);
        s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
        for (Entry<Key, Value> entry : s) {
            log.debug("Reading RFiles: {}={}", entry.getKey().toStringNoTruncate(), entry.getValue());
            // The column qualifier holds the file path, e.g. uri://path/to/rfile
            String cq = entry.getKey().getColumnQualifier().toString();
            String path = new Path(cq).toString();
            log.debug("Normalize path to rfile: {}", path);
            rfiles.add(path);
        }
    }
    return rfiles;
}
Also used : Path(org.apache.hadoop.fs.Path) Connector(org.apache.accumulo.core.client.Connector) Scanner(org.apache.accumulo.core.client.Scanner) MetadataTable(org.apache.accumulo.core.metadata.MetadataTable) Table(org.apache.accumulo.core.client.impl.Table) ReplicationTable(org.apache.accumulo.core.replication.ReplicationTable) Value(org.apache.accumulo.core.data.Value) Range(org.apache.accumulo.core.data.Range) Key(org.apache.accumulo.core.data.Key) HashSet(java.util.HashSet)
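
One plausible follow-up, hypothetical rather than taken from the test: cross-check the returned paths against the filesystem, for example to confirm which rfiles survive a garbage-collection cycle.

import java.util.Set;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RFileExistenceCheck {

    // Report any referenced rfile that is missing from the filesystem
    static void reportMissing(Set<String> rfiles) throws Exception {
        Configuration conf = new Configuration();
        for (String rfile : rfiles) {
            Path path = new Path(rfile);
            // Resolve the filesystem from the path's own URI (e.g. hdfs://...)
            FileSystem fs = path.getFileSystem(conf);
            if (!fs.exists(path))
                System.out.println("Missing rfile: " + path);
        }
    }
}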

Example 10 with Table

Use of org.apache.accumulo.core.client.impl.Table in project accumulo by apache.

From class TabletStateChangeIteratorIT, the method removeLocation:

private void removeLocation(String table, String tableNameToModify) throws TableNotFoundException, MutationsRejectedException {
    Table.ID tableIdToModify = Table.ID.of(getConnector().tableOperations().tableIdMap().get(tableNameToModify));
    BatchDeleter deleter = getConnector().createBatchDeleter(table, Authorizations.EMPTY, 1, new BatchWriterConfig());
    deleter.setRanges(Collections.singleton(new KeyExtent(tableIdToModify, null, null).toMetadataRange()));
    deleter.fetchColumnFamily(MetadataSchema.TabletsSection.CurrentLocationColumnFamily.NAME);
    deleter.delete();
    deleter.close();
}
Also used : BatchDeleter(org.apache.accumulo.core.client.BatchDeleter) MetadataTable(org.apache.accumulo.core.metadata.MetadataTable) Table(org.apache.accumulo.core.client.impl.Table) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) KeyExtent(org.apache.accumulo.core.data.impl.KeyExtent)
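
After removeLocation runs, a test might verify that no current-location entries remain. A hypothetical companion check built from the same KeyExtent-to-metadata-range trick the method uses:

import java.util.Map.Entry;

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.client.impl.Table;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.data.impl.KeyExtent;
import org.apache.accumulo.core.metadata.MetadataTable;
import org.apache.accumulo.core.metadata.schema.MetadataSchema;
import org.apache.accumulo.core.security.Authorizations;

public class LocationCheckSketch {

    // Return true if any tablet of the table still has a current location entry
    static boolean hasLocation(Connector conn, Table.ID tableId) throws Exception {
        try (Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
            // Null end row and prev end row cover every tablet of the table
            s.setRange(new KeyExtent(tableId, null, null).toMetadataRange());
            s.fetchColumnFamily(MetadataSchema.TabletsSection.CurrentLocationColumnFamily.NAME);
            for (Entry<Key,Value> entry : s) {
                System.out.println("Still located: " + entry.getKey().toStringNoTruncate());
                return true;
            }
        }
        return false;
    }
}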

Aggregations

Table (org.apache.accumulo.core.client.impl.Table): 55
MetadataTable (org.apache.accumulo.core.metadata.MetadataTable): 34
Value (org.apache.accumulo.core.data.Value): 27
Key (org.apache.accumulo.core.data.Key): 25
Text (org.apache.hadoop.io.Text): 25
Scanner (org.apache.accumulo.core.client.Scanner): 21
KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent): 21
Test (org.junit.Test): 21
Connector (org.apache.accumulo.core.client.Connector): 19
Mutation (org.apache.accumulo.core.data.Mutation): 18
ReplicationTable (org.apache.accumulo.core.replication.ReplicationTable): 18
BatchWriter (org.apache.accumulo.core.client.BatchWriter): 17
TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException): 17
AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException): 15
ArrayList (java.util.ArrayList): 14
AccumuloException (org.apache.accumulo.core.client.AccumuloException): 14
Path (org.apache.hadoop.fs.Path): 14
HashSet (java.util.HashSet): 11
BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig): 11
HashMap (java.util.HashMap): 9