Example 31 with Table

Use of org.apache.accumulo.core.client.impl.Table in project accumulo by apache.

Class MasterClientServiceHandler, method alterTableProperty:

private void alterTableProperty(TCredentials c, String tableName, String property, String value, TableOperation op) throws ThriftSecurityException, ThriftTableOperationException {
    final Table.ID tableId = ClientServiceHandler.checkTableId(master.getInstance(), tableName, op);
    Namespace.ID namespaceId = getNamespaceIdFromTableId(op, tableId);
    if (!master.security.canAlterTable(c, tableId, namespaceId))
        throw new ThriftSecurityException(c.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED);
    try {
        if (value == null || value.isEmpty()) {
            TablePropUtil.removeTableProperty(tableId, property);
        } else if (!TablePropUtil.setTableProperty(tableId, property, value)) {
            throw new Exception("Invalid table property.");
        }
    } catch (KeeperException.NoNodeException e) {
        // Race condition: the table may no longer exist. The following call throws if the table was deleted:
        ClientServiceHandler.checkTableId(master.getInstance(), tableName, op);
        log.info("Error altering table property", e);
        throw new ThriftTableOperationException(tableId.canonicalID(), tableName, op, TableOperationExceptionType.OTHER, "Problem altering table property");
    } catch (Exception e) {
        log.error("Problem altering table property", e);
        throw new ThriftTableOperationException(tableId.canonicalID(), tableName, op, TableOperationExceptionType.OTHER, "Problem altering table property");
    }
}
Also used: MetadataTable(org.apache.accumulo.core.metadata.MetadataTable), RootTable(org.apache.accumulo.core.metadata.RootTable), Table(org.apache.accumulo.core.client.impl.Table), ReplicationTable(org.apache.accumulo.core.replication.ReplicationTable), Namespace(org.apache.accumulo.core.client.impl.Namespace), NoNodeException(org.apache.zookeeper.KeeperException.NoNodeException), KeeperException(org.apache.zookeeper.KeeperException), ThriftTableOperationException(org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException), ThriftSecurityException(org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException), TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException), AccumuloException(org.apache.accumulo.core.client.AccumuloException), AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException), TabletDeletedException(org.apache.accumulo.server.util.TabletIterator.TabletDeletedException), InvalidProtocolBufferException(com.google.protobuf.InvalidProtocolBufferException), TException(org.apache.thrift.TException)
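
Client code normally reaches this handler through the public TableOperations API rather than calling it directly. A minimal client-side sketch, assuming an existing Connector; the conn, table, property, and value names here are hypothetical:

import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.Connector;

public class TablePropertyExample {

    // Set or remove a table property via the public API; server-side, the
    // master's alterTableProperty handler above services these requests.
    static void setOrRemove(Connector conn, String table, String property, String value)
            throws AccumuloException, AccumuloSecurityException {
        if (value == null || value.isEmpty()) {
            // Mirrors the handler's null/empty check: an empty value means "remove".
            conn.tableOperations().removeProperty(table, property);
        } else {
            conn.tableOperations().setProperty(table, property, value);
        }
    }
}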

Example 32 with Table

Use of org.apache.accumulo.core.client.impl.Table in project accumulo by apache.

Class FinishedWorkUpdater, method run:

@Override
public void run() {
    log.debug("Looking for finished replication work");
    if (!ReplicationTable.isOnline(conn)) {
        log.debug("Replication table is not yet online, will retry");
        return;
    }
    BatchScanner bs;
    BatchWriter replBw;
    try {
        bs = ReplicationTable.getBatchScanner(conn, 4);
        replBw = ReplicationTable.getBatchWriter(conn);
    } catch (ReplicationTableOfflineException e) {
        log.debug("Table is no longer online, will retry");
        return;
    }
    IteratorSetting cfg = new IteratorSetting(50, WholeRowIterator.class);
    bs.addScanIterator(cfg);
    WorkSection.limit(bs);
    bs.setRanges(Collections.singleton(new Range()));
    try {
        for (Entry<Key, Value> serializedRow : bs) {
            SortedMap<Key, Value> wholeRow;
            try {
                wholeRow = WholeRowIterator.decodeRow(serializedRow.getKey(), serializedRow.getValue());
            } catch (IOException e) {
                log.warn("Could not deserialize whole row with key {}", serializedRow.getKey().toStringNoTruncate(), e);
                continue;
            }
            log.debug("Processing work progress for {} with {} columns", serializedRow.getKey().getRow(), wholeRow.size());
            Map<Table.ID, Long> tableIdToProgress = new HashMap<>();
            boolean error = false;
            Text buffer = new Text();
            // Determine the minimum point to which all Work entries have replicated
            for (Entry<Key, Value> entry : wholeRow.entrySet()) {
                Status status;
                try {
                    status = Status.parseFrom(entry.getValue().get());
                } catch (InvalidProtocolBufferException e) {
                    log.warn("Could not deserialize protobuf for {}", entry.getKey(), e);
                    error = true;
                    break;
                }
                // Get the replication target for the work record
                entry.getKey().getColumnQualifier(buffer);
                ReplicationTarget target = ReplicationTarget.from(buffer);
                // Initialize the value in the map if we don't have one
                if (!tableIdToProgress.containsKey(target.getSourceTableId())) {
                    tableIdToProgress.put(target.getSourceTableId(), Long.MAX_VALUE);
                }
                // Find the minimum value for begin (everyone has replicated up to this offset in the file)
                tableIdToProgress.put(target.getSourceTableId(), Math.min(tableIdToProgress.get(target.getSourceTableId()), status.getBegin()));
            }
            if (error) {
                continue;
            }
            // Update the replication table for each source table we found work records for
            for (Entry<Table.ID, Long> entry : tableIdToProgress.entrySet()) {
                // If the progress is 0, then no one has replicated anything, and we don't need to update anything
                if (0 == entry.getValue()) {
                    continue;
                }
                serializedRow.getKey().getRow(buffer);
                log.debug("For {}, source table ID {} has replicated through {}", serializedRow.getKey().getRow(), entry.getKey(), entry.getValue());
                Mutation replMutation = new Mutation(buffer);
                // Set that we replicated at least this much data, ignoring the other fields
                Status updatedStatus = StatusUtil.replicated(entry.getValue());
                Value serializedUpdatedStatus = ProtobufUtil.toValue(updatedStatus);
                // Pull the sourceTableId into a Text
                Table.ID srcTableId = entry.getKey();
                // Make the mutation
                StatusSection.add(replMutation, srcTableId, serializedUpdatedStatus);
                log.debug("Updating replication status entry for {} with {}", serializedRow.getKey().getRow(), ProtobufUtil.toString(updatedStatus));
                try {
                    replBw.addMutation(replMutation);
                } catch (MutationsRejectedException e) {
                    log.error("Error writing mutations to update replication Status messages in StatusSection, will retry", e);
                    return;
                }
            }
        }
    } finally {
        log.debug("Finished updating files with completed replication work");
        bs.close();
        try {
            replBw.close();
        } catch (MutationsRejectedException e) {
            log.error("Error writing mutations to update replication Status messages in StatusSection, will retry", e);
        }
    }
}
Also used: Status(org.apache.accumulo.server.replication.proto.Replication.Status), Table(org.apache.accumulo.core.client.impl.Table), ReplicationTable(org.apache.accumulo.core.replication.ReplicationTable), HashMap(java.util.HashMap), BatchScanner(org.apache.accumulo.core.client.BatchScanner), InvalidProtocolBufferException(com.google.protobuf.InvalidProtocolBufferException), Text(org.apache.hadoop.io.Text), IOException(java.io.IOException), Range(org.apache.accumulo.core.data.Range), IteratorSetting(org.apache.accumulo.core.client.IteratorSetting), ReplicationTarget(org.apache.accumulo.core.replication.ReplicationTarget), Value(org.apache.accumulo.core.data.Value), BatchWriter(org.apache.accumulo.core.client.BatchWriter), ReplicationTableOfflineException(org.apache.accumulo.core.replication.ReplicationTableOfflineException), Mutation(org.apache.accumulo.core.data.Mutation), Key(org.apache.accumulo.core.data.Key), MutationsRejectedException(org.apache.accumulo.core.client.MutationsRejectedException)
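
The row-at-a-time handling above is a reusable pattern on its own: attach a WholeRowIterator so the scanner returns each row as a single serialized entry, then decode it back into its cells. A minimal sketch, assuming an existing Connector and table name (both hypothetical):

import java.io.IOException;
import java.util.Map.Entry;
import java.util.SortedMap;

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.user.WholeRowIterator;
import org.apache.accumulo.core.security.Authorizations;

public class WholeRowScanExample {

    static void scanWholeRows(Connector conn, String tableName) throws TableNotFoundException, IOException {
        Scanner scanner = conn.createScanner(tableName, Authorizations.EMPTY);
        // Priority 50 matches the snippet above; any unused priority works.
        scanner.addScanIterator(new IteratorSetting(50, WholeRowIterator.class));
        for (Entry<Key, Value> row : scanner) {
            // Each entry is an entire row serialized into one Value; decode it back into cells.
            SortedMap<Key, Value> cells = WholeRowIterator.decodeRow(row.getKey(), row.getValue());
            System.out.println(row.getKey().getRow() + " has " + cells.size() + " cells");
        }
    }
}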

Example 33 with Table

Use of org.apache.accumulo.core.client.impl.Table in project accumulo by apache.

Class TableLoadBalancerTest, method test:

@Test
public void test() throws Exception {
    final Instance inst = EasyMock.createMock(Instance.class);
    EasyMock.expect(inst.getInstanceID()).andReturn(UUID.nameUUIDFromBytes(new byte[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 0 }).toString()).anyTimes();
    EasyMock.expect(inst.getZooKeepers()).andReturn("10.0.0.1:1234").anyTimes();
    EasyMock.expect(inst.getZooKeepersSessionTimeOut()).andReturn(30_000).anyTimes();
    EasyMock.replay(inst);
    ServerConfigurationFactory confFactory = new ServerConfigurationFactory(inst) {

        @Override
        public TableConfiguration getTableConfiguration(Table.ID tableId) {
            // create a dummy namespaceConfiguration to satisfy requireNonNull in TableConfiguration constructor
            NamespaceConfiguration dummyConf = new NamespaceConfiguration(null, inst, null);
            return new TableConfiguration(inst, tableId, dummyConf) {

                @Override
                public String get(Property property) {
                    // fake the get table configuration so the test doesn't try to look in zookeeper for per-table classpath stuff
                    return DefaultConfiguration.getInstance().get(property);
                }
            };
        }
    };
    String t1Id = TABLE_ID_MAP.get("t1"), t2Id = TABLE_ID_MAP.get("t2"), t3Id = TABLE_ID_MAP.get("t3");
    state = new TreeMap<>();
    TServerInstance svr = mkts("10.0.0.1", "0x01020304");
    state.put(svr, status(t1Id, 10, t2Id, 10, t3Id, 10));
    Set<KeyExtent> migrations = Collections.emptySet();
    List<TabletMigration> migrationsOut = new ArrayList<>();
    TableLoadBalancer tls = new TableLoadBalancer();
    tls.init(new AccumuloServerContext(inst, confFactory));
    tls.balance(state, migrations, migrationsOut);
    Assert.assertEquals(0, migrationsOut.size());
    state.put(mkts("10.0.0.2", "0x02030405"), status());
    tls = new TableLoadBalancer();
    tls.init(new AccumuloServerContext(inst, confFactory));
    tls.balance(state, migrations, migrationsOut);
    int count = 0;
    Map<Table.ID, Integer> movedByTable = new HashMap<>();
    movedByTable.put(Table.ID.of(t1Id), 0);
    movedByTable.put(Table.ID.of(t2Id), 0);
    movedByTable.put(Table.ID.of(t3Id), 0);
    for (TabletMigration migration : migrationsOut) {
        if (migration.oldServer.equals(svr))
            count++;
        Table.ID key = migration.tablet.getTableId();
        movedByTable.put(key, movedByTable.get(key) + 1);
    }
    Assert.assertEquals(15, count);
    for (Integer moved : movedByTable.values()) {
        Assert.assertEquals(5, moved.intValue());
    }
}
Also used: AccumuloServerContext(org.apache.accumulo.server.AccumuloServerContext), Table(org.apache.accumulo.core.client.impl.Table), TabletMigration(org.apache.accumulo.server.master.state.TabletMigration), Instance(org.apache.accumulo.core.client.Instance), TServerInstance(org.apache.accumulo.server.master.state.TServerInstance), HashMap(java.util.HashMap), ArrayList(java.util.ArrayList), ServerConfigurationFactory(org.apache.accumulo.server.conf.ServerConfigurationFactory), KeyExtent(org.apache.accumulo.core.data.impl.KeyExtent), UUID(java.util.UUID), NamespaceConfiguration(org.apache.accumulo.server.conf.NamespaceConfiguration), Property(org.apache.accumulo.core.conf.Property), TableConfiguration(org.apache.accumulo.server.conf.TableConfiguration), Test(org.junit.Test)
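
TableLoadBalancer chooses a delegate balancer for each table from that table's table.balancer property. A minimal sketch of pointing a table at the stock DefaultLoadBalancer, assuming an existing Connector (the conn and table names are hypothetical):

import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.conf.Property;

public class BalancerConfigExample {

    static void useDefaultBalancer(Connector conn, String table)
            throws AccumuloException, AccumuloSecurityException {
        // TableLoadBalancer consults this per-table property when picking the delegate.
        conn.tableOperations().setProperty(table, Property.TABLE_LOAD_BALANCER.getKey(),
                "org.apache.accumulo.server.master.balancer.DefaultLoadBalancer");
    }
}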

Example 34 with Table

Use of org.apache.accumulo.core.client.impl.Table in project accumulo by apache.

Class LargestFirstMemoryManagerTest, method testDeletedTable:

@Test
public void testDeletedTable() throws Exception {
    final String deletedTableId = "1";
    Function<Table.ID, Boolean> existenceCheck = tableId -> !deletedTableId.contentEquals(tableId.canonicalID());
    LargestFirstMemoryManagerWithExistenceCheck mgr = new LargestFirstMemoryManagerWithExistenceCheck(existenceCheck);
    ServerConfiguration config = new ServerConfiguration() {

        ServerConfigurationFactory delegate = new ServerConfigurationFactory(inst);

        @Override
        public AccumuloConfiguration getSystemConfiguration() {
            return DefaultConfiguration.getInstance();
        }

        @Override
        public TableConfiguration getTableConfiguration(Table.ID tableId) {
            return delegate.getTableConfiguration(tableId);
        }

        @Override
        public NamespaceConfiguration getNamespaceConfiguration(Namespace.ID namespaceId) {
            return delegate.getNamespaceConfiguration(namespaceId);
        }
    };
    mgr.init(config);
    MemoryManagementActions result;
    // one tablet is really big and the other is for a nonexistent table
    KeyExtent extent = new KeyExtent(Table.ID.of("2"), new Text("j"), null);
    result = mgr.getMemoryManagementActions(tablets(t(extent, ZERO, ONE_GIG, 0), t(k("j"), ZERO, ONE_GIG, 0)));
    assertEquals(1, result.tabletsToMinorCompact.size());
    assertEquals(extent, result.tabletsToMinorCompact.get(0));
}
Also used: Arrays(java.util.Arrays), Table(org.apache.accumulo.core.client.impl.Table), NamespaceConfiguration(org.apache.accumulo.server.conf.NamespaceConfiguration), TabletState(org.apache.accumulo.server.tabletserver.TabletState), Text(org.apache.hadoop.io.Text), Instance(org.apache.accumulo.core.client.Instance), Test(org.junit.Test), EasyMock(org.easymock.EasyMock), Function(java.util.function.Function), LargestFirstMemoryManager(org.apache.accumulo.server.tabletserver.LargestFirstMemoryManager), DefaultConfiguration(org.apache.accumulo.core.conf.DefaultConfiguration), KeyExtent(org.apache.accumulo.core.data.impl.KeyExtent), AccumuloConfiguration(org.apache.accumulo.core.conf.AccumuloConfiguration), SiteConfiguration(org.apache.accumulo.core.conf.SiteConfiguration), Namespace(org.apache.accumulo.core.client.impl.Namespace), List(java.util.List), ServerConfigurationFactory(org.apache.accumulo.server.conf.ServerConfigurationFactory), TableConfiguration(org.apache.accumulo.server.conf.TableConfiguration), ServerConfiguration(org.apache.accumulo.server.conf.ServerConfiguration), Assert.assertEquals(org.junit.Assert.assertEquals), Property(org.apache.accumulo.core.conf.Property), MemoryManagementActions(org.apache.accumulo.server.tabletserver.MemoryManagementActions), Before(org.junit.Before)
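
The LargestFirstMemoryManagerWithExistenceCheck helper is not shown in this excerpt. A plausible reconstruction, assuming LargestFirstMemoryManager exposes an overridable tableExists hook (an assumption inferred from how the test injects the Function):

import java.util.function.Function;

import org.apache.accumulo.core.client.impl.Table;
import org.apache.accumulo.server.tabletserver.LargestFirstMemoryManager;

// Hypothetical sketch of the test helper: it routes the manager's
// table-existence check through an injectable Function so the test can
// simulate a deleted table without touching ZooKeeper.
class LargestFirstMemoryManagerWithExistenceCheck extends LargestFirstMemoryManager {

    private final Function<Table.ID, Boolean> existenceCheck;

    LargestFirstMemoryManagerWithExistenceCheck(Function<Table.ID, Boolean> existenceCheck) {
        this.existenceCheck = existenceCheck;
    }

    @Override
    protected boolean tableExists(Table.ID tableId) {
        return existenceCheck.apply(tableId);
    }
}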

Example 35 with Table

Use of org.apache.accumulo.core.client.impl.Table in project accumulo by apache.

Class RewriteTabletDirectoriesIT, method test:

@Test
public void test() throws Exception {
    Connector c = getConnector();
    c.securityOperations().grantTablePermission(c.whoami(), MetadataTable.NAME, TablePermission.WRITE);
    final String tableName = getUniqueNames(1)[0];
    c.tableOperations().create(tableName);
    // Write some data to a table and add some splits
    BatchWriter bw = c.createBatchWriter(tableName, null);
    final SortedSet<Text> splits = new TreeSet<>();
    for (String split : "a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z".split(",")) {
        splits.add(new Text(split));
        Mutation m = new Mutation(new Text(split));
        m.put(new byte[] {}, new byte[] {}, new byte[] {});
        bw.addMutation(m);
    }
    bw.close();
    c.tableOperations().addSplits(tableName, splits);
    try (BatchScanner scanner = c.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 1)) {
        DIRECTORY_COLUMN.fetch(scanner);
        Table.ID tableId = Table.ID.of(c.tableOperations().tableIdMap().get(tableName));
        assertNotNull("TableID for " + tableName + " was null", tableId);
        scanner.setRanges(Collections.singletonList(TabletsSection.getRange(tableId)));
        // verify the directory entries are all on v1, make a few entries relative
        bw = c.createBatchWriter(MetadataTable.NAME, null);
        int count = 0;
        for (Entry<Key, Value> entry : scanner) {
            assertTrue("Expected " + entry.getValue() + " to contain " + v1, entry.getValue().toString().contains(v1.toString()));
            count++;
            if (count % 2 == 0) {
                String[] parts = entry.getValue().toString().split("/");
                Key key = entry.getKey();
                Mutation m = new Mutation(key.getRow());
                m.put(key.getColumnFamily(), key.getColumnQualifier(), new Value((Path.SEPARATOR + parts[parts.length - 1]).getBytes()));
                bw.addMutation(m);
            }
        }
        bw.close();
        assertEquals(splits.size() + 1, count);
        // This should fail: only one volume
        assertEquals(1, cluster.exec(RandomizeVolumes.class, "-z", cluster.getZooKeepers(), "-i", c.getInstance().getInstanceName(), "-t", tableName).waitFor());
        cluster.stop();
        // add the 2nd volume
        Configuration conf = new Configuration(false);
        conf.addResource(new Path(cluster.getConfig().getConfDir().toURI().toString(), "accumulo-site.xml"));
        conf.set(Property.INSTANCE_VOLUMES.getKey(), v1.toString() + "," + v2.toString());
        BufferedOutputStream fos = new BufferedOutputStream(new FileOutputStream(new File(cluster.getConfig().getConfDir(), "accumulo-site.xml")));
        conf.writeXml(fos);
        fos.close();
        // initialize volume
        assertEquals(0, cluster.exec(Initialize.class, "--add-volumes").waitFor());
        cluster.start();
        c = getConnector();
        // change the directory entries
        assertEquals(0, cluster.exec(Admin.class, "randomizeVolumes", "-t", tableName).waitFor());
        // verify a more equal sharing
        int v1Count = 0, v2Count = 0;
        for (Entry<Key, Value> entry : scanner) {
            if (entry.getValue().toString().contains(v1.toString())) {
                v1Count++;
            }
            if (entry.getValue().toString().contains(v2.toString())) {
                v2Count++;
            }
        }
        log.info("Count for volume1: {}", v1Count);
        log.info("Count for volume2: {}", v2Count);
        assertEquals(splits.size() + 1, v1Count + v2Count);
        // a fair chooser will differ by less than count(volumes)
        assertTrue("Expected the number of files to differ between volumes by less than 10. " + v1Count + " " + v2Count, Math.abs(v1Count - v2Count) < 2);
        // verify we can read the old data
        count = 0;
        for (Entry<Key, Value> entry : c.createScanner(tableName, Authorizations.EMPTY)) {
            assertTrue("Found unexpected entry in table: " + entry, splits.contains(entry.getKey().getRow()));
            count++;
        }
        assertEquals(splits.size(), count);
    }
}
Also used: Path(org.apache.hadoop.fs.Path), Connector(org.apache.accumulo.core.client.Connector), MetadataTable(org.apache.accumulo.core.metadata.MetadataTable), Table(org.apache.accumulo.core.client.impl.Table), Configuration(org.apache.hadoop.conf.Configuration), BatchScanner(org.apache.accumulo.core.client.BatchScanner), Text(org.apache.hadoop.io.Text), TreeSet(java.util.TreeSet), FileOutputStream(java.io.FileOutputStream), Value(org.apache.accumulo.core.data.Value), BatchWriter(org.apache.accumulo.core.client.BatchWriter), Mutation(org.apache.accumulo.core.data.Mutation), BufferedOutputStream(java.io.BufferedOutputStream), File(java.io.File), Key(org.apache.accumulo.core.data.Key), Test(org.junit.Test)
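
The metadata read at the heart of this test is a generally useful pattern: restrict a scanner to one table's tablet rows and fetch only the per-tablet directory column. A minimal sketch, assuming an existing Connector and table name (both hypothetical):

import java.util.Collections;
import java.util.Map.Entry;

import org.apache.accumulo.core.client.BatchScanner;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.client.impl.Table;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.metadata.MetadataTable;
import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
import org.apache.accumulo.core.security.Authorizations;

public class TabletDirectoryScanExample {

    static void printTabletDirs(Connector conn, String tableName) throws TableNotFoundException {
        // Look up the table's ID; tableIdMap() returns name -> id for all tables.
        Table.ID tableId = Table.ID.of(conn.tableOperations().tableIdMap().get(tableName));
        try (BatchScanner scanner = conn.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 1)) {
            // Fetch the same column the test reads: the per-tablet directory entry.
            TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(scanner);
            // Restrict the scan to this table's rows in the metadata table.
            scanner.setRanges(Collections.singletonList(TabletsSection.getRange(tableId)));
            for (Entry<Key, Value> entry : scanner) {
                System.out.println(entry.getKey().getRow() + " -> " + entry.getValue());
            }
        }
    }
}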

Aggregations

Table (org.apache.accumulo.core.client.impl.Table) 55
MetadataTable (org.apache.accumulo.core.metadata.MetadataTable) 34
Value (org.apache.accumulo.core.data.Value) 27
Key (org.apache.accumulo.core.data.Key) 25
Text (org.apache.hadoop.io.Text) 25
Scanner (org.apache.accumulo.core.client.Scanner) 21
KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent) 21
Test (org.junit.Test) 21
Connector (org.apache.accumulo.core.client.Connector) 19
Mutation (org.apache.accumulo.core.data.Mutation) 18
ReplicationTable (org.apache.accumulo.core.replication.ReplicationTable) 18
BatchWriter (org.apache.accumulo.core.client.BatchWriter) 17
TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException) 17
AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException) 15
ArrayList (java.util.ArrayList) 14
AccumuloException (org.apache.accumulo.core.client.AccumuloException) 14
Path (org.apache.hadoop.fs.Path) 14
HashSet (java.util.HashSet) 11
BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig) 11
HashMap (java.util.HashMap) 9