Example 1 with ReplicationOperationsImpl

Use of org.apache.accumulo.core.clientImpl.ReplicationOperationsImpl in project accumulo by apache.

From class ReplicationOperationsImplIT, method getReplicationOperations:

/**
 * Spoof out the Manager so we can call the implementation without starting a full instance.
 */
private ReplicationOperationsImpl getReplicationOperations() {
    // Use the real client's context for both the mocked Manager and the stubbed impl below.
    ClientContext context = (ClientContext) client;
    Manager manager = EasyMock.createMock(Manager.class);
    EasyMock.expect(manager.getContext()).andReturn(context).anyTimes();
    EasyMock.replay(manager);
    final ManagerClientServiceHandler mcsh = new ManagerClientServiceHandler(manager) {

        @Override
        protected TableId getTableId(ClientContext context, String tableName) {
            try {
                return TableId.of(client.tableOperations().tableIdMap().get(tableName));
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
    };
    return new ReplicationOperationsImpl(context) {

        @Override
        protected boolean getManagerDrain(final TInfo tinfo, final TCredentials rpcCreds, final String tableName, final Set<String> wals) {
            try {
                return mcsh.drainReplicationTable(tinfo, rpcCreds, tableName, wals);
            } catch (TException e) {
                throw new RuntimeException(e);
            }
        }
    };
}
Also used: TInfo(org.apache.accumulo.core.trace.thrift.TInfo) TException(org.apache.thrift.TException) Set(java.util.Set) ManagerClientServiceHandler(org.apache.accumulo.manager.ManagerClientServiceHandler) TCredentials(org.apache.accumulo.core.securityImpl.thrift.TCredentials) ClientContext(org.apache.accumulo.core.clientImpl.ClientContext) Manager(org.apache.accumulo.manager.Manager) ReplicationOperationsImpl(org.apache.accumulo.core.clientImpl.ReplicationOperationsImpl)
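
The helper above leans on EasyMock's record/replay cycle to hand ManagerClientServiceHandler a Manager whose getContext() call is canned. For readers unfamiliar with that cycle, here is a minimal, self-contained sketch of the same pattern; the Service interface and its canned value are hypothetical stand-ins, not part of Accumulo.

import org.easymock.EasyMock;

public class MockSketch {

    // Hypothetical interface standing in for Manager.
    interface Service {
        String config();
    }

    public static void main(String[] args) {
        Service service = EasyMock.createMock(Service.class);
        // Record mode: any number of config() calls will return "stubbed".
        EasyMock.expect(service.config()).andReturn("stubbed").anyTimes();
        // Switch to replay mode; the mock is now ready to be used.
        EasyMock.replay(service);
        System.out.println(service.config()); // prints "stubbed"
        // Optionally confirm every recorded expectation was exercised.
        EasyMock.verify(service);
    }
}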

Example 2 with ReplicationOperationsImpl

Use of org.apache.accumulo.core.clientImpl.ReplicationOperationsImpl in project accumulo by apache.

From class ReplicationOperationsImplIT, method waitsUntilEntriesAreReplicated:

@Test
public void waitsUntilEntriesAreReplicated() throws Exception {
    client.tableOperations().create("foo");
    TableId tableId = TableId.of(client.tableOperations().tableIdMap().get("foo"));
    String file1 = "/accumulo/wals/tserver+port/" + UUID.randomUUID(), file2 = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
    Status stat = Status.newBuilder().setBegin(0).setEnd(10000).setInfiniteEnd(false).setClosed(false).build();
    BatchWriter bw = ReplicationTable.getBatchWriter(client);
    Mutation m = new Mutation(file1);
    StatusSection.add(m, tableId, ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    m = new Mutation(file2);
    StatusSection.add(m, tableId, ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    bw.close();
    bw = client.createBatchWriter(MetadataTable.NAME);
    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
    m.put(ReplicationSection.COLF, new Text(tableId.canonical()), ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    m = new Mutation(ReplicationSection.getRowPrefix() + file2);
    m.put(ReplicationSection.COLF, new Text(tableId.canonical()), ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    bw.close();
    final AtomicBoolean done = new AtomicBoolean(false);
    final AtomicBoolean exception = new AtomicBoolean(false);
    final ReplicationOperationsImpl roi = getReplicationOperations();
    Thread t = new Thread(() -> {
        try {
            roi.drain("foo");
        } catch (Exception e) {
            log.error("Got error", e);
            exception.set(true);
        }
        done.set(true);
    });
    t.start();
    // With the records, we shouldn't be drained
    assertFalse(done.get());
    bw = client.createBatchWriter(MetadataTable.NAME);
    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
    m.putDelete(ReplicationSection.COLF, new Text(tableId.canonical()));
    bw.addMutation(m);
    bw.flush();
    assertFalse(done.get());
    m = new Mutation(ReplicationSection.getRowPrefix() + file2);
    m.putDelete(ReplicationSection.COLF, new Text(tableId.canonical()));
    bw.addMutation(m);
    bw.flush();
    bw.close();
    // Removing metadata entries doesn't change anything
    assertFalse(done.get());
    // Remove the replication entries too
    bw = ReplicationTable.getBatchWriter(client);
    m = new Mutation(file1);
    m.putDelete(StatusSection.NAME, new Text(tableId.canonical()));
    bw.addMutation(m);
    bw.flush();
    assertFalse(done.get());
    m = new Mutation(file2);
    m.putDelete(StatusSection.NAME, new Text(tableId.canonical()));
    bw.addMutation(m);
    bw.flush();
    try {
        t.join(5000);
    } catch (InterruptedException e) {
        fail("ReplicationOperations.drain did not complete");
    }
    // After both the metadata and replication entries are removed, the drain should finish
    assertTrue("Drain never finished", done.get());
    assertFalse("Saw unexpected exception", exception.get());
}
Also used: TableId(org.apache.accumulo.core.data.TableId) Status(org.apache.accumulo.server.replication.proto.Replication.Status) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Text(org.apache.hadoop.io.Text) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) ReplicationOperationsImpl(org.apache.accumulo.core.clientImpl.ReplicationOperationsImpl) TException(org.apache.thrift.TException) Test(org.junit.Test)
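
This test's structure (start the blocking drain on a worker thread, assert done is still false, remove entries, then join) recurs in every example below. A stripped-down sketch of that pattern, with a CountDownLatch standing in for the outstanding replication entries, might look like this; all names are illustrative.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;

public class DrainPatternSketch {

    public static void main(String[] args) throws InterruptedException {
        AtomicBoolean done = new AtomicBoolean(false);
        // Stands in for the outstanding replication entries: the worker
        // blocks until the last one is gone.
        CountDownLatch outstanding = new CountDownLatch(1);

        Thread worker = new Thread(() -> {
            try {
                outstanding.await(); // comparable to roi.drain("foo") blocking
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
            done.set(true);
        });
        worker.start();

        System.out.println("before removal, done = " + done.get()); // false
        outstanding.countDown(); // comparable to deleting the last entry
        worker.join(5000);
        System.out.println("after removal, done = " + done.get()); // true
    }
}

In the real tests, asserting done.get() right after start() is safe only because the drain genuinely cannot finish while entries remain; the latch here makes that ordering explicit.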

Example 3 with ReplicationOperationsImpl

Use of org.apache.accumulo.core.clientImpl.ReplicationOperationsImpl in project accumulo by apache.

From class ReplicationOperationsImplIT, method inprogressReplicationRecordsBlockExecution:

@Test
public void inprogressReplicationRecordsBlockExecution() throws Exception {
    client.tableOperations().create("foo");
    TableId tableId1 = TableId.of(client.tableOperations().tableIdMap().get("foo"));
    String file1 = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
    Status stat = Status.newBuilder().setBegin(0).setEnd(10000).setInfiniteEnd(false).setClosed(false).build();
    BatchWriter bw = ReplicationTable.getBatchWriter(client);
    Mutation m = new Mutation(file1);
    StatusSection.add(m, tableId1, ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    bw.close();
    LogEntry logEntry = new LogEntry(new KeyExtent(tableId1, null, null), System.currentTimeMillis(), file1);
    bw = client.createBatchWriter(MetadataTable.NAME);
    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
    m.put(ReplicationSection.COLF, new Text(tableId1.canonical()), ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    m = new Mutation(logEntry.getRow());
    m.put(logEntry.getColumnFamily(), logEntry.getColumnQualifier(), logEntry.getValue());
    bw.addMutation(m);
    bw.close();
    final AtomicBoolean done = new AtomicBoolean(false);
    final AtomicBoolean exception = new AtomicBoolean(false);
    final ReplicationOperationsImpl roi = getReplicationOperations();
    Thread t = new Thread(() -> {
        try {
            roi.drain("foo");
        } catch (Exception e) {
            log.error("Got error", e);
            exception.set(true);
        }
        done.set(true);
    });
    t.start();
    // With the records, we shouldn't be drained
    assertFalse(done.get());
    Status newStatus = Status.newBuilder().setBegin(1000).setEnd(2000).setInfiniteEnd(false).setClosed(true).build();
    bw = client.createBatchWriter(MetadataTable.NAME);
    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
    m.put(ReplicationSection.COLF, new Text(tableId1.canonical()), ProtobufUtil.toValue(newStatus));
    bw.addMutation(m);
    bw.flush();
    // Updating the metadata entry with a still-in-progress status doesn't unblock the drain
    assertFalse(done.get());
    // Update the replication entry too
    bw = ReplicationTable.getBatchWriter(client);
    m = new Mutation(file1);
    m.put(StatusSection.NAME, new Text(tableId1.canonical()), ProtobufUtil.toValue(newStatus));
    bw.addMutation(m);
    bw.flush();
    try {
        t.join(5000);
    } catch (InterruptedException e) {
        fail("ReplicationOperations.drain did not complete");
    }
    // Records that are newer but still not fully replicated must not let the drain complete
    assertFalse("Drain somehow finished", done.get());
    assertFalse("Saw unexpected exception", exception.get());
}
Also used: TableId(org.apache.accumulo.core.data.TableId) Status(org.apache.accumulo.server.replication.proto.Replication.Status) Text(org.apache.hadoop.io.Text) KeyExtent(org.apache.accumulo.core.dataImpl.KeyExtent) TException(org.apache.thrift.TException) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) LogEntry(org.apache.accumulo.core.tabletserver.log.LogEntry) ReplicationOperationsImpl(org.apache.accumulo.core.clientImpl.ReplicationOperationsImpl) Test(org.junit.Test)
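
What distinguishes this example from Example 2 is the content of newStatus: begin=1000 is still short of end=2000, so the record is not fully replicated and the drain must keep blocking. The predicate below is a rough, illustrative approximation of that condition, not Accumulo's actual StatusUtil logic.

public class StatusSketch {

    // Approximation: the WAL holds data up to `end`, and replication has
    // shipped everything before `begin`; with infiniteEnd set, the end
    // offset is unbounded.
    static boolean looksFullyReplicated(long begin, long end, boolean infiniteEnd) {
        return infiniteEnd ? begin == Long.MAX_VALUE : begin >= end;
    }

    public static void main(String[] args) {
        System.out.println(looksFullyReplicated(0, 10000, false));     // false: the initial stat blocks the drain
        System.out.println(looksFullyReplicated(1000, 2000, false));   // false: newStatus is still behind
        System.out.println(looksFullyReplicated(10000, 10000, false)); // true: a caught-up record would unblock it
    }
}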

Example 4 with ReplicationOperationsImpl

Use of org.apache.accumulo.core.clientImpl.ReplicationOperationsImpl in project accumulo by apache.

From class ReplicationOperationsImplIT, method unrelatedReplicationRecordsDontBlockDrain:

@Test
public void unrelatedReplicationRecordsDontBlockDrain() throws Exception {
    client.tableOperations().create("foo");
    client.tableOperations().create("bar");
    TableId tableId1 = TableId.of(client.tableOperations().tableIdMap().get("foo"));
    TableId tableId2 = TableId.of(client.tableOperations().tableIdMap().get("bar"));
    String file1 = "/accumulo/wals/tserver+port/" + UUID.randomUUID(), file2 = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
    Status stat = Status.newBuilder().setBegin(0).setEnd(10000).setInfiniteEnd(false).setClosed(false).build();
    BatchWriter bw = ReplicationTable.getBatchWriter(client);
    Mutation m = new Mutation(file1);
    StatusSection.add(m, tableId1, ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    m = new Mutation(file2);
    StatusSection.add(m, tableId2, ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    bw.close();
    bw = client.createBatchWriter(MetadataTable.NAME);
    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
    m.put(ReplicationSection.COLF, new Text(tableId1.canonical()), ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    m = new Mutation(ReplicationSection.getRowPrefix() + file2);
    m.put(ReplicationSection.COLF, new Text(tableId2.canonical()), ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    bw.close();
    final AtomicBoolean done = new AtomicBoolean(false);
    final AtomicBoolean exception = new AtomicBoolean(false);
    final ReplicationOperationsImpl roi = getReplicationOperations();
    Thread t = new Thread(() -> {
        try {
            roi.drain("foo");
        } catch (Exception e) {
            log.error("Got error", e);
            exception.set(true);
        }
        done.set(true);
    });
    t.start();
    // With the records, we shouldn't be drained
    assertFalse(done.get());
    bw = client.createBatchWriter(MetadataTable.NAME);
    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
    m.putDelete(ReplicationSection.COLF, new Text(tableId1.canonical()));
    bw.addMutation(m);
    bw.flush();
    // Removing metadata entries doesn't change anything
    assertFalse(done.get());
    // Remove the replication entries too
    bw = ReplicationTable.getBatchWriter(client);
    m = new Mutation(file1);
    m.putDelete(StatusSection.NAME, new Text(tableId1.canonical()));
    bw.addMutation(m);
    bw.flush();
    bw.close();
    try {
        t.join(5000);
    } catch (InterruptedException e) {
        fail("ReplicationOperations.drain did not complete");
    }
    // Once "foo"'s metadata and replication entries are gone, the drain should finish even though "bar"'s remain
    assertTrue("Drain never completed", done.get());
    assertFalse("Saw unexpected exception", exception.get());
}
Also used: TableId(org.apache.accumulo.core.data.TableId) Status(org.apache.accumulo.server.replication.proto.Replication.Status) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Text(org.apache.hadoop.io.Text) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) ReplicationOperationsImpl(org.apache.accumulo.core.clientImpl.ReplicationOperationsImpl) TException(org.apache.thrift.TException) Test(org.junit.Test)
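
Every write and delete in these tests goes through the same Mutation idiom: collect all updates for one row into a Mutation, hand it to a BatchWriter, then flush or close to make the changes visible. A minimal sketch of that idiom follows, using arbitrary family/qualifier/value strings rather than Accumulo's replication schema (and assuming the CharSequence overloads available in Accumulo 2.x).

import org.apache.accumulo.core.data.ColumnUpdate;
import org.apache.accumulo.core.data.Mutation;

public class MutationSketch {

    public static void main(String[] args) {
        // A Mutation collects updates for exactly one row.
        Mutation m = new Mutation("/accumulo/wals/tserver+port/example");
        m.put("family", "qualifier", "value"); // write a cell
        m.putDelete("family", "qualifier");    // queue a delete marker for the same cell
        for (ColumnUpdate cu : m.getUpdates()) {
            System.out.println(new String(cu.getColumnFamily()) + ":"
                + new String(cu.getColumnQualifier()) + " deleted=" + cu.isDeleted());
        }
    }
}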

Example 5 with ReplicationOperationsImpl

Use of org.apache.accumulo.core.clientImpl.ReplicationOperationsImpl in project accumulo by apache.

From class ReplicationOperationsImplIT, method laterCreatedLogsDontBlockExecution:

@Test
public void laterCreatedLogsDontBlockExecution() throws Exception {
    client.tableOperations().create("foo");
    TableId tableId1 = TableId.of(client.tableOperations().tableIdMap().get("foo"));
    String file1 = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
    Status stat = Status.newBuilder().setBegin(0).setEnd(10000).setInfiniteEnd(false).setClosed(false).build();
    BatchWriter bw = ReplicationTable.getBatchWriter(client);
    Mutation m = new Mutation(file1);
    StatusSection.add(m, tableId1, ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    bw.close();
    bw = client.createBatchWriter(MetadataTable.NAME);
    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
    m.put(ReplicationSection.COLF, new Text(tableId1.canonical()), ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    bw.close();
    log.info("Reading metadata first time");
    try (var scanner = client.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        for (Entry<Key, Value> e : scanner) {
            log.info("{}", e.getKey());
        }
    }
    final AtomicBoolean done = new AtomicBoolean(false);
    final AtomicBoolean exception = new AtomicBoolean(false);
    final ReplicationOperationsImpl roi = getReplicationOperations();
    Thread t = new Thread(() -> {
        try {
            roi.drain("foo");
        } catch (Exception e) {
            log.error("Got error", e);
            exception.set(true);
        }
        done.set(true);
    });
    t.start();
    // Wait long enough for the drain thread to scan the metadata table once
    Thread.sleep(2000);
    // Write another file, and delete the original file's entry
    bw = client.createBatchWriter(MetadataTable.NAME);
    m = new Mutation(ReplicationSection.getRowPrefix() + "/accumulo/wals/tserver+port/" + UUID.randomUUID());
    m.put(ReplicationSection.COLF, new Text(tableId1.canonical()), ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
    m.putDelete(ReplicationSection.COLF, new Text(tableId1.canonical()));
    bw.addMutation(m);
    bw.close();
    log.info("Reading metadata second time");
    try (var scanner = client.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        for (Entry<Key, Value> e : scanner) {
            log.info("{}", e.getKey());
        }
    }
    bw = ReplicationTable.getBatchWriter(client);
    m = new Mutation(file1);
    m.putDelete(StatusSection.NAME, new Text(tableId1.canonical()));
    bw.addMutation(m);
    bw.close();
    try {
        t.join(5000);
    } catch (InterruptedException e) {
        fail("ReplicationOperations.drain did not complete");
    }
    // The drain should finish because it only waits on the file that existed when it
    // started, not on the file created afterwards
    assertTrue("Drain didn't finish", done.get());
}
Also used: TableId(org.apache.accumulo.core.data.TableId) Status(org.apache.accumulo.server.replication.proto.Replication.Status) Text(org.apache.hadoop.io.Text) TException(org.apache.thrift.TException) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Value(org.apache.accumulo.core.data.Value) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Key(org.apache.accumulo.core.data.Key) ReplicationOperationsImpl(org.apache.accumulo.core.clientImpl.ReplicationOperationsImpl) Test(org.junit.Test)
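
The behavior this last example relies on is that drain captures the set of outstanding files when it is called, so a file created afterwards cannot extend the wait. The toy sketch below isolates that snapshot-then-wait semantics; it is illustrative only, not Accumulo's implementation.

import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

public class SnapshotDrainSketch {

    final Set<String> pending = ConcurrentHashMap.newKeySet();

    // Capture the files present right now; the wait tracks only these.
    Set<String> snapshotPending() {
        return new HashSet<>(pending);
    }

    void waitFor(Set<String> snapshot) {
        // Keep waiting while any captured file is still pending.
        while (snapshot.stream().anyMatch(pending::contains)) {
            try {
                Thread.sleep(50);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                return;
            }
        }
    }

    public static void main(String[] args) throws InterruptedException {
        SnapshotDrainSketch s = new SnapshotDrainSketch();
        s.pending.add("file1");
        Set<String> snapshot = s.snapshotPending(); // taken before file2 exists
        Thread t = new Thread(() -> s.waitFor(snapshot));
        t.start();
        s.pending.add("file2");    // created after the snapshot: does not block the wait
        s.pending.remove("file1"); // the only captured file is now gone
        t.join(5000);
        System.out.println("wait finished: " + !t.isAlive()); // true
    }
}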

Aggregations

ReplicationOperationsImpl (org.apache.accumulo.core.clientImpl.ReplicationOperationsImpl): 5 uses
TException (org.apache.thrift.TException): 5 uses
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 4 uses
BatchWriter (org.apache.accumulo.core.client.BatchWriter): 4 uses
Mutation (org.apache.accumulo.core.data.Mutation): 4 uses
TableId (org.apache.accumulo.core.data.TableId): 4 uses
Status (org.apache.accumulo.server.replication.proto.Replication.Status): 4 uses
Text (org.apache.hadoop.io.Text): 4 uses
Test (org.junit.Test): 4 uses
Set (java.util.Set): 1 use
ClientContext (org.apache.accumulo.core.clientImpl.ClientContext): 1 use
Key (org.apache.accumulo.core.data.Key): 1 use
Value (org.apache.accumulo.core.data.Value): 1 use
KeyExtent (org.apache.accumulo.core.dataImpl.KeyExtent): 1 use
TCredentials (org.apache.accumulo.core.securityImpl.thrift.TCredentials): 1 use
LogEntry (org.apache.accumulo.core.tabletserver.log.LogEntry): 1 use
TInfo (org.apache.accumulo.core.trace.thrift.TInfo): 1 use
Manager (org.apache.accumulo.manager.Manager): 1 use
ManagerClientServiceHandler (org.apache.accumulo.manager.ManagerClientServiceHandler): 1 use