Example 1 with ReplicationOperationsImpl

Use of org.apache.accumulo.core.client.impl.ReplicationOperationsImpl in project accumulo by apache.

From the class ReplicationOperationsImplIT, method getReplicationOperations.

/**
 * Spoof out the Master so we can call the implementation without starting a full instance.
 */
private ReplicationOperationsImpl getReplicationOperations() throws Exception {
    Master master = EasyMock.createMock(Master.class);
    EasyMock.expect(master.getConnector()).andReturn(conn).anyTimes();
    EasyMock.expect(master.getInstance()).andReturn(inst).anyTimes();
    EasyMock.replay(master);
    final MasterClientServiceHandler mcsh = new MasterClientServiceHandler(master) {

        @Override
        protected Table.ID getTableId(Instance inst, String tableName) throws ThriftTableOperationException {
            try {
                return Table.ID.of(conn.tableOperations().tableIdMap().get(tableName));
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
    };
    ClientContext context = new ClientContext(inst, new Credentials("root", new PasswordToken(ROOT_PASSWORD)), getClientConfig());
    return new ReplicationOperationsImpl(context) {

        @Override
        protected boolean getMasterDrain(final TInfo tinfo, final TCredentials rpcCreds, final String tableName, final Set<String> wals) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
            try {
                return mcsh.drainReplicationTable(tinfo, rpcCreds, tableName, wals);
            } catch (TException e) {
                throw new RuntimeException(e);
            }
        }
    };
}
Also used : TInfo(org.apache.accumulo.core.trace.thrift.TInfo) TException(org.apache.thrift.TException) MetadataTable(org.apache.accumulo.core.metadata.MetadataTable) Table(org.apache.accumulo.core.client.impl.Table) ReplicationTable(org.apache.accumulo.core.replication.ReplicationTable) Set(java.util.Set) Instance(org.apache.accumulo.core.client.Instance) TCredentials(org.apache.accumulo.core.security.thrift.TCredentials) ClientContext(org.apache.accumulo.core.client.impl.ClientContext) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) AccumuloException(org.apache.accumulo.core.client.AccumuloException) ThriftTableOperationException(org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException) Master(org.apache.accumulo.master.Master) PasswordToken(org.apache.accumulo.core.client.security.tokens.PasswordToken) MasterClientServiceHandler(org.apache.accumulo.master.MasterClientServiceHandler) Credentials(org.apache.accumulo.core.client.impl.Credentials) ReplicationOperationsImpl(org.apache.accumulo.core.client.impl.ReplicationOperationsImpl)
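The helper above relies on EasyMock's record/replay lifecycle: expectations are recorded on the mock, replay() switches the mock into replay mode, and only then do the stubbed calls return their canned values. A minimal, self-contained sketch of that lifecycle (the Greeter interface is hypothetical, used only to keep the example standalone):

import org.easymock.EasyMock;

public class EasyMockLifecycle {

    // Hypothetical interface, standing in for the mocked Master above
    interface Greeter {
        String greet();
    }

    public static void main(String[] args) {
        Greeter mock = EasyMock.createMock(Greeter.class);           // record mode
        EasyMock.expect(mock.greet()).andReturn("hello").anyTimes(); // stub a return value
        EasyMock.replay(mock);                                       // switch to replay mode
        System.out.println(mock.greet());                            // prints "hello"
        EasyMock.verify(mock);                                       // assert expectations were met
    }
}

Note that getReplicationOperations() stops after replay(master): it only needs the stubbed getConnector() and getInstance() returns, not interaction verification.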

Example 2 with ReplicationOperationsImpl

Use of org.apache.accumulo.core.client.impl.ReplicationOperationsImpl in project accumulo by apache.

From the class ReplicationOperationsImplIT, method waitsUntilEntriesAreReplicated.

@Test
public void waitsUntilEntriesAreReplicated() throws Exception {
    conn.tableOperations().create("foo");
    Table.ID tableId = Table.ID.of(conn.tableOperations().tableIdMap().get("foo"));
    String file1 = "/accumulo/wals/tserver+port/" + UUID.randomUUID(), file2 = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
    Status stat = Status.newBuilder().setBegin(0).setEnd(10000).setInfiniteEnd(false).setClosed(false).build();
    BatchWriter bw = ReplicationTable.getBatchWriter(conn);
    Mutation m = new Mutation(file1);
    StatusSection.add(m, tableId, ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    m = new Mutation(file2);
    StatusSection.add(m, tableId, ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    bw.close();
    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
    m.put(ReplicationSection.COLF, new Text(tableId.getUtf8()), ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    m = new Mutation(ReplicationSection.getRowPrefix() + file2);
    m.put(ReplicationSection.COLF, new Text(tableId.getUtf8()), ProtobufUtil.toValue(stat));
    // The second mutation must be added before the writer is closed, as with file1 above
    bw.addMutation(m);
    bw.close();
    final AtomicBoolean done = new AtomicBoolean(false);
    final AtomicBoolean exception = new AtomicBoolean(false);
    final ReplicationOperationsImpl roi = getReplicationOperations();
    Thread t = new Thread(new Runnable() {

        @Override
        public void run() {
            try {
                roi.drain("foo");
            } catch (Exception e) {
                log.error("Got error", e);
                exception.set(true);
            }
            done.set(true);
        }
    });
    t.start();
    // With the records, we shouldn't be drained
    Assert.assertFalse(done.get());
    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
    m.putDelete(ReplicationSection.COLF, new Text(tableId.getUtf8()));
    bw.addMutation(m);
    bw.flush();
    Assert.assertFalse(done.get());
    m = new Mutation(ReplicationSection.getRowPrefix() + file2);
    m.putDelete(ReplicationSection.COLF, new Text(tableId.getUtf8()));
    bw.addMutation(m);
    bw.flush();
    bw.close();
    // Removing metadata entries doesn't change anything
    Assert.assertFalse(done.get());
    // Remove the replication entries too
    bw = ReplicationTable.getBatchWriter(conn);
    m = new Mutation(file1);
    m.putDelete(StatusSection.NAME, new Text(tableId.getUtf8()));
    bw.addMutation(m);
    bw.flush();
    Assert.assertFalse(done.get());
    m = new Mutation(file2);
    m.putDelete(StatusSection.NAME, new Text(tableId.getUtf8()));
    bw.addMutation(m);
    bw.flush();
    try {
        t.join(5000);
    } catch (InterruptedException e) {
        Assert.fail("ReplicationOperations.drain did not complete");
    }
    // After both metadata and replication entries are removed, the drain can complete
    Assert.assertTrue("Drain never finished", done.get());
    Assert.assertFalse("Saw unexpected exception", exception.get());
}
Also used : Status(org.apache.accumulo.server.replication.proto.Replication.Status) MetadataTable(org.apache.accumulo.core.metadata.MetadataTable) Table(org.apache.accumulo.core.client.impl.Table) ReplicationTable(org.apache.accumulo.core.replication.ReplicationTable) Text(org.apache.hadoop.io.Text) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) TException(org.apache.thrift.TException) AccumuloException(org.apache.accumulo.core.client.AccumuloException) ThriftTableOperationException(org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) ReplicationOperationsImpl(org.apache.accumulo.core.client.impl.ReplicationOperationsImpl) Test(org.junit.Test)
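For context on the Status values this test writes and deletes, here is a rough, illustrative stand-in for the protobuf's fields. The fullyReplicated() condition is an assumption inferred from how these tests behave, not Accumulo's generated class or its authoritative server-side check:

// Illustrative stand-in for org.apache.accumulo.server.replication.proto.Replication.Status
final class StatusSketch {

    final long begin;          // bytes replicated so far
    final long end;            // bytes that need replicating
    final boolean infiniteEnd; // true while the WAL is still growing with no known end
    final boolean closed;      // true once no more data will be appended to the file

    StatusSketch(long begin, long end, boolean infiniteEnd, boolean closed) {
        this.begin = begin;
        this.end = end;
        this.infiniteEnd = infiniteEnd;
        this.closed = closed;
    }

    // Roughly: a finite-end record is done once replication catches up to its end
    boolean fullyReplicated() {
        return !infiniteEnd && begin >= end;
    }

    public static void main(String[] args) {
        // The records the test writes first (begin=0, end=10000) are pending, so drain blocks
        System.out.println(new StatusSketch(0, 10000, false, false).fullyReplicated()); // false
        // Once replication catches up (or the entries are removed), drain can return
        System.out.println(new StatusSketch(10000, 10000, false, true).fullyReplicated()); // true
    }
}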

Example 3 with ReplicationOperationsImpl

Use of org.apache.accumulo.core.client.impl.ReplicationOperationsImpl in project accumulo by apache.

From the class ReplicationOperationsImplIT, method laterCreatedLogsDontBlockExecution.

@Test
public void laterCreatedLogsDontBlockExecution() throws Exception {
    conn.tableOperations().create("foo");
    Table.ID tableId1 = Table.ID.of(conn.tableOperations().tableIdMap().get("foo"));
    String file1 = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
    Status stat = Status.newBuilder().setBegin(0).setEnd(10000).setInfiniteEnd(false).setClosed(false).build();
    BatchWriter bw = ReplicationTable.getBatchWriter(conn);
    Mutation m = new Mutation(file1);
    StatusSection.add(m, tableId1, ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    bw.close();
    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
    m.put(ReplicationSection.COLF, new Text(tableId1.getUtf8()), ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    bw.close();
    log.info("Reading metadata first time");
    for (Entry<Key, Value> e : conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        log.info("{}", e.getKey());
    }
    final AtomicBoolean done = new AtomicBoolean(false);
    final AtomicBoolean exception = new AtomicBoolean(false);
    final ReplicationOperationsImpl roi = getReplicationOperations();
    Thread t = new Thread(new Runnable() {

        @Override
        public void run() {
            try {
                roi.drain("foo");
            } catch (Exception e) {
                log.error("Got error", e);
                exception.set(true);
            }
            done.set(true);
        }
    });
    t.start();
    // Wait long enough for the drain thread to read the tables once
    Thread.sleep(2000);
    // Write another file, but also delete the old files
    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    m = new Mutation(ReplicationSection.getRowPrefix() + "/accumulo/wals/tserver+port/" + UUID.randomUUID());
    m.put(ReplicationSection.COLF, new Text(tableId1.getUtf8()), ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
    m.putDelete(ReplicationSection.COLF, new Text(tableId1.getUtf8()));
    bw.addMutation(m);
    bw.close();
    log.info("Reading metadata second time");
    for (Entry<Key, Value> e : conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        log.info("{}", e.getKey());
    }
    bw = ReplicationTable.getBatchWriter(conn);
    m = new Mutation(file1);
    m.putDelete(StatusSection.NAME, new Text(tableId1.getUtf8()));
    bw.addMutation(m);
    bw.close();
    try {
        t.join(5000);
    } catch (InterruptedException e) {
        Assert.fail("ReplicationOperations.drain did not complete");
    }
    // We should pass immediately because we aren't waiting on both files to be deleted (just the one we deleted)
    Assert.assertTrue("Drain didn't finish", done.get());
}
Also used : Status(org.apache.accumulo.server.replication.proto.Replication.Status) MetadataTable(org.apache.accumulo.core.metadata.MetadataTable) Table(org.apache.accumulo.core.client.impl.Table) ReplicationTable(org.apache.accumulo.core.replication.ReplicationTable) Text(org.apache.hadoop.io.Text) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) TException(org.apache.thrift.TException) AccumuloException(org.apache.accumulo.core.client.AccumuloException) ThriftTableOperationException(org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Key(org.apache.accumulo.core.data.Key) ReplicationOperationsImpl(org.apache.accumulo.core.client.impl.ReplicationOperationsImpl) Test(org.junit.Test)
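The guarantee this test pins down can be sketched generically: drain() takes a snapshot of the outstanding WALs at call time and ignores anything created afterwards. The following is an illustrative analogue under that assumption, not Accumulo's implementation:

import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

public class SnapshotDrain {

    private final Set<String> pending = ConcurrentHashMap.newKeySet();

    public void add(String wal)    { pending.add(wal); }    // a new WAL needs replication
    public void finish(String wal) { pending.remove(wal); } // a WAL finished replicating

    // Waits only for the WALs present when drain() was called; later additions never block it
    public void drain() throws InterruptedException {
        Set<String> snapshot = new HashSet<>(pending); // fixed at call time
        while (true) {
            snapshot.retainAll(pending); // drop WALs that have since finished
            if (snapshot.isEmpty()) {
                return;
            }
            Thread.sleep(100);
        }
    }
}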

Example 4 with ReplicationOperationsImpl

Use of org.apache.accumulo.core.client.impl.ReplicationOperationsImpl in project accumulo by apache.

From the class ReplicationOperationsImplIT, method unrelatedReplicationRecordsDontBlockDrain.

@Test
public void unrelatedReplicationRecordsDontBlockDrain() throws Exception {
    conn.tableOperations().create("foo");
    conn.tableOperations().create("bar");
    Table.ID tableId1 = Table.ID.of(conn.tableOperations().tableIdMap().get("foo"));
    Table.ID tableId2 = Table.ID.of(conn.tableOperations().tableIdMap().get("bar"));
    String file1 = "/accumulo/wals/tserver+port/" + UUID.randomUUID(), file2 = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
    Status stat = Status.newBuilder().setBegin(0).setEnd(10000).setInfiniteEnd(false).setClosed(false).build();
    BatchWriter bw = ReplicationTable.getBatchWriter(conn);
    Mutation m = new Mutation(file1);
    StatusSection.add(m, tableId1, ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    m = new Mutation(file2);
    StatusSection.add(m, tableId2, ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    bw.close();
    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
    m.put(ReplicationSection.COLF, new Text(tableId1.getUtf8()), ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    m = new Mutation(ReplicationSection.getRowPrefix() + file2);
    m.put(ReplicationSection.COLF, new Text(tableId2.getUtf8()), ProtobufUtil.toValue(stat));
    // Add file2's mutation (bar's record) before closing the writer
    bw.addMutation(m);
    bw.close();
    final AtomicBoolean done = new AtomicBoolean(false);
    final AtomicBoolean exception = new AtomicBoolean(false);
    final ReplicationOperationsImpl roi = getReplicationOperations();
    Thread t = new Thread(new Runnable() {

        @Override
        public void run() {
            try {
                roi.drain("foo");
            } catch (Exception e) {
                log.error("Got error", e);
                exception.set(true);
            }
            done.set(true);
        }
    });
    t.start();
    // With the records, we shouldn't be drained
    Assert.assertFalse(done.get());
    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
    m.putDelete(ReplicationSection.COLF, new Text(tableId1.getUtf8()));
    bw.addMutation(m);
    bw.flush();
    // Removing metadata entries doesn't change anything
    Assert.assertFalse(done.get());
    // Remove the replication entries too
    bw = ReplicationTable.getBatchWriter(conn);
    m = new Mutation(file1);
    m.putDelete(StatusSection.NAME, new Text(tableId1.getUtf8()));
    bw.addMutation(m);
    bw.flush();
    try {
        t.join(5000);
    } catch (InterruptedException e) {
        Assert.fail("ReplicationOperations.drain did not complete");
    }
    // After both metadata and replication entries are removed, the drain can complete
    Assert.assertTrue("Drain never completed", done.get());
    Assert.assertFalse("Saw unexpected exception", exception.get());
}
Also used : Status(org.apache.accumulo.server.replication.proto.Replication.Status) MetadataTable(org.apache.accumulo.core.metadata.MetadataTable) Table(org.apache.accumulo.core.client.impl.Table) ReplicationTable(org.apache.accumulo.core.replication.ReplicationTable) Text(org.apache.hadoop.io.Text) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) TException(org.apache.thrift.TException) AccumuloException(org.apache.accumulo.core.client.AccumuloException) ThriftTableOperationException(org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) ReplicationOperationsImpl(org.apache.accumulo.core.client.impl.ReplicationOperationsImpl) Test(org.junit.Test)
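Why records for "bar" never delay drain("foo") can likewise be sketched as per-table bookkeeping. This too is an illustrative analogue with hypothetical names, not Accumulo's code:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class PerTableDrain {

    private final Map<String, Set<String>> pendingByTable = new HashMap<>();

    public synchronized void add(String table, String wal) {
        pendingByTable.computeIfAbsent(table, k -> new HashSet<>()).add(wal);
    }

    public synchronized void finish(String table, String wal) {
        Set<String> wals = pendingByTable.get(table);
        if (wals != null) {
            wals.remove(wal);
        }
    }

    // Only the named table's WALs are consulted; other tables' records are irrelevant
    public void drain(String table) throws InterruptedException {
        while (true) {
            synchronized (this) {
                Set<String> wals = pendingByTable.get(table);
                if (wals == null || wals.isEmpty()) {
                    return;
                }
            }
            Thread.sleep(100);
        }
    }
}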

Example 5 with ReplicationOperationsImpl

Use of org.apache.accumulo.core.client.impl.ReplicationOperationsImpl in project accumulo by apache.

From the class ReplicationOperationsImplIT, method inprogressReplicationRecordsBlockExecution.

@Test
public void inprogressReplicationRecordsBlockExecution() throws Exception {
    conn.tableOperations().create("foo");
    Table.ID tableId1 = Table.ID.of(conn.tableOperations().tableIdMap().get("foo"));
    String file1 = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
    Status stat = Status.newBuilder().setBegin(0).setEnd(10000).setInfiniteEnd(false).setClosed(false).build();
    BatchWriter bw = ReplicationTable.getBatchWriter(conn);
    Mutation m = new Mutation(file1);
    StatusSection.add(m, tableId1, ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    bw.close();
    LogEntry logEntry = new LogEntry(new KeyExtent(tableId1, null, null), System.currentTimeMillis(), "tserver", file1);
    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
    m.put(ReplicationSection.COLF, new Text(tableId1.getUtf8()), ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    m = new Mutation(logEntry.getRow());
    m.put(logEntry.getColumnFamily(), logEntry.getColumnQualifier(), logEntry.getValue());
    bw.addMutation(m);
    bw.close();
    final AtomicBoolean done = new AtomicBoolean(false);
    final AtomicBoolean exception = new AtomicBoolean(false);
    final ReplicationOperationsImpl roi = getReplicationOperations();
    Thread t = new Thread(new Runnable() {

        @Override
        public void run() {
            try {
                roi.drain("foo");
            } catch (Exception e) {
                log.error("Got error", e);
                exception.set(true);
            }
            done.set(true);
        }
    });
    t.start();
    // With the records, we shouldn't be drained
    Assert.assertFalse(done.get());
    Status newStatus = Status.newBuilder().setBegin(1000).setEnd(2000).setInfiniteEnd(false).setClosed(true).build();
    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
    m.put(ReplicationSection.COLF, new Text(tableId1.getUtf8()), ProtobufUtil.toValue(newStatus));
    bw.addMutation(m);
    bw.flush();
    // Removing metadata entries doesn't change anything
    Assert.assertFalse(done.get());
    // Remove the replication entries too
    bw = ReplicationTable.getBatchWriter(conn);
    m = new Mutation(file1);
    m.put(StatusSection.NAME, new Text(tableId1.getUtf8()), ProtobufUtil.toValue(newStatus));
    bw.addMutation(m);
    bw.flush();
    try {
        t.join(5000);
    } catch (InterruptedException e) {
        Assert.fail("ReplicationOperations.drain did not complete");
    }
    // New records that are not yet fully replicated don't let the drain complete
    Assert.assertFalse("Drain somehow finished", done.get());
    Assert.assertFalse("Saw unexpected exception", exception.get());
}
Also used : Status(org.apache.accumulo.server.replication.proto.Replication.Status) MetadataTable(org.apache.accumulo.core.metadata.MetadataTable) Table(org.apache.accumulo.core.client.impl.Table) ReplicationTable(org.apache.accumulo.core.replication.ReplicationTable) Text(org.apache.hadoop.io.Text) KeyExtent(org.apache.accumulo.core.data.impl.KeyExtent) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) TException(org.apache.thrift.TException) AccumuloException(org.apache.accumulo.core.client.AccumuloException) ThriftTableOperationException(org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) LogEntry(org.apache.accumulo.core.tabletserver.log.LogEntry) ReplicationOperationsImpl(org.apache.accumulo.core.client.impl.ReplicationOperationsImpl) Test(org.junit.Test)
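One test idiom worth noting across these examples: t.join(5000) followed by a flag assertion. Thread.join(long) returns normally whether or not the thread terminated, so the flag (or isAlive()) is what actually distinguishes "drain finished" from "drain still blocked". A small self-contained demonstration:

public class JoinTimeoutDemo {

    public static void main(String[] args) throws InterruptedException {
        Thread worker = new Thread(() -> {
            try {
                Thread.sleep(60_000); // stands in for a drain() that never completes
            } catch (InterruptedException ignored) {
                // expected when the demo cleans up below
            }
        });
        worker.start();
        worker.join(1000);                    // gives up waiting after one second
        System.out.println(worker.isAlive()); // true: the worker never finished
        worker.interrupt();                   // unblock the worker so the JVM can exit
    }
}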

Aggregations

AccumuloException (org.apache.accumulo.core.client.AccumuloException): 5
AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException): 5
TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException): 5
ReplicationOperationsImpl (org.apache.accumulo.core.client.impl.ReplicationOperationsImpl): 5
Table (org.apache.accumulo.core.client.impl.Table): 5
ThriftTableOperationException (org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException): 5
MetadataTable (org.apache.accumulo.core.metadata.MetadataTable): 5
ReplicationTable (org.apache.accumulo.core.replication.ReplicationTable): 5
TException (org.apache.thrift.TException): 5
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 4
BatchWriter (org.apache.accumulo.core.client.BatchWriter): 4
BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig): 4
Mutation (org.apache.accumulo.core.data.Mutation): 4
Status (org.apache.accumulo.server.replication.proto.Replication.Status): 4
Text (org.apache.hadoop.io.Text): 4
Test (org.junit.Test): 4
Set (java.util.Set): 1
Instance (org.apache.accumulo.core.client.Instance): 1
ClientContext (org.apache.accumulo.core.client.impl.ClientContext): 1
Credentials (org.apache.accumulo.core.client.impl.Credentials): 1