
Example 66 with Status

Use of org.apache.accumulo.server.replication.proto.Replication.Status in project accumulo by apache.

The class ReplicationOperationsImplIT, method laterCreatedLogsDontBlockExecution.

@Test
public void laterCreatedLogsDontBlockExecution() throws Exception {
    conn.tableOperations().create("foo");
    Table.ID tableId1 = Table.ID.of(conn.tableOperations().tableIdMap().get("foo"));
    String file1 = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
    Status stat = Status.newBuilder().setBegin(0).setEnd(10000).setInfiniteEnd(false).setClosed(false).build();
    BatchWriter bw = ReplicationTable.getBatchWriter(conn);
    Mutation m = new Mutation(file1);
    StatusSection.add(m, tableId1, ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    bw.close();
    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
    m.put(ReplicationSection.COLF, new Text(tableId1.getUtf8()), ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    bw.close();
    log.info("Reading metadata first time");
    for (Entry<Key, Value> e : conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        log.info("{}", e.getKey());
    }
    final AtomicBoolean done = new AtomicBoolean(false);
    final AtomicBoolean exception = new AtomicBoolean(false);
    final ReplicationOperationsImpl roi = getReplicationOperations();
    Thread t = new Thread(new Runnable() {

        @Override
        public void run() {
            try {
                roi.drain("foo");
            } catch (Exception e) {
                log.error("Got error", e);
                exception.set(true);
            }
            done.set(true);
        }
    });
    t.start();
    // Wait long enough for the drain thread to scan the metadata table at least once
    Thread.sleep(2000);
    // Write another file, but also delete the old files
    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    m = new Mutation(ReplicationSection.getRowPrefix() + "/accumulo/wals/tserver+port/" + UUID.randomUUID());
    m.put(ReplicationSection.COLF, new Text(tableId1.getUtf8()), ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
    m.putDelete(ReplicationSection.COLF, new Text(tableId1.getUtf8()));
    bw.addMutation(m);
    bw.close();
    log.info("Reading metadata second time");
    for (Entry<Key, Value> e : conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        log.info("{}", e.getKey());
    }
    bw = ReplicationTable.getBatchWriter(conn);
    m = new Mutation(file1);
    m.putDelete(StatusSection.NAME, new Text(tableId1.getUtf8()));
    bw.addMutation(m);
    bw.close();
    try {
        t.join(5000);
    } catch (InterruptedException e) {
        Assert.fail("ReplicationOperations.drain did not complete");
    }
    // The drain should complete: it only waits on the files that existed when it was
    // invoked, so the file created afterwards does not block it
    Assert.assertTrue("Drain didn't finish", done.get());
    Assert.assertFalse("Saw unexpected exception", exception.get());
}
Also used: Status(org.apache.accumulo.server.replication.proto.Replication.Status) MetadataTable(org.apache.accumulo.core.metadata.MetadataTable) Table(org.apache.accumulo.core.client.impl.Table) ReplicationTable(org.apache.accumulo.core.replication.ReplicationTable) Text(org.apache.hadoop.io.Text) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) TException(org.apache.thrift.TException) AccumuloException(org.apache.accumulo.core.client.AccumuloException) ThriftTableOperationException(org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Key(org.apache.accumulo.core.data.Key) ReplicationOperationsImpl(org.apache.accumulo.core.client.impl.ReplicationOperationsImpl) Test(org.junit.Test)
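
The Status fields set throughout these tests follow one pattern: begin is the offset already replicated, end is the offset of data written so far, infiniteEnd marks a write-ahead log that may still grow, and closed marks a file that will receive no more data. A minimal sketch of the predicates those semantics imply, inferred from the assertions in this suite (the class and method names are illustrative, not Accumulo API):

import org.apache.accumulo.server.replication.proto.Replication.Status;

public class StatusChecks {

    // Fully replicated: the file is closed, bounded, and the replicated
    // offset (begin) has caught up with the data written (end).
    static boolean fullyReplicated(Status s) {
        return s.getClosed() && !s.getInfiniteEnd() && s.getBegin() >= s.getEnd();
    }

    // Work remains while the replicated offset trails the data, or while
    // the file is still unbounded and may keep growing.
    static boolean workRequired(Status s) {
        return s.getInfiniteEnd() || s.getBegin() < s.getEnd();
    }
}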

Example 67 with Status

Use of org.apache.accumulo.server.replication.proto.Replication.Status in project accumulo by apache.

The class ReplicationOperationsImplIT, method unrelatedReplicationRecordsDontBlockDrain.

@Test
public void unrelatedReplicationRecordsDontBlockDrain() throws Exception {
    conn.tableOperations().create("foo");
    conn.tableOperations().create("bar");
    Table.ID tableId1 = Table.ID.of(conn.tableOperations().tableIdMap().get("foo"));
    Table.ID tableId2 = Table.ID.of(conn.tableOperations().tableIdMap().get("bar"));
    String file1 = "/accumulo/wals/tserver+port/" + UUID.randomUUID(), file2 = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
    Status stat = Status.newBuilder().setBegin(0).setEnd(10000).setInfiniteEnd(false).setClosed(false).build();
    BatchWriter bw = ReplicationTable.getBatchWriter(conn);
    Mutation m = new Mutation(file1);
    StatusSection.add(m, tableId1, ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    m = new Mutation(file2);
    StatusSection.add(m, tableId2, ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    bw.close();
    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
    m.put(ReplicationSection.COLF, new Text(tableId1.getUtf8()), ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    m = new Mutation(ReplicationSection.getRowPrefix() + file2);
    m.put(ReplicationSection.COLF, new Text(tableId2.getUtf8()), ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    bw.close();
    final AtomicBoolean done = new AtomicBoolean(false);
    final AtomicBoolean exception = new AtomicBoolean(false);
    final ReplicationOperationsImpl roi = getReplicationOperations();
    Thread t = new Thread(new Runnable() {

        @Override
        public void run() {
            try {
                roi.drain("foo");
            } catch (Exception e) {
                log.error("Got error", e);
                exception.set(true);
            }
            done.set(true);
        }
    });
    t.start();
    // While the replication records are present, the drain should not have completed
    Assert.assertFalse(done.get());
    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
    m.putDelete(ReplicationSection.COLF, new Text(tableId1.getUtf8()));
    bw.addMutation(m);
    bw.flush();
    // Removing the metadata entry alone doesn't unblock the drain
    Assert.assertFalse(done.get());
    // Remove the replication entries too
    bw = ReplicationTable.getBatchWriter(conn);
    m = new Mutation(file1);
    m.putDelete(StatusSection.NAME, new Text(tableId1.getUtf8()));
    bw.addMutation(m);
    bw.flush();
    try {
        t.join(5000);
    } catch (InterruptedException e) {
        Assert.fail("ReplicationOperations.drain did not complete");
    }
    // After both the metadata and replication entries are removed, the drain completes
    Assert.assertTrue("Drain never completed", done.get());
    Assert.assertFalse("Saw unexpected exception", exception.get());
}
Also used: Status(org.apache.accumulo.server.replication.proto.Replication.Status) MetadataTable(org.apache.accumulo.core.metadata.MetadataTable) Table(org.apache.accumulo.core.client.impl.Table) ReplicationTable(org.apache.accumulo.core.replication.ReplicationTable) Text(org.apache.hadoop.io.Text) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) TException(org.apache.thrift.TException) AccumuloException(org.apache.accumulo.core.client.AccumuloException) ThriftTableOperationException(org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) ReplicationOperationsImpl(org.apache.accumulo.core.client.impl.ReplicationOperationsImpl) Test(org.junit.Test)
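
Note that Assert.assertFalse(done.get()) immediately after t.start() is a best-effort check: nothing guarantees the drain thread has even scanned once before the assertion runs. A hedged sketch of a polling helper (illustrative, not part of this test harness) that gives the flag a bounded window instead of a single instantaneous read:

// Poll a flag for up to waitMillis and return its final value. Writing
// Assert.assertFalse(eventually(done, 500)) then asserts the flag stayed
// false for the whole window, a stronger claim than one immediate read.
static boolean eventually(AtomicBoolean flag, long waitMillis) throws InterruptedException {
    long deadline = System.currentTimeMillis() + waitMillis;
    while (!flag.get() && System.currentTimeMillis() < deadline) {
        Thread.sleep(50);
    }
    return flag.get();
}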

Example 68 with Status

Use of org.apache.accumulo.server.replication.proto.Replication.Status in project accumulo by apache.

The class ReplicationOperationsImplIT, method inprogressReplicationRecordsBlockExecution.

@Test
public void inprogressReplicationRecordsBlockExecution() throws Exception {
    conn.tableOperations().create("foo");
    Table.ID tableId1 = Table.ID.of(conn.tableOperations().tableIdMap().get("foo"));
    String file1 = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
    Status stat = Status.newBuilder().setBegin(0).setEnd(10000).setInfiniteEnd(false).setClosed(false).build();
    BatchWriter bw = ReplicationTable.getBatchWriter(conn);
    Mutation m = new Mutation(file1);
    StatusSection.add(m, tableId1, ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    bw.close();
    LogEntry logEntry = new LogEntry(new KeyExtent(tableId1, null, null), System.currentTimeMillis(), "tserver", file1);
    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
    m.put(ReplicationSection.COLF, new Text(tableId1.getUtf8()), ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    m = new Mutation(logEntry.getRow());
    m.put(logEntry.getColumnFamily(), logEntry.getColumnQualifier(), logEntry.getValue());
    bw.addMutation(m);
    bw.close();
    final AtomicBoolean done = new AtomicBoolean(false);
    final AtomicBoolean exception = new AtomicBoolean(false);
    final ReplicationOperationsImpl roi = getReplicationOperations();
    Thread t = new Thread(new Runnable() {

        @Override
        public void run() {
            try {
                roi.drain("foo");
            } catch (Exception e) {
                log.error("Got error", e);
                exception.set(true);
            }
            done.set(true);
        }
    });
    t.start();
    // While the replication records are present, the drain should not have completed
    Assert.assertFalse(done.get());
    Status newStatus = Status.newBuilder().setBegin(1000).setEnd(2000).setInfiniteEnd(false).setClosed(true).build();
    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
    m.put(ReplicationSection.COLF, new Text(tableId1.getUtf8()), ProtobufUtil.toValue(newStatus));
    bw.addMutation(m);
    bw.flush();
    // Updating the metadata entry (still not fully replicated) doesn't unblock the drain
    Assert.assertFalse(done.get());
    // Update the replication table entry as well
    bw = ReplicationTable.getBatchWriter(conn);
    m = new Mutation(file1);
    m.put(StatusSection.NAME, new Text(tableId1.getUtf8()), ProtobufUtil.toValue(newStatus));
    bw.addMutation(m);
    bw.flush();
    try {
        t.join(5000);
    } catch (InterruptedException e) {
        Assert.fail("ReplicationOperations.drain did not complete");
    }
    // Records that are updated but still not fully replicated don't let the drain complete
    Assert.assertFalse("Drain somehow finished", done.get());
    Assert.assertFalse("Saw unexpected exception", exception.get());
}
Also used: Status(org.apache.accumulo.server.replication.proto.Replication.Status) MetadataTable(org.apache.accumulo.core.metadata.MetadataTable) Table(org.apache.accumulo.core.client.impl.Table) ReplicationTable(org.apache.accumulo.core.replication.ReplicationTable) Text(org.apache.hadoop.io.Text) KeyExtent(org.apache.accumulo.core.data.impl.KeyExtent) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) TException(org.apache.thrift.TException) AccumuloException(org.apache.accumulo.core.client.AccumuloException) ThriftTableOperationException(org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) LogEntry(org.apache.accumulo.core.tabletserver.log.LogEntry) ReplicationOperationsImpl(org.apache.accumulo.core.client.impl.ReplicationOperationsImpl) Test(org.junit.Test)
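
The updated record here is closed, but its begin (1000) is still short of its end (2000), so by the field semantics sketched after Example 66 the file still requires work and the drain correctly stays blocked. Reusing the hypothetical StatusChecks helper from that sketch:

Status inProgress = Status.newBuilder()
    .setBegin(1000).setEnd(2000)
    .setInfiniteEnd(false).setClosed(true).build();
// 1000 < 2000: a closed file with unreplicated data still blocks the drain.
assert StatusChecks.workRequired(inProgress);
assert !StatusChecks.fullyReplicated(inProgress);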

Example 69 with Status

Use of org.apache.accumulo.server.replication.proto.Replication.Status in project accumulo by apache.

The class SequentialWorkAssignerIT, method reprocessingOfCompletedWorkRemovesWork.

@Test
public void reprocessingOfCompletedWorkRemovesWork() throws Exception {
    ReplicationTarget target = new ReplicationTarget("cluster1", "table1", Table.ID.of("1"));
    Text serializedTarget = target.toText();
    // Create two mutations, both of which need replication work done
    BatchWriter bw = ReplicationTable.getBatchWriter(conn);
    // We want the name of file2 to sort before file1
    String filename1 = "z_file1", filename2 = "a_file1";
    String file1 = "/accumulo/wal/tserver+port/" + filename1, file2 = "/accumulo/wal/tserver+port/" + filename2;
    // However, file1 was created (and closed) before file2, per the created times below
    Status stat1 = Status.newBuilder().setBegin(100).setEnd(100).setClosed(true).setInfiniteEnd(false).setCreatedTime(250).build();
    Status stat2 = Status.newBuilder().setBegin(0).setEnd(100).setClosed(true).setInfiniteEnd(false).setCreatedTime(500).build();
    Mutation m = new Mutation(file1);
    WorkSection.add(m, serializedTarget, ProtobufUtil.toValue(stat1));
    bw.addMutation(m);
    m = new Mutation(file2);
    WorkSection.add(m, serializedTarget, ProtobufUtil.toValue(stat2));
    bw.addMutation(m);
    m = OrderSection.createMutation(file1, stat1.getCreatedTime());
    OrderSection.add(m, target.getSourceTableId(), ProtobufUtil.toValue(stat1));
    bw.addMutation(m);
    m = OrderSection.createMutation(file2, stat2.getCreatedTime());
    OrderSection.add(m, target.getSourceTableId(), ProtobufUtil.toValue(stat2));
    bw.addMutation(m);
    bw.close();
    DistributedWorkQueue workQueue = createMock(DistributedWorkQueue.class);
    // Treat filename1 as if it had already been submitted for replication
    Map<String, Map<Table.ID, String>> queuedWork = new HashMap<>();
    Map<Table.ID, String> queuedWorkForCluster = new HashMap<>();
    queuedWorkForCluster.put(target.getSourceTableId(), DistributedWorkQueueWorkAssignerHelper.getQueueKey(filename1, target));
    queuedWork.put("cluster1", queuedWorkForCluster);
    assigner.setQueuedWork(queuedWork);
    assigner.setWorkQueue(workQueue);
    assigner.setMaxQueueSize(Integer.MAX_VALUE);
    // Make sure we expect the invocations in the correct order (Accumulo keys sort lexicographically)
    workQueue.addWork(DistributedWorkQueueWorkAssignerHelper.getQueueKey(filename2, target), file2);
    expectLastCall().once();
    // file1 is dropped from the queued work because it's fully replicated (begin == end), freeing file2 to be queued
    replay(workQueue);
    assigner.createWork();
    verify(workQueue);
    Assert.assertEquals(1, queuedWork.size());
    Assert.assertTrue(queuedWork.containsKey("cluster1"));
    Map<Table.ID, String> cluster1Work = queuedWork.get("cluster1");
    Assert.assertEquals(1, cluster1Work.size());
    Assert.assertTrue(cluster1Work.containsKey(target.getSourceTableId()));
    Assert.assertEquals(DistributedWorkQueueWorkAssignerHelper.getQueueKey(filename2, target), cluster1Work.get(target.getSourceTableId()));
}
Also used: Status(org.apache.accumulo.server.replication.proto.Replication.Status) Table(org.apache.accumulo.core.client.impl.Table) ReplicationTable(org.apache.accumulo.core.replication.ReplicationTable) HashMap(java.util.HashMap) Text(org.apache.hadoop.io.Text) DistributedWorkQueue(org.apache.accumulo.server.zookeeper.DistributedWorkQueue) ReplicationTarget(org.apache.accumulo.core.replication.ReplicationTarget) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Map(java.util.Map) Test(org.junit.Test)
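
What this test pins down is the assigner's cleanup pass: work already queued for a (peer, source table) pair is dropped once its Status shows the file fully replicated, which is what admits the next file for that pair. A simplified sketch of that step, reusing the hypothetical StatusChecks helper (the map shape matches the test; the status lookup is assumed for illustration):

// Drop completed entries from the per-peer bookkeeping so the next file
// for each (peer, source table) pair can be queued. currentStatus is a
// hypothetical lookup from queue key to the latest Status record.
void removeCompletedWork(Map<String, Map<Table.ID, String>> queuedWork,
        Map<String, Status> currentStatus) {
    for (Map<Table.ID, String> perPeer : queuedWork.values()) {
        perPeer.values().removeIf(queueKey -> {
            Status s = currentStatus.get(queueKey);
            return s != null && StatusChecks.fullyReplicated(s);
        });
    }
}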

Example 70 with Status

Use of org.apache.accumulo.server.replication.proto.Replication.Status in project accumulo by apache.

The class SequentialWorkAssignerIT, method workAcrossPeersHappensConcurrently.

@Test
public void workAcrossPeersHappensConcurrently() throws Exception {
    ReplicationTarget target1 = new ReplicationTarget("cluster1", "table1", Table.ID.of("1"));
    Text serializedTarget1 = target1.toText();
    ReplicationTarget target2 = new ReplicationTarget("cluster2", "table1", Table.ID.of("1"));
    Text serializedTarget2 = target2.toText();
    // Create two mutations, both of which need replication work done
    BatchWriter bw = ReplicationTable.getBatchWriter(conn);
    // We want the name of file2 to sort before file1
    String filename1 = "z_file1", filename2 = "a_file1";
    String file1 = "/accumulo/wal/tserver+port/" + filename1, file2 = "/accumulo/wal/tserver+port/" + filename2;
    // However, file1 was created before file2, per the created times below
    Status stat1 = Status.newBuilder().setBegin(0).setEnd(100).setClosed(true).setInfiniteEnd(false).setCreatedTime(250).build();
    Status stat2 = Status.newBuilder().setBegin(0).setEnd(100).setClosed(true).setInfiniteEnd(false).setCreatedTime(500).build();
    Mutation m = new Mutation(file1);
    WorkSection.add(m, serializedTarget1, ProtobufUtil.toValue(stat1));
    bw.addMutation(m);
    m = new Mutation(file2);
    WorkSection.add(m, serializedTarget2, ProtobufUtil.toValue(stat2));
    bw.addMutation(m);
    m = OrderSection.createMutation(file1, stat1.getCreatedTime());
    OrderSection.add(m, target1.getSourceTableId(), ProtobufUtil.toValue(stat1));
    bw.addMutation(m);
    m = OrderSection.createMutation(file2, stat2.getCreatedTime());
    OrderSection.add(m, target2.getSourceTableId(), ProtobufUtil.toValue(stat2));
    bw.addMutation(m);
    bw.close();
    DistributedWorkQueue workQueue = createMock(DistributedWorkQueue.class);
    Map<String, Map<Table.ID, String>> queuedWork = new HashMap<>();
    assigner.setQueuedWork(queuedWork);
    assigner.setWorkQueue(workQueue);
    assigner.setMaxQueueSize(Integer.MAX_VALUE);
    // Make sure we expect the invocations in the correct order (Accumulo keys sort lexicographically)
    workQueue.addWork(DistributedWorkQueueWorkAssignerHelper.getQueueKey(filename1, target1), file1);
    expectLastCall().once();
    workQueue.addWork(DistributedWorkQueueWorkAssignerHelper.getQueueKey(filename2, target2), file2);
    expectLastCall().once();
    // Both files are queued: they target different peers, so neither blocks the other
    replay(workQueue);
    assigner.createWork();
    verify(workQueue);
    Assert.assertEquals(2, queuedWork.size());
    Assert.assertTrue(queuedWork.containsKey("cluster1"));
    Map<Table.ID, String> cluster1Work = queuedWork.get("cluster1");
    Assert.assertEquals(1, cluster1Work.size());
    Assert.assertTrue(cluster1Work.containsKey(target1.getSourceTableId()));
    Assert.assertEquals(DistributedWorkQueueWorkAssignerHelper.getQueueKey(filename1, target1), cluster1Work.get(target1.getSourceTableId()));
    Map<Table.ID, String> cluster2Work = queuedWork.get("cluster2");
    Assert.assertEquals(1, cluster2Work.size());
    Assert.assertTrue(cluster2Work.containsKey(target2.getSourceTableId()));
    Assert.assertEquals(DistributedWorkQueueWorkAssignerHelper.getQueueKey(filename2, target2), cluster2Work.get(target2.getSourceTableId()));
}
Also used: Status(org.apache.accumulo.server.replication.proto.Replication.Status) Table(org.apache.accumulo.core.client.impl.Table) ReplicationTable(org.apache.accumulo.core.replication.ReplicationTable) HashMap(java.util.HashMap) Text(org.apache.hadoop.io.Text) DistributedWorkQueue(org.apache.accumulo.server.zookeeper.DistributedWorkQueue) ReplicationTarget(org.apache.accumulo.core.replication.ReplicationTarget) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Map(java.util.Map) Test(org.junit.Test)
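
The contrast with the previous test is the keying: the sequential assigner serializes work per (peer, source table) pair but treats peers independently, so cluster1 and cluster2 each get their first file queued in the same pass. The queue key packs the filename together with the target's identifying fields; a sketch of composing such a key (the '|' separator and exact layout are assumptions for illustration, not confirmed by this page):

// Illustrative only: combine the WAL filename and the replication target
// into a single work-queue key, one segment per identifying field.
static String queueKey(String filename, ReplicationTarget target) {
    return filename + "|" + target.getPeerName() + "|"
        + target.getRemoteIdentifier() + "|" + target.getSourceTableId();
}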

Aggregations

Status (org.apache.accumulo.server.replication.proto.Replication.Status): 77 usages
Test (org.junit.Test): 57 usages
Mutation (org.apache.accumulo.core.data.Mutation): 30 usages
Text (org.apache.hadoop.io.Text): 29 usages
BatchWriter (org.apache.accumulo.core.client.BatchWriter): 28 usages
Key (org.apache.accumulo.core.data.Key): 27 usages
Value (org.apache.accumulo.core.data.Value): 26 usages
Scanner (org.apache.accumulo.core.client.Scanner): 21 usages
ReplicationTarget (org.apache.accumulo.core.replication.ReplicationTarget): 20 usages
Path (org.apache.hadoop.fs.Path): 17 usages
HashMap (java.util.HashMap): 14 usages
BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig): 14 usages
Table (org.apache.accumulo.core.client.impl.Table): 14 usages
ReplicationTable (org.apache.accumulo.core.replication.ReplicationTable): 13 usages
AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException): 12 usages
AccumuloException (org.apache.accumulo.core.client.AccumuloException): 11 usages
Connector (org.apache.accumulo.core.client.Connector): 11 usages
InvalidProtocolBufferException (com.google.protobuf.InvalidProtocolBufferException): 10 usages
TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException): 10 usages
DataInputStream (java.io.DataInputStream): 9 usages