
Example 6 with DistributedWorkQueue

Use of org.apache.accumulo.server.zookeeper.DistributedWorkQueue in project accumulo by apache.

The class ReplicationWorker, method run.

@Override
public void run() {
    DefaultConfiguration defaultConf = DefaultConfiguration.getInstance();
    long defaultDelay = defaultConf.getTimeInMillis(Property.REPLICATION_WORK_PROCESSOR_DELAY);
    long defaultPeriod = defaultConf.getTimeInMillis(Property.REPLICATION_WORK_PROCESSOR_PERIOD);
    long delay = conf.getTimeInMillis(Property.REPLICATION_WORK_PROCESSOR_DELAY);
    long period = conf.getTimeInMillis(Property.REPLICATION_WORK_PROCESSOR_PERIOD);
    try {
        DistributedWorkQueue workQueue;
        if (defaultDelay != delay && defaultPeriod != period) {
            log.debug("Configuration DistributedWorkQueue with delay and period of {} and {}", delay, period);
            workQueue = new DistributedWorkQueue(ZooUtil.getRoot(inst) + ReplicationConstants.ZOO_WORK_QUEUE, conf, delay, period);
        } else {
            log.debug("Configuring DistributedWorkQueue with default delay and period");
            workQueue = new DistributedWorkQueue(ZooUtil.getRoot(inst) + ReplicationConstants.ZOO_WORK_QUEUE, conf);
        }
        workQueue.startProcessing(new ReplicationProcessor(context, conf, fs), executor);
    } catch (KeeperException | InterruptedException e) {
        throw new RuntimeException(e);
    }
}
Also used: DefaultConfiguration(org.apache.accumulo.core.conf.DefaultConfiguration) DistributedWorkQueue(org.apache.accumulo.server.zookeeper.DistributedWorkQueue) KeeperException(org.apache.zookeeper.KeeperException)
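
Note that the && in the guard above means both the delay and the period must differ from their defaults before the overridden values are used; overriding only one of the two properties silently falls through to the default constructor.

The second argument to startProcessing is the worker callback. This example and the next both pass an existing implementation (ReplicationProcessor, LogProcessor); below is a minimal sketch of one, assuming the DistributedWorkQueue.Processor interface exposes newProcessor() and process(String, byte[]) as in Accumulo 1.x. The class and parameter names are illustrative.

private static class NoOpProcessor implements DistributedWorkQueue.Processor {
    @Override
    public DistributedWorkQueue.Processor newProcessor() {
        // A fresh instance is requested for each work item picked up from ZooKeeper.
        return new NoOpProcessor();
    }

    @Override
    public void process(String workId, byte[] data) {
        // Real processors (e.g. ReplicationProcessor) act on the queued item here.
    }
}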

Example 7 with DistributedWorkQueue

Use of org.apache.accumulo.server.zookeeper.DistributedWorkQueue in project accumulo by apache.

The class LogSorter, method startWatchingForRecoveryLogs.

public void startWatchingForRecoveryLogs(ThreadPoolExecutor distWorkQThreadPool) throws KeeperException, InterruptedException {
    this.threadPool = distWorkQThreadPool;
    new DistributedWorkQueue(ZooUtil.getRoot(instance) + Constants.ZRECOVERY, conf).startProcessing(new LogProcessor(), this.threadPool);
}
Also used: DistributedWorkQueue(org.apache.accumulo.server.zookeeper.DistributedWorkQueue)
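
startProcessing only consumes; some producer must first enqueue items under the same ZooKeeper node for the LogProcessor to see. A hedged sketch of that side, using the two-String addWork overload that the mocked calls in the following tests rely on; the wrapper method, work ID, and payload are illustrative, and the types match those used in the surrounding examples.

void queueRecovery(Instance instance, AccumuloConfiguration conf, String walPath) throws KeeperException, InterruptedException {
    DistributedWorkQueue recoveryQueue = new DistributedWorkQueue(ZooUtil.getRoot(instance) + Constants.ZRECOVERY, conf);
    // The work ID becomes the child node name under ZRECOVERY; here it is derived from the WAL path.
    recoveryQueue.addWork(walPath.substring(walPath.lastIndexOf('/') + 1), walPath);
}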

Example 8 with DistributedWorkQueue

Use of org.apache.accumulo.server.zookeeper.DistributedWorkQueue in project accumulo by apache.

The class SequentialWorkAssignerIT, method reprocessingOfCompletedWorkRemovesWork.

@Test
public void reprocessingOfCompletedWorkRemovesWork() throws Exception {
    ReplicationTarget target = new ReplicationTarget("cluster1", "table1", Table.ID.of("1"));
    Text serializedTarget = target.toText();
    // Create two mutations, both of which need replication work done
    BatchWriter bw = ReplicationTable.getBatchWriter(conn);
    // We want the name of file2 to sort before file1
    String filename1 = "z_file1", filename2 = "a_file1";
    String file1 = "/accumulo/wal/tserver+port/" + filename1, file2 = "/accumulo/wal/tserver+port/" + filename2;
    // However, file1 was closed before file2 (createdTime 250 vs. 500)
    Status stat1 = Status.newBuilder().setBegin(100).setEnd(100).setClosed(true).setInfiniteEnd(false).setCreatedTime(250).build();
    Status stat2 = Status.newBuilder().setBegin(0).setEnd(100).setClosed(true).setInfiniteEnd(false).setCreatedTime(500).build();
    Mutation m = new Mutation(file1);
    WorkSection.add(m, serializedTarget, ProtobufUtil.toValue(stat1));
    bw.addMutation(m);
    m = new Mutation(file2);
    WorkSection.add(m, serializedTarget, ProtobufUtil.toValue(stat2));
    bw.addMutation(m);
    m = OrderSection.createMutation(file1, stat1.getCreatedTime());
    OrderSection.add(m, target.getSourceTableId(), ProtobufUtil.toValue(stat1));
    bw.addMutation(m);
    m = OrderSection.createMutation(file2, stat2.getCreatedTime());
    OrderSection.add(m, target.getSourceTableId(), ProtobufUtil.toValue(stat2));
    bw.addMutation(m);
    bw.close();
    DistributedWorkQueue workQueue = createMock(DistributedWorkQueue.class);
    // Treat filename1 as if we had already submitted it for replication
    Map<String, Map<Table.ID, String>> queuedWork = new HashMap<>();
    Map<Table.ID, String> queuedWorkForCluster = new HashMap<>();
    queuedWorkForCluster.put(target.getSourceTableId(), DistributedWorkQueueWorkAssignerHelper.getQueueKey(filename1, target));
    queuedWork.put("cluster1", queuedWorkForCluster);
    assigner.setQueuedWork(queuedWork);
    assigner.setWorkQueue(workQueue);
    assigner.setMaxQueueSize(Integer.MAX_VALUE);
    // Expect the invocations in the order Accumulo's sorted iteration will produce them
    workQueue.addWork(DistributedWorkQueueWorkAssignerHelper.getQueueKey(filename2, target), file2);
    expectLastCall().once();
    // Only file2 is queued: file1's work is removed because the file is fully replicated (begin == end)
    replay(workQueue);
    assigner.createWork();
    verify(workQueue);
    Assert.assertEquals(1, queuedWork.size());
    Assert.assertTrue(queuedWork.containsKey("cluster1"));
    Map<Table.ID, String> cluster1Work = queuedWork.get("cluster1");
    Assert.assertEquals(1, cluster1Work.size());
    Assert.assertTrue(cluster1Work.containsKey(target.getSourceTableId()));
    Assert.assertEquals(DistributedWorkQueueWorkAssignerHelper.getQueueKey(filename2, target), cluster1Work.get(target.getSourceTableId()));
}
Also used: Status(org.apache.accumulo.server.replication.proto.Replication.Status) Table(org.apache.accumulo.core.client.impl.Table) ReplicationTable(org.apache.accumulo.core.replication.ReplicationTable) HashMap(java.util.HashMap) Text(org.apache.hadoop.io.Text) DistributedWorkQueue(org.apache.accumulo.server.zookeeper.DistributedWorkQueue) ReplicationTarget(org.apache.accumulo.core.replication.ReplicationTarget) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Map(java.util.Map) Test(org.junit.Test)
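
These assigner tests all follow the same EasyMock record/replay/verify lifecycle, which is easy to misread on first contact; a compact sketch, where the key and path strings are placeholders:

DistributedWorkQueue workQueue = createMock(DistributedWorkQueue.class); // 1. create the mock
workQueue.addWork("expected-key", "expected-path");                      // 2. record an expected call
expectLastCall().once();
replay(workQueue);   // 3. switch from recording to replay mode
// ... exercise the code under test, e.g. assigner.createWork() ...
verify(workQueue);   // 4. fail unless exactly the recorded calls happened

Recording zero expectations before replay, as the last example below does, turns verify into an assertion that the queue was never touched.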

Example 9 with DistributedWorkQueue

Use of org.apache.accumulo.server.zookeeper.DistributedWorkQueue in project accumulo by apache.

The class SequentialWorkAssignerIT, method workAcrossPeersHappensConcurrently.

@Test
public void workAcrossPeersHappensConcurrently() throws Exception {
    ReplicationTarget target1 = new ReplicationTarget("cluster1", "table1", Table.ID.of("1"));
    Text serializedTarget1 = target1.toText();
    ReplicationTarget target2 = new ReplicationTarget("cluster2", "table1", Table.ID.of("1"));
    Text serializedTarget2 = target2.toText();
    // Create two mutations, both of which need replication work done
    BatchWriter bw = ReplicationTable.getBatchWriter(conn);
    // We want the name of file2 to sort before file1
    String filename1 = "z_file1", filename2 = "a_file1";
    String file1 = "/accumulo/wal/tserver+port/" + filename1, file2 = "/accumulo/wal/tserver+port/" + filename2;
    // However, file1 was closed before file2 (createdTime 250 vs. 500)
    Status stat1 = Status.newBuilder().setBegin(0).setEnd(100).setClosed(true).setInfiniteEnd(false).setCreatedTime(250).build();
    Status stat2 = Status.newBuilder().setBegin(0).setEnd(100).setClosed(true).setInfiniteEnd(false).setCreatedTime(500).build();
    Mutation m = new Mutation(file1);
    WorkSection.add(m, serializedTarget1, ProtobufUtil.toValue(stat1));
    bw.addMutation(m);
    m = new Mutation(file2);
    WorkSection.add(m, serializedTarget2, ProtobufUtil.toValue(stat2));
    bw.addMutation(m);
    m = OrderSection.createMutation(file1, stat1.getCreatedTime());
    OrderSection.add(m, target1.getSourceTableId(), ProtobufUtil.toValue(stat1));
    bw.addMutation(m);
    m = OrderSection.createMutation(file2, stat2.getCreatedTime());
    OrderSection.add(m, target2.getSourceTableId(), ProtobufUtil.toValue(stat2));
    bw.addMutation(m);
    bw.close();
    DistributedWorkQueue workQueue = createMock(DistributedWorkQueue.class);
    Map<String, Map<Table.ID, String>> queuedWork = new HashMap<>();
    assigner.setQueuedWork(queuedWork);
    assigner.setWorkQueue(workQueue);
    assigner.setMaxQueueSize(Integer.MAX_VALUE);
    // Expect the invocations in the order Accumulo's sorted iteration will produce them
    workQueue.addWork(DistributedWorkQueueWorkAssignerHelper.getQueueKey(filename1, target1), file1);
    expectLastCall().once();
    workQueue.addWork(DistributedWorkQueueWorkAssignerHelper.getQueueKey(filename2, target2), file2);
    expectLastCall().once();
    // Both files are queued: the targets live on different peers, so ordering does not couple them
    replay(workQueue);
    assigner.createWork();
    verify(workQueue);
    Assert.assertEquals(2, queuedWork.size());
    Assert.assertTrue(queuedWork.containsKey("cluster1"));
    Map<Table.ID, String> cluster1Work = queuedWork.get("cluster1");
    Assert.assertEquals(1, cluster1Work.size());
    Assert.assertTrue(cluster1Work.containsKey(target1.getSourceTableId()));
    Assert.assertEquals(DistributedWorkQueueWorkAssignerHelper.getQueueKey(filename1, target1), cluster1Work.get(target1.getSourceTableId()));
    Map<Table.ID, String> cluster2Work = queuedWork.get("cluster2");
    Assert.assertEquals(1, cluster2Work.size());
    Assert.assertTrue(cluster2Work.containsKey(target2.getSourceTableId()));
    Assert.assertEquals(DistributedWorkQueueWorkAssignerHelper.getQueueKey(filename2, target2), cluster2Work.get(target2.getSourceTableId()));
}
Also used: Status(org.apache.accumulo.server.replication.proto.Replication.Status) Table(org.apache.accumulo.core.client.impl.Table) ReplicationTable(org.apache.accumulo.core.replication.ReplicationTable) HashMap(java.util.HashMap) Text(org.apache.hadoop.io.Text) DistributedWorkQueue(org.apache.accumulo.server.zookeeper.DistributedWorkQueue) ReplicationTarget(org.apache.accumulo.core.replication.ReplicationTarget) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Map(java.util.Map) Test(org.junit.Test)
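
The sequential assigner orders work within a single peer and source table but not across peers, which is why both expectations above fire. An illustrative sketch of the per-(peer, source table) gate that the assertions exercise; this is not the actual implementation, only the invariant it maintains, with method and variable names chosen for illustration:

// Illustrative only: at most one file in flight per source table on a given peer.
boolean shouldQueue(String peer, Table.ID sourceTableId, Map<String, Map<Table.ID, String>> queuedWork) {
    Map<Table.ID, String> perPeer = queuedWork.get(peer);
    // Work for other peers never consults this entry, so it proceeds concurrently.
    return perPeer == null || !perPeer.containsKey(sourceTableId);
}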

Example 10 with DistributedWorkQueue

Use of org.apache.accumulo.server.zookeeper.DistributedWorkQueue in project accumulo by apache.

The class UnorderedWorkAssignerIT, method doNotCreateWorkForFilesNotNeedingIt.

@Test
public void doNotCreateWorkForFilesNotNeedingIt() throws Exception {
    ReplicationTarget target1 = new ReplicationTarget("cluster1", "table1", Table.ID.of("1")), target2 = new ReplicationTarget("cluster1", "table2", Table.ID.of("2"));
    Text serializedTarget1 = target1.toText(), serializedTarget2 = target2.toText();
    // Create two mutations for files that do not yet need replication work
    BatchWriter bw = ReplicationTable.getBatchWriter(conn);
    String filename1 = UUID.randomUUID().toString(), filename2 = UUID.randomUUID().toString();
    String file1 = "/accumulo/wal/tserver+port/" + filename1, file2 = "/accumulo/wal/tserver+port/" + filename2;
    Mutation m = new Mutation(file1);
    WorkSection.add(m, serializedTarget1, StatusUtil.fileCreatedValue(5));
    bw.addMutation(m);
    m = new Mutation(file2);
    WorkSection.add(m, serializedTarget2, StatusUtil.fileCreatedValue(10));
    bw.addMutation(m);
    bw.close();
    DistributedWorkQueue workQueue = createMock(DistributedWorkQueue.class);
    HashSet<String> queuedWork = new HashSet<>();
    assigner.setQueuedWork(queuedWork);
    // Wire the mock into the assigner so the verify() below is meaningful, as the other tests in this listing do
    assigner.setWorkQueue(workQueue);
    assigner.setMaxQueueSize(Integer.MAX_VALUE);
    replay(workQueue);
    assigner.createWork();
    verify(workQueue);
}
Also used: ReplicationTarget(org.apache.accumulo.core.replication.ReplicationTarget) Text(org.apache.hadoop.io.Text) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) DistributedWorkQueue(org.apache.accumulo.server.zookeeper.DistributedWorkQueue) HashSet(java.util.HashSet) Test(org.junit.Test)
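
StatusUtil.fileCreatedValue produces the protobuf Status for a file that was just created and is still open, so neither file has replicable data and createWork should enqueue nothing; because the mock was replayed with no recorded expectations, verify fails if addWork is ever called. A sketch of what such a status presumably looks like, mirroring the builders in the earlier examples; the exact flag values are an assumption, and only closed == false with an empty replicated range matters for this test:

// Presumed shape of StatusUtil.fileCreatedValue(5); flag values are an assumption.
Status created = Status.newBuilder().setBegin(0).setEnd(0).setClosed(false).setInfiniteEnd(false).setCreatedTime(5).build();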

Aggregations

DistributedWorkQueue (org.apache.accumulo.server.zookeeper.DistributedWorkQueue): 15 uses
Test (org.junit.Test): 10 uses
ReplicationTarget (org.apache.accumulo.core.replication.ReplicationTarget): 9 uses
BatchWriter (org.apache.accumulo.core.client.BatchWriter): 7 uses
Mutation (org.apache.accumulo.core.data.Mutation): 7 uses
Text (org.apache.hadoop.io.Text): 6 uses
HashMap (java.util.HashMap): 5 uses
HashSet (java.util.HashSet): 5 uses
Map (java.util.Map): 5 uses
Table (org.apache.accumulo.core.client.impl.Table): 5 uses
Status (org.apache.accumulo.server.replication.proto.Replication.Status): 5 uses
ReplicationTable (org.apache.accumulo.core.replication.ReplicationTable): 4 uses
Path (org.apache.hadoop.fs.Path): 2 uses
KeeperException (org.apache.zookeeper.KeeperException): 2 uses
BufferedReader (java.io.BufferedReader): 1 use
IOException (java.io.IOException): 1 use
InputStreamReader (java.io.InputStreamReader): 1 use
UnknownHostException (java.net.UnknownHostException): 1 use
LinkedHashSet (java.util.LinkedHashSet): 1 use
TreeMap (java.util.TreeMap): 1 use