Example 61 with Status

Use of org.apache.accumulo.server.replication.proto.Replication.Status in project accumulo by apache.

The class AccumuloReplicaSystemTest, method onlyChooseMutationsForDesiredTableWithOpenStatus.

@Test
public void onlyChooseMutationsForDesiredTableWithOpenStatus() throws Exception {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream dos = new DataOutputStream(baos);
    LogFileKey key = new LogFileKey();
    LogFileValue value = new LogFileValue();
    // What is seq used for?
    key.seq = 1L;
    /*
     * Disclaimer: the following series of LogFileKey and LogFileValue pairs bears *no* resemblance to what these entries would actually look like in a WAL.
     * They exist solely to test that each LogEvent type is handled; order is not important.
     */
    key.event = LogEvents.DEFINE_TABLET;
    key.tablet = new KeyExtent(Table.ID.of("1"), null, null);
    key.tid = 1;
    key.write(dos);
    value.write(dos);
    key.tablet = null;
    key.event = LogEvents.MUTATION;
    key.filename = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
    value.mutations = Arrays.asList(new ServerMutation(new Text("row")));
    key.write(dos);
    value.write(dos);
    key.event = LogEvents.DEFINE_TABLET;
    key.tablet = new KeyExtent(Table.ID.of("2"), null, null);
    key.tid = 2;
    value.mutations = Collections.emptyList();
    key.write(dos);
    value.write(dos);
    key.event = LogEvents.OPEN;
    key.tid = LogFileKey.VERSION;
    key.tserverSession = "foobar";
    key.write(dos);
    value.write(dos);
    key.tablet = null;
    key.event = LogEvents.MUTATION;
    key.filename = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
    value.mutations = Arrays.asList(new ServerMutation(new Text("badrow")));
    key.write(dos);
    value.write(dos);
    key.event = LogEvents.COMPACTION_START;
    key.tid = 2;
    key.filename = "/accumulo/tables/1/t-000001/A000001.rf";
    value.mutations = Collections.emptyList();
    key.write(dos);
    value.write(dos);
    key.event = LogEvents.DEFINE_TABLET;
    key.tablet = new KeyExtent(Table.ID.of("1"), null, null);
    key.tid = 3;
    value.mutations = Collections.emptyList();
    key.write(dos);
    value.write(dos);
    key.event = LogEvents.COMPACTION_FINISH;
    key.tid = 6;
    value.mutations = Collections.emptyList();
    key.write(dos);
    value.write(dos);
    key.tablet = null;
    key.event = LogEvents.MUTATION;
    key.tid = 3;
    key.filename = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
    value.mutations = Arrays.asList(new ServerMutation(new Text("row")));
    key.write(dos);
    value.write(dos);
    dos.close();
    Map<String, String> confMap = new HashMap<>();
    confMap.put(Property.REPLICATION_NAME.getKey(), "source");
    AccumuloConfiguration conf = new ConfigurationCopy(confMap);
    AccumuloReplicaSystem ars = new AccumuloReplicaSystem();
    ars.setConf(conf);
    Status status = Status.newBuilder().setBegin(0).setEnd(0).setInfiniteEnd(true).setClosed(false).build();
    DataInputStream dis = new DataInputStream(new ByteArrayInputStream(baos.toByteArray()));
    WalReplication repl = ars.getWalEdits(new ReplicationTarget("peer", "1", Table.ID.of("1")), dis, new Path("/accumulo/wals/tserver+port/wal"), status, Long.MAX_VALUE, new HashSet<>());
    // We stopped because we got to the end of the file
    Assert.assertEquals(9, repl.entriesConsumed);
    Assert.assertEquals(2, repl.walEdits.getEditsSize());
    Assert.assertEquals(2, repl.sizeInRecords);
    Assert.assertNotEquals(0, repl.sizeInBytes);
}
Also used: Status (org.apache.accumulo.server.replication.proto.Replication.Status), Path (org.apache.hadoop.fs.Path), ConfigurationCopy (org.apache.accumulo.core.conf.ConfigurationCopy), HashMap (java.util.HashMap), DataOutputStream (java.io.DataOutputStream), WalReplication (org.apache.accumulo.tserver.replication.AccumuloReplicaSystem.WalReplication), ServerMutation (org.apache.accumulo.server.data.ServerMutation), Text (org.apache.hadoop.io.Text), ByteArrayOutputStream (java.io.ByteArrayOutputStream), LogFileKey (org.apache.accumulo.tserver.logger.LogFileKey), DataInputStream (java.io.DataInputStream), KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent), ReplicationTarget (org.apache.accumulo.core.replication.ReplicationTarget), ByteArrayInputStream (java.io.ByteArrayInputStream), LogFileValue (org.apache.accumulo.tserver.logger.LogFileValue), AccumuloConfiguration (org.apache.accumulo.core.conf.AccumuloConfiguration), Test (org.junit.Test)
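
For orientation, the Status fields exercised above follow a simple convention: begin is the offset already replicated, end is the last offset available to replicate, infiniteEnd marks a file that is still growing, and closed marks a WAL that will receive no more data. The helper below is a hedged, illustrative sketch of that convention, not an Accumulo API:

static boolean fullyReplicated(Status s) {
    if (!s.getClosed()) {
        // the WAL may still receive more data
        return false;
    }
    if (s.getInfiniteEnd()) {
        // assumed convention: begin is advanced to Long.MAX_VALUE once an
        // infinite-end file has been fully drained
        return s.getBegin() == Long.MAX_VALUE;
    }
    return s.getBegin() >= s.getEnd();
}

Under this reading, the open status built in the test (begin=0, infiniteEnd=true, closed=false) is not fully replicated, which is why getWalEdits still walks the file and returns the two edits asserted above.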

Example 62 with Status

Use of org.apache.accumulo.server.replication.proto.Replication.Status in project accumulo by apache.

The class ReplicationProcessorTest, method filesWhichMakeNoProgressArentReplicatedAgain.

@Test
public void filesWhichMakeNoProgressArentReplicatedAgain() throws Exception {
    ReplicaSystem replica = EasyMock.createMock(ReplicaSystem.class);
    ReplicaSystemHelper helper = EasyMock.createMock(ReplicaSystemHelper.class);
    ReplicationProcessor proc = EasyMock.createMockBuilder(ReplicationProcessor.class).addMockedMethods("getReplicaSystem", "doesFileExist", "getStatus", "getHelper").createMock();
    ReplicationTarget target = new ReplicationTarget("peer", "1", Table.ID.of("1"));
    Status status = Status.newBuilder().setBegin(0).setEnd(0).setInfiniteEnd(true).setClosed(true).build();
    Path path = new Path("/accumulo");
    String queueKey = DistributedWorkQueueWorkAssignerHelper.getQueueKey(path.toString(), target);
    EasyMock.expect(proc.getReplicaSystem(target)).andReturn(replica);
    EasyMock.expect(proc.getStatus(path.toString(), target)).andReturn(status);
    EasyMock.expect(proc.doesFileExist(path, target)).andReturn(true);
    EasyMock.expect(proc.getHelper()).andReturn(helper);
    EasyMock.expect(replica.replicate(path, status, target, helper)).andReturn(status);
    EasyMock.replay(replica, proc);
    proc.process(queueKey, path.toString().getBytes(UTF_8));
    EasyMock.verify(replica, proc);
}
Also used: Status (org.apache.accumulo.server.replication.proto.Replication.Status), Path (org.apache.hadoop.fs.Path), ReplicationTarget (org.apache.accumulo.core.replication.ReplicationTarget), ReplicaSystemHelper (org.apache.accumulo.server.replication.ReplicaSystemHelper), ReplicaSystem (org.apache.accumulo.server.replication.ReplicaSystem), Test (org.junit.Test)
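
The crux of this test is that replicate() hands back the exact Status it was given, meaning no progress was made, so the processor must not record a new status or requeue the file. A hedged sketch of the kind of check involved (illustrative only, not the actual ReplicationProcessor internals; recordNewStatus is assumed from ReplicaSystemHelper):

Status before = status;
Status after = replica.replicate(path, before, target, helper);
if (!after.equals(before)) {
    // progress was made; persist the new status so replication can resume
    helper.recordNewStatus(path, after, target);
}
// otherwise: drop the work item; re-replicating an unchanged file would
// loop forever without making progress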

Example 63 with Status

Use of org.apache.accumulo.server.replication.proto.Replication.Status in project accumulo by apache.

The class RemoveCompleteReplicationRecordsIT, method replicatedClosedWorkRecordsAreNotRemovedWithoutClosedStatusRecords.

@Test
public void replicatedClosedWorkRecordsAreNotRemovedWithoutClosedStatusRecords() throws Exception {
    BatchWriter replBw = ReplicationTable.getBatchWriter(conn);
    int numRecords = 3;
    Status.Builder builder = Status.newBuilder();
    builder.setClosed(false);
    builder.setEnd(10000);
    builder.setInfiniteEnd(false);
    // Write out numRecords entries to the replication table, none of which are fully replicated
    for (int i = 0; i < numRecords; i++) {
        String file = "/accumulo/wal/tserver+port/" + UUID.randomUUID();
        Mutation m = new Mutation(file);
        StatusSection.add(m, createTableId(i), ProtobufUtil.toValue(builder.setBegin(1000 * (i + 1)).build()));
        replBw.addMutation(m);
    }
    // Add two records that we can delete
    String fileToRemove = "/accumulo/wal/tserver+port/" + UUID.randomUUID();
    Mutation m = new Mutation(fileToRemove);
    StatusSection.add(m, Table.ID.of("5"), ProtobufUtil.toValue(builder.setBegin(10000).setEnd(10000).setClosed(false).build()));
    replBw.addMutation(m);
    numRecords++;
    fileToRemove = "/accumulo/wal/tserver+port/" + UUID.randomUUID();
    m = new Mutation(fileToRemove);
    StatusSection.add(m, Table.ID.of("6"), ProtobufUtil.toValue(builder.setBegin(10000).setEnd(10000).setClosed(false).build()));
    replBw.addMutation(m);
    numRecords++;
    replBw.flush();
    // Make sure that we have the expected number of records in both tables
    Assert.assertEquals(numRecords, Iterables.size(ReplicationTable.getScanner(conn)));
    // We should not remove any records because they're missing closed status
    try (BatchScanner bs = ReplicationTable.getBatchScanner(conn, 1)) {
        bs.setRanges(Collections.singleton(new Range()));
        IteratorSetting cfg = new IteratorSetting(50, WholeRowIterator.class);
        bs.addScanIterator(cfg);
        try {
            Assert.assertEquals(0L, rcrr.removeCompleteRecords(conn, bs, replBw));
        } finally {
            replBw.close();
        }
    }
}
Also used: Status (org.apache.accumulo.server.replication.proto.Replication.Status), IteratorSetting (org.apache.accumulo.core.client.IteratorSetting), BatchScanner (org.apache.accumulo.core.client.BatchScanner), BatchWriter (org.apache.accumulo.core.client.BatchWriter), Mutation (org.apache.accumulo.core.data.Mutation), Range (org.apache.accumulo.core.data.Range), Test (org.junit.Test)
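
Every record written above carries setClosed(false), including the two whose begin has caught up to end (both 10000). That is the point of the test: removeCompleteRecords treats a record as complete only when it is also closed. A minimal sketch of the distinction (illustrative; the authoritative check lives in RemoveCompleteReplicationRecords):

// Replicated but not closed: begin has caught up to end, yet the WAL may
// still grow, so the record must be kept
Status caughtUpButOpen = Status.newBuilder().setBegin(10000).setEnd(10000).setInfiniteEnd(false).setClosed(false).build();
// Only once the record is also closed is it safe to remove
Status removable = caughtUpButOpen.toBuilder().setClosed(true).build();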

Example 64 with Status

Use of org.apache.accumulo.server.replication.proto.Replication.Status in project accumulo by apache.

The class ReplicationIT, method replicationRecordsAreClosedAfterGarbageCollection.

@Test
public void replicationRecordsAreClosedAfterGarbageCollection() throws Exception {
    getCluster().getClusterControl().stop(ServerType.GARBAGE_COLLECTOR);
    final Connector conn = getConnector();
    ReplicationTable.setOnline(conn);
    conn.securityOperations().grantTablePermission("root", ReplicationTable.NAME, TablePermission.WRITE);
    conn.tableOperations().deleteRows(ReplicationTable.NAME, null, null);
    final AtomicBoolean keepRunning = new AtomicBoolean(true);
    final Set<String> metadataWals = new HashSet<>();
    Thread t = new Thread(new Runnable() {

        @Override
        public void run() {
            // Repeatedly collect the WALs referenced in the metadata table until told to stop
            while (keepRunning.get()) {
                try {
                    metadataWals.addAll(getLogs(conn).keySet());
                } catch (Exception e) {
                    log.error("Metadata table doesn't exist");
                }
            }
        }
    });
    t.start();
    String table1 = "table1", table2 = "table2", table3 = "table3";
    try {
        conn.tableOperations().create(table1);
        conn.tableOperations().setProperty(table1, Property.TABLE_REPLICATION.getKey(), "true");
        conn.tableOperations().setProperty(table1, Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "1");
        conn.instanceOperations().setProperty(Property.REPLICATION_PEERS.getKey() + "cluster1", ReplicaSystemFactory.getPeerConfigurationValue(MockReplicaSystem.class, null));
        // Write some data to table1
        writeSomeData(conn, table1, 200, 500);
        conn.tableOperations().create(table2);
        conn.tableOperations().setProperty(table2, Property.TABLE_REPLICATION.getKey(), "true");
        conn.tableOperations().setProperty(table2, Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "1");
        writeSomeData(conn, table2, 200, 500);
        conn.tableOperations().create(table3);
        conn.tableOperations().setProperty(table3, Property.TABLE_REPLICATION.getKey(), "true");
        conn.tableOperations().setProperty(table3, Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "1");
        writeSomeData(conn, table3, 200, 500);
        // Flush everything to try to make the replication records
        for (String table : Arrays.asList(table1, table2, table3)) {
            conn.tableOperations().compact(table, null, null, true, true);
        }
    } finally {
        keepRunning.set(false);
        t.join(5000);
        Assert.assertFalse(t.isAlive());
    }
    // Kill the tserver(s) and restart them
    // to ensure that the WALs we previously observed all move to closed.
    cluster.getClusterControl().stop(ServerType.TABLET_SERVER);
    cluster.getClusterControl().start(ServerType.TABLET_SERVER);
    // Make sure we can read all the tables (recovery complete)
    for (String table : Arrays.asList(table1, table2, table3)) {
        Iterators.size(conn.createScanner(table, Authorizations.EMPTY).iterator());
    }
    // Starting the gc will run CloseWriteAheadLogReferences which will first close Statuses
    // in the metadata table, and then in the replication table
    Process gc = cluster.exec(SimpleGarbageCollector.class);
    waitForGCLock(conn);
    Thread.sleep(1000);
    log.info("GC is up and should have had time to run at least once by now");
    try {
        boolean allClosed = true;
        // After they're closed, they are candidates for deletion
        for (int i = 0; i < 10; i++) {
            try (Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
                s.setRange(Range.prefix(ReplicationSection.getRowPrefix()));
                Iterator<Entry<Key, Value>> iter = s.iterator();
                long recordsFound = 0L;
                while (allClosed && iter.hasNext()) {
                    Entry<Key, Value> entry = iter.next();
                    String wal = entry.getKey().getRow().toString();
                    if (metadataWals.contains(wal)) {
                        Status status = Status.parseFrom(entry.getValue().get());
                        log.info("{}={}", entry.getKey().toStringNoTruncate(), ProtobufUtil.toString(status));
                        allClosed &= status.getClosed();
                        recordsFound++;
                    }
                }
                log.info("Found {} records from the metadata table", recordsFound);
                if (allClosed) {
                    break;
                }
                sleepUninterruptibly(2, TimeUnit.SECONDS);
            }
        }
        if (!allClosed) {
            try (Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
                s.setRange(Range.prefix(ReplicationSection.getRowPrefix()));
                for (Entry<Key, Value> entry : s) {
                    log.info("{} {}", entry.getKey().toStringNoTruncate(), ProtobufUtil.toString(Status.parseFrom(entry.getValue().get())));
                }
                Assert.fail("Expected all replication records in the metadata table to be closed");
            }
        }
        for (int i = 0; i < 10; i++) {
            allClosed = true;
            try (Scanner s = ReplicationTable.getScanner(conn)) {
                Iterator<Entry<Key, Value>> iter = s.iterator();
                long recordsFound = 0L;
                while (allClosed && iter.hasNext()) {
                    Entry<Key, Value> entry = iter.next();
                    String wal = entry.getKey().getRow().toString();
                    if (metadataWals.contains(wal)) {
                        Status status = Status.parseFrom(entry.getValue().get());
                        log.info("{}={}", entry.getKey().toStringNoTruncate(), ProtobufUtil.toString(status));
                        allClosed &= status.getClosed();
                        recordsFound++;
                    }
                }
                log.info("Found {} records from the replication table", recordsFound);
                if (allClosed) {
                    break;
                }
                sleepUninterruptibly(3, TimeUnit.SECONDS);
            }
        }
        if (!allClosed) {
            try (Scanner s = ReplicationTable.getScanner(conn)) {
                StatusSection.limit(s);
                for (Entry<Key, Value> entry : s) {
                    log.info("{} {}", entry.getKey().toStringNoTruncate(), TextFormat.shortDebugString(Status.parseFrom(entry.getValue().get())));
                }
                Assert.fail("Expected all replication records in the replication table to be closed");
            }
        }
    } finally {
        gc.destroy();
        gc.waitFor();
    }
}
Also used: Status (org.apache.accumulo.server.replication.proto.Replication.Status), Connector (org.apache.accumulo.core.client.Connector), Scanner (org.apache.accumulo.core.client.Scanner), TableOfflineException (org.apache.accumulo.core.client.TableOfflineException), URISyntaxException (java.net.URISyntaxException), TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException), ReplicationTableOfflineException (org.apache.accumulo.core.replication.ReplicationTableOfflineException), AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException), NoSuchElementException (java.util.NoSuchElementException), AccumuloException (org.apache.accumulo.core.client.AccumuloException), AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean), Entry (java.util.Map.Entry), LogEntry (org.apache.accumulo.core.tabletserver.log.LogEntry), Value (org.apache.accumulo.core.data.Value), Key (org.apache.accumulo.core.data.Key), HashSet (java.util.HashSet), Test (org.junit.Test)
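
The two polling loops in this test are structurally identical: scan a table, parse each Status, and keep waiting until every tracked WAL reports closed. A hedged sketch of that shared shape (awaitAllClosed is an illustrative helper, not part of the test):

static boolean awaitAllClosed(Scanner s, Set<String> trackedWals, int attempts) throws InvalidProtocolBufferException {
    for (int i = 0; i < attempts; i++) {
        boolean allClosed = true;
        for (Entry<Key, Value> entry : s) {
            // only consider records for the WALs the watcher thread observed
            if (trackedWals.contains(entry.getKey().getRow().toString())) {
                allClosed &= Status.parseFrom(entry.getValue().get()).getClosed();
            }
        }
        if (allClosed) {
            return true;
        }
        sleepUninterruptibly(2, TimeUnit.SECONDS);
    }
    return false;
}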

Example 65 with Status

Use of org.apache.accumulo.server.replication.proto.Replication.Status in project accumulo by apache.

The class ReplicationIT, method combinerWorksOnMetadata.

@Test
public void combinerWorksOnMetadata() throws Exception {
    Connector conn = getConnector();
    conn.securityOperations().grantTablePermission("root", MetadataTable.NAME, TablePermission.WRITE);
    ReplicationTableUtil.configureMetadataTable(conn, MetadataTable.NAME);
    Status stat1 = StatusUtil.fileCreated(100);
    Status stat2 = StatusUtil.fileClosed();
    BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    Mutation m = new Mutation(ReplicationSection.getRowPrefix() + "file:/accumulo/wals/tserver+port/uuid");
    m.put(ReplicationSection.COLF, new Text("1"), ProtobufUtil.toValue(stat1));
    bw.addMutation(m);
    bw.close();
    Status actual;
    try (Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        s.setRange(ReplicationSection.getRange());
        actual = Status.parseFrom(Iterables.getOnlyElement(s).getValue().get());
        Assert.assertEquals(stat1, actual);
        bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
        m = new Mutation(ReplicationSection.getRowPrefix() + "file:/accumulo/wals/tserver+port/uuid");
        m.put(ReplicationSection.COLF, new Text("1"), ProtobufUtil.toValue(stat2));
        bw.addMutation(m);
        bw.close();
    }
    try (Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        s.setRange(ReplicationSection.getRange());
        actual = Status.parseFrom(Iterables.getOnlyElement(s).getValue().get());
        Status expected = Status.newBuilder().setBegin(0).setEnd(0).setClosed(true).setInfiniteEnd(true).setCreatedTime(100).build();
        Assert.assertEquals(expected, actual);
    }
}
Also used: Status (org.apache.accumulo.server.replication.proto.Replication.Status), Connector (org.apache.accumulo.core.client.Connector), Scanner (org.apache.accumulo.core.client.Scanner), BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig), Text (org.apache.hadoop.io.Text), BatchWriter (org.apache.accumulo.core.client.BatchWriter), Mutation (org.apache.accumulo.core.data.Mutation), Test (org.junit.Test)
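
The interesting part is the expected value at the end: the combiner installed by configureMetadataTable merges the fileCreated and fileClosed writes into a single record, keeping createdTime from the first and closed/infiniteEnd from the second. A hedged sketch of that outcome, assuming the conventional combiner semantics:

// Illustrative merge; the real logic lives in the configured combiner
Status created = StatusUtil.fileCreated(100); // createdTime=100, closed=false
Status closed = StatusUtil.fileClosed(); // closed=true, infiniteEnd=true
Status merged = Status.newBuilder(closed).setCreatedTime(created.getCreatedTime()).build();
// merged equals the `expected` Status asserted at the end of the test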

Aggregations

Status (org.apache.accumulo.server.replication.proto.Replication.Status): 77
Test (org.junit.Test): 57
Mutation (org.apache.accumulo.core.data.Mutation): 30
Text (org.apache.hadoop.io.Text): 29
BatchWriter (org.apache.accumulo.core.client.BatchWriter): 28
Key (org.apache.accumulo.core.data.Key): 27
Value (org.apache.accumulo.core.data.Value): 26
Scanner (org.apache.accumulo.core.client.Scanner): 21
ReplicationTarget (org.apache.accumulo.core.replication.ReplicationTarget): 20
Path (org.apache.hadoop.fs.Path): 17
HashMap (java.util.HashMap): 14
BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig): 14
Table (org.apache.accumulo.core.client.impl.Table): 14
ReplicationTable (org.apache.accumulo.core.replication.ReplicationTable): 13
AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException): 12
AccumuloException (org.apache.accumulo.core.client.AccumuloException): 11
Connector (org.apache.accumulo.core.client.Connector): 11
InvalidProtocolBufferException (com.google.protobuf.InvalidProtocolBufferException): 10
TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException): 10
DataInputStream (java.io.DataInputStream): 9