
Example 21 with Status

use of org.apache.accumulo.server.replication.proto.Replication.Status in project accumulo by apache.

the class StatusTest method equality.

@Test
public void equality() {
    Status replicated = Status.newBuilder().setBegin(Long.MAX_VALUE).setEnd(0).setInfiniteEnd(true).setClosed(false).build();
    Status unreplicated = Status.newBuilder().setBegin(0).setEnd(0).setInfiniteEnd(true).setClosed(false).build();
    Assert.assertFalse(replicated.equals(unreplicated));
}
Also used : Status(org.apache.accumulo.server.replication.proto.Replication.Status) Test(org.junit.Test)
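
By convention in these tests, begin == Long.MAX_VALUE marks a fully replicated file while begin == 0 marks an unreplicated one, so the two Status messages above differ only in that field. As a minimal sketch (not from the Accumulo sources), the round trip these protobuf messages support looks like this, using only the generated builder and parser:

import org.apache.accumulo.server.replication.proto.Replication.Status;

public class StatusRoundTripSketch {
    public static void main(String[] args) throws Exception {
        // Build a Status describing a fully replicated, still-open file
        Status original = Status.newBuilder().setBegin(Long.MAX_VALUE).setEnd(0).setInfiniteEnd(true).setClosed(false).build();
        // Round-trip through the wire format stored in the replication tables
        Status parsed = Status.parseFrom(original.toByteArray());
        // Generated protobuf messages implement value equality, which is what equality() exercises
        System.out.println(original.equals(parsed)); // prints: true
    }
}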

Example 22 with Status

use of org.apache.accumulo.server.replication.proto.Replication.Status in project accumulo by apache.

the class FinishedWorkUpdaterIT method recordsWithProgressUpdateBothTables.

@Test
public void recordsWithProgressUpdateBothTables() throws Exception {
    conn.securityOperations().grantTablePermission(conn.whoami(), ReplicationTable.NAME, TablePermission.READ);
    conn.securityOperations().grantTablePermission(conn.whoami(), ReplicationTable.NAME, TablePermission.WRITE);
    ReplicationTable.setOnline(conn);
    String file = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
    Status stat = Status.newBuilder().setBegin(100).setEnd(200).setClosed(true).setInfiniteEnd(false).build();
    ReplicationTarget target = new ReplicationTarget("peer", "table1", Table.ID.of("1"));
    // Create a single work record for a file to some peer
    BatchWriter bw = ReplicationTable.getBatchWriter(conn);
    Mutation m = new Mutation(file);
    WorkSection.add(m, target.toText(), ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    bw.close();
    updater.run();
    try (Scanner s = ReplicationTable.getScanner(conn)) {
        s.setRange(Range.exact(file));
        StatusSection.limit(s);
        Entry<Key, Value> entry = Iterables.getOnlyElement(s);
        Assert.assertEquals(entry.getKey().getColumnFamily(), StatusSection.NAME);
        Assert.assertEquals(entry.getKey().getColumnQualifier().toString(), target.getSourceTableId().canonicalID());
        // We should only rely on the correct begin attribute being returned
        Status actual = Status.parseFrom(entry.getValue().get());
        Assert.assertEquals(stat.getBegin(), actual.getBegin());
    }
}
Also used : Status(org.apache.accumulo.server.replication.proto.Replication.Status) Scanner(org.apache.accumulo.core.client.Scanner) ReplicationTarget(org.apache.accumulo.core.replication.ReplicationTarget) Value(org.apache.accumulo.core.data.Value) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Key(org.apache.accumulo.core.data.Key) Test(org.junit.Test)
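
The test writes through WorkSection.add but asserts against the status section that updater.run() produces. For contrast, here is a hedged sketch of reading the work records back directly; it assumes WorkSection exposes a limit(Scanner) helper mirroring StatusSection.limit, and that ReplicationTarget.from(Text) parses the column qualifier (both are assumptions, not verified against this Accumulo version):

// Sketch only: dump the work records for a file
private void dumpWorkRecords(Connector conn, String file) throws Exception {
    try (Scanner s = ReplicationTable.getScanner(conn)) {
        s.setRange(Range.exact(file));
        // Assumed helper: restricts the scan to work-section entries
        WorkSection.limit(s);
        for (Entry<Key, Value> e : s) {
            ReplicationTarget target = ReplicationTarget.from(e.getKey().getColumnQualifier());
            Status status = Status.parseFrom(e.getValue().get());
            System.out.println(target + " -> begin=" + status.getBegin() + ", end=" + status.getEnd());
        }
    }
}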

Example 23 with Status

use of org.apache.accumulo.server.replication.proto.Replication.Status in project accumulo by apache.

the class FinishedWorkUpdaterIT method chooseMinimumBeginOffset.

@Test
public void chooseMinimumBeginOffset() throws Exception {
    conn.securityOperations().grantTablePermission(conn.whoami(), ReplicationTable.NAME, TablePermission.READ);
    conn.securityOperations().grantTablePermission(conn.whoami(), ReplicationTable.NAME, TablePermission.WRITE);
    ReplicationTable.setOnline(conn);
    String file = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
    Status stat1 = Status.newBuilder().setBegin(100).setEnd(1000).setClosed(true).setInfiniteEnd(false).build();
    Status stat2 = Status.newBuilder().setBegin(500).setEnd(1000).setClosed(true).setInfiniteEnd(false).build();
    Status stat3 = Status.newBuilder().setBegin(1).setEnd(1000).setClosed(true).setInfiniteEnd(false).build();
    ReplicationTarget target1 = new ReplicationTarget("peer1", "table1", Table.ID.of("1"));
    ReplicationTarget target2 = new ReplicationTarget("peer2", "table2", Table.ID.of("1"));
    ReplicationTarget target3 = new ReplicationTarget("peer3", "table3", Table.ID.of("1"));
    // Create work records for a single file, one per peer
    BatchWriter bw = ReplicationTable.getBatchWriter(conn);
    Mutation m = new Mutation(file);
    WorkSection.add(m, target1.toText(), ProtobufUtil.toValue(stat1));
    WorkSection.add(m, target2.toText(), ProtobufUtil.toValue(stat2));
    WorkSection.add(m, target3.toText(), ProtobufUtil.toValue(stat3));
    bw.addMutation(m);
    bw.close();
    updater.run();
    try (Scanner s = ReplicationTable.getScanner(conn)) {
        s.setRange(Range.exact(file));
        StatusSection.limit(s);
        Entry<Key, Value> entry = Iterables.getOnlyElement(s);
        Assert.assertEquals(entry.getKey().getColumnFamily(), StatusSection.NAME);
        Assert.assertEquals(entry.getKey().getColumnQualifier().toString(), target1.getSourceTableId().canonicalID());
        // We should only rely on the correct begin attribute being returned
        Status actual = Status.parseFrom(entry.getValue().get());
        Assert.assertEquals(1, actual.getBegin());
    }
}
Also used : Status(org.apache.accumulo.server.replication.proto.Replication.Status) Scanner(org.apache.accumulo.core.client.Scanner) ReplicationTarget(org.apache.accumulo.core.replication.ReplicationTarget) Value(org.apache.accumulo.core.data.Value) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Key(org.apache.accumulo.core.data.Key) Test(org.junit.Test)
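
The updater collapses the three per-target work records into a single status record whose begin is the minimum across targets, since that is the furthest offset known to be replicated to every peer. A self-contained sketch of that reduction in plain Java (the helper name is illustrative):

import java.util.List;
import org.apache.accumulo.server.replication.proto.Replication.Status;

public class MinBeginSketch {
    // The combined begin offset is the smallest begin across all targets
    static long combinedBegin(List<Status> perTargetStatuses) {
        return perTargetStatuses.stream().mapToLong(Status::getBegin).min().orElseThrow(IllegalStateException::new);
    }
}

With stat1, stat2, and stat3 above, combinedBegin returns 1, matching the test's assertion.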

Example 24 with Status

use of org.apache.accumulo.server.replication.proto.Replication.Status in project accumulo by apache.

the class GarbageCollectorCommunicatesWithTServersIT method getMetadataStatusForTable.

/**
 * Get the replication status messages for the given table that exist in the metadata table (~repl entries)
 */
private Map<String, Status> getMetadataStatusForTable(String tableName) throws Exception {
    final Connector conn = getConnector();
    final String tableId = conn.tableOperations().tableIdMap().get(tableName);
    Assert.assertNotNull("Could not determine table ID for " + tableName, tableId);
    Map<String, Status> fileToStatus = new HashMap<>();
    try (Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        Range r = MetadataSchema.ReplicationSection.getRange();
        s.setRange(r);
        s.fetchColumn(MetadataSchema.ReplicationSection.COLF, new Text(tableId));
        for (Entry<Key, Value> entry : s) {
            Text file = new Text();
            MetadataSchema.ReplicationSection.getFile(entry.getKey(), file);
            Status status = Status.parseFrom(entry.getValue().get());
            log.info("Got status for {}: {}", file, ProtobufUtil.toString(status));
            fileToStatus.put(file.toString(), status);
        }
    }
    return fileToStatus;
}
Also used : Status(org.apache.accumulo.server.replication.proto.Replication.Status) Connector(org.apache.accumulo.core.client.Connector) Scanner(org.apache.accumulo.core.client.Scanner) HashMap(java.util.HashMap) Value(org.apache.accumulo.core.data.Value) Text(org.apache.hadoop.io.Text) Range(org.apache.accumulo.core.data.Range) Key(org.apache.accumulo.core.data.Key)
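
A hypothetical call site for this helper inside a test body (the table name and log format are illustrative):

Map<String, Status> fileToStatus = getMetadataStatusForTable("myTable");
for (Entry<String, Status> e : fileToStatus.entrySet()) {
    // The key is the WAL file path, the value its replication Status
    log.info("WAL {} closed={} begin={}", e.getKey(), e.getValue().getClosed(), e.getValue().getBegin());
}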

Example 25 with Status

use of org.apache.accumulo.server.replication.proto.Replication.Status in project accumulo by apache.

the class GarbageCollectorCommunicatesWithTServersIT method testUnreferencedWalInTserverIsClosed.

@Test(timeout = 2 * 60 * 1000)
public void testUnreferencedWalInTserverIsClosed() throws Exception {
    final String[] names = getUniqueNames(2);
    // `table` will be replicated, `otherTable` is only used to roll the WAL on the tserver
    final String table = names[0], otherTable = names[1];
    final Connector conn = getConnector();
    // Bring the replication table online first and foremost
    ReplicationTable.setOnline(conn);
    log.info("Creating {}", table);
    conn.tableOperations().create(table);
    conn.tableOperations().setProperty(table, Property.TABLE_REPLICATION.getKey(), "true");
    log.info("Writing a few mutations to the table");
    BatchWriter bw = conn.createBatchWriter(table, null);
    byte[] empty = new byte[0];
    for (int i = 0; i < 5; i++) {
        Mutation m = new Mutation(Integer.toString(i));
        m.put(empty, empty, empty);
        bw.addMutation(m);
    }
    log.info("Flushing mutations to the server");
    bw.close();
    log.info("Checking that metadata only has one WAL recorded for this table");
    Set<String> wals = getWalsForTable(table);
    Assert.assertEquals("Expected to only find two WAL for the table", 2, wals.size());
    log.info("Compacting the table which will remove all WALs from the tablets");
    // Flush our test table to remove the WAL references in it
    conn.tableOperations().flush(table, null, null, true);
    // Flush the metadata table too because it will have a reference to the WAL
    conn.tableOperations().flush(MetadataTable.NAME, null, null, true);
    log.info("Fetching replication statuses from metadata table");
    Map<String, Status> fileToStatus = getMetadataStatusForTable(table);
    Assert.assertEquals("Expected to only find one replication status message", 1, fileToStatus.size());
    String walName = fileToStatus.keySet().iterator().next();
    Assert.assertTrue("Expected log file name from tablet to equal replication entry", wals.contains(walName));
    Status status = fileToStatus.get(walName);
    Assert.assertEquals("Expected Status for file to not be closed", false, status.getClosed());
    Set<String> filesForTable = getFilesForTable(table);
    Assert.assertEquals("Expected to only find one rfile for table", 1, filesForTable.size());
    log.info("Files for table before MajC: {}", filesForTable);
    // Issue a MajC to roll a new file in HDFS
    conn.tableOperations().compact(table, null, null, false, true);
    Set<String> filesForTableAfterCompaction = getFilesForTable(table);
    log.info("Files for table after MajC: {}", filesForTableAfterCompaction);
    Assert.assertEquals("Expected to only find one rfile for table", 1, filesForTableAfterCompaction.size());
    Assert.assertNotEquals("Expected the files before and after compaction to differ", filesForTableAfterCompaction, filesForTable);
    // Use the rfile which was just replaced by the MajC to determine when the GC has run
    Path fileToBeDeleted = new Path(filesForTable.iterator().next());
    FileSystem fs = getCluster().getFileSystem();
    boolean fileExists = fs.exists(fileToBeDeleted);
    while (fileExists) {
        log.info("File which should get deleted still exists: {}", fileToBeDeleted);
        Thread.sleep(2000);
        fileExists = fs.exists(fileToBeDeleted);
    }
    // At this point in time, we *know* that the GarbageCollector has run which means that the Status
    // for our WAL should not be altered.
    Map<String, Status> fileToStatusAfterMinc = getMetadataStatusForTable(table);
    Assert.assertEquals("Expected to still find only one replication status message: " + fileToStatusAfterMinc, 1, fileToStatusAfterMinc.size());
    /*
     * To verify that the WAL is still getting closed, we have to force the tserver to close the existing WAL and open a new one instead. The easiest way to do
     * this is to write enough data to exceed the 1.33% full threshold that the logger keeps track of.
     */
    conn.tableOperations().create(otherTable);
    bw = conn.createBatchWriter(otherTable, null);
    // 500k
    byte[] bigValue = new byte[1024 * 500];
    Arrays.fill(bigValue, (byte) 1);
    // 500k * 50
    for (int i = 0; i < 50; i++) {
        Mutation m = new Mutation(Integer.toString(i));
        m.put(empty, empty, bigValue);
        bw.addMutation(m);
        if (i % 10 == 0) {
            bw.flush();
        }
    }
    bw.close();
    conn.tableOperations().flush(otherTable, null, null, true);
    // Get the tservers which the master deems active
    final ClientContext context = new ClientContext(conn.getInstance(), new Credentials("root", new PasswordToken(ConfigurableMacBase.ROOT_PASSWORD)), getClientConfig());
    List<String> tservers = MasterClient.execute(context, new ClientExecReturn<List<String>, MasterClientService.Client>() {

        @Override
        public List<String> execute(MasterClientService.Client client) throws Exception {
            return client.getActiveTservers(Tracer.traceInfo(), context.rpcCreds());
        }
    });
    Assert.assertEquals("Expected only one active tservers", 1, tservers.size());
    HostAndPort tserver = HostAndPort.fromString(tservers.get(0));
    // Get the active WALs from that server
    log.info("Fetching active WALs from {}", tserver);
    Client client = ThriftUtil.getTServerClient(tserver, context);
    List<String> activeWalsForTserver = client.getActiveLogs(Tracer.traceInfo(), context.rpcCreds());
    log.info("Active wals: {}", activeWalsForTserver);
    Assert.assertEquals("Expected to find only one active WAL", 1, activeWalsForTserver.size());
    String activeWal = new Path(activeWalsForTserver.get(0)).toString();
    Assert.assertNotEquals("Current active WAL on tserver should not be the original WAL we saw", walName, activeWal);
    log.info("Ensuring that replication status does get closed after WAL is no longer in use by Tserver");
    do {
        Map<String, Status> replicationStatuses = getMetadataStatusForTable(table);
        log.info("Got replication status messages {}", replicationStatuses);
        Assert.assertEquals("Did not expect to find additional status records", 1, replicationStatuses.size());
        status = replicationStatuses.values().iterator().next();
        log.info("Current status: {}", ProtobufUtil.toString(status));
        if (status.getClosed()) {
            return;
        }
        log.info("Status is not yet closed, waiting for garbage collector to close it");
        Thread.sleep(2000);
    } while (true);
}
Also used : Connector(org.apache.accumulo.core.client.Connector) HostAndPort(org.apache.accumulo.core.util.HostAndPort) PasswordToken(org.apache.accumulo.core.client.security.tokens.PasswordToken) FileSystem(org.apache.hadoop.fs.FileSystem) RawLocalFileSystem(org.apache.hadoop.fs.RawLocalFileSystem) List(java.util.List) MasterClient(org.apache.accumulo.core.client.impl.MasterClient) Client(org.apache.accumulo.core.tabletserver.thrift.TabletClientService.Client) Status(org.apache.accumulo.server.replication.proto.Replication.Status) Path(org.apache.hadoop.fs.Path) ClientContext(org.apache.accumulo.core.client.impl.ClientContext) MasterClientService(org.apache.accumulo.core.master.thrift.MasterClientService) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Credentials(org.apache.accumulo.core.client.impl.Credentials) Test(org.junit.Test)
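
The unbounded do/while at the end of the test is safe only because the surrounding @Test carries its own timeout. As a hedged sketch, the same polling could be extracted into a helper with an explicit deadline (method and names are illustrative, not from the Accumulo sources):

// Sketch: wait until the single status record for a table reports closed, or fail
private Status waitForClosedStatus(String table, long timeoutMs) throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
        Map<String, Status> statuses = getMetadataStatusForTable(table);
        Assert.assertEquals("Did not expect to find additional status records", 1, statuses.size());
        Status status = statuses.values().iterator().next();
        if (status.getClosed()) {
            return status;
        }
        // Give the garbage collector time to close the WAL's status
        Thread.sleep(2000);
    }
    throw new AssertionError("Status was not closed within " + timeoutMs + "ms");
}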

Aggregations

Status (org.apache.accumulo.server.replication.proto.Replication.Status): 77
Test (org.junit.Test): 57
Mutation (org.apache.accumulo.core.data.Mutation): 30
Text (org.apache.hadoop.io.Text): 29
BatchWriter (org.apache.accumulo.core.client.BatchWriter): 28
Key (org.apache.accumulo.core.data.Key): 27
Value (org.apache.accumulo.core.data.Value): 26
Scanner (org.apache.accumulo.core.client.Scanner): 21
ReplicationTarget (org.apache.accumulo.core.replication.ReplicationTarget): 20
Path (org.apache.hadoop.fs.Path): 17
HashMap (java.util.HashMap): 14
BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig): 14
Table (org.apache.accumulo.core.client.impl.Table): 14
ReplicationTable (org.apache.accumulo.core.replication.ReplicationTable): 13
AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException): 12
AccumuloException (org.apache.accumulo.core.client.AccumuloException): 11
Connector (org.apache.accumulo.core.client.Connector): 11
InvalidProtocolBufferException (com.google.protobuf.InvalidProtocolBufferException): 10
TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException): 10
DataInputStream (java.io.DataInputStream): 9