Example 71 with Status

use of org.apache.accumulo.server.replication.proto.Replication.Status in project accumulo by apache.

the class StatusCombinerMacIT method test.

@Test
public void test() throws Exception {
    Connector conn = getConnector();
    ClusterUser user = getAdminUser();
    ReplicationTable.setOnline(conn);
    conn.securityOperations().grantTablePermission(user.getPrincipal(), ReplicationTable.NAME, TablePermission.WRITE);
    BatchWriter bw = ReplicationTable.getBatchWriter(conn);
    long createTime = System.currentTimeMillis();
    // Write an initial file-created Status record for the WAL
    try {
        Mutation m = new Mutation("file:/accumulo/wal/HW10447.local+56808/93cdc17e-7521-44fa-87b5-37f45bcb92d3");
        StatusSection.add(m, Table.ID.of("1"), StatusUtil.fileCreatedValue(createTime));
        bw.addMutation(m);
    } finally {
        bw.close();
    }
    Entry<Key, Value> entry;
    try (Scanner s = ReplicationTable.getScanner(conn)) {
        entry = Iterables.getOnlyElement(s);
        Assert.assertEquals(StatusUtil.fileCreatedValue(createTime), entry.getValue());
        bw = ReplicationTable.getBatchWriter(conn);
        // Overwrite with a fully-replicated Status; the StatusCombiner on the
        // replication table merges it with the file-created record
        try {
            Mutation m = new Mutation("file:/accumulo/wal/HW10447.local+56808/93cdc17e-7521-44fa-87b5-37f45bcb92d3");
            StatusSection.add(m, Table.ID.of("1"), ProtobufUtil.toValue(StatusUtil.replicated(Long.MAX_VALUE)));
            bw.addMutation(m);
        } finally {
            bw.close();
        }
    }
    // After combining, the record should carry the replicated begin offset
    try (Scanner s = ReplicationTable.getScanner(conn)) {
        entry = Iterables.getOnlyElement(s);
        Status stat = Status.parseFrom(entry.getValue().get());
        Assert.assertEquals(Long.MAX_VALUE, stat.getBegin());
    }
}
Also used: Status (org.apache.accumulo.server.replication.proto.Replication.Status), Connector (org.apache.accumulo.core.client.Connector), Scanner (org.apache.accumulo.core.client.Scanner), Value (org.apache.accumulo.core.data.Value), ClusterUser (org.apache.accumulo.cluster.ClusterUser), BatchWriter (org.apache.accumulo.core.client.BatchWriter), Mutation (org.apache.accumulo.core.data.Mutation), Key (org.apache.accumulo.core.data.Key), Test (org.junit.Test)
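
The pattern this test exercises is a protobuf round trip through an Accumulo Value: StatusUtil builds a Status (or a ready-made Value), ProtobufUtil.toValue serializes it, and Status.parseFrom decodes the raw bytes back. A minimal sketch of that round trip, using only calls that appear in the test itself:

    // Sketch: Status -> Value -> Status round trip, as relied on above
    Status written = StatusUtil.replicated(Long.MAX_VALUE);
    Value serialized = ProtobufUtil.toValue(written);
    Status read = Status.parseFrom(serialized.get());
    // The combiner-merged record in the test surfaces this same begin offset
    Assert.assertEquals(Long.MAX_VALUE, read.getBegin());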

Example 72 with Status

use of org.apache.accumulo.server.replication.proto.Replication.Status in project accumulo by apache.

the class StatusMakerIT method closedMessagesAreDeleted.

@Test
public void closedMessagesAreDeleted() throws Exception {
    String sourceTable = testName.getMethodName();
    conn.tableOperations().create(sourceTable);
    ReplicationTableUtil.configureMetadataTable(conn, sourceTable);
    BatchWriter bw = conn.createBatchWriter(sourceTable, new BatchWriterConfig());
    String walPrefix = "hdfs://localhost:8020/accumulo/wals/tserver+port/";
    Set<String> files = Sets.newHashSet(walPrefix + UUID.randomUUID(), walPrefix + UUID.randomUUID(), walPrefix + UUID.randomUUID(), walPrefix + UUID.randomUUID());
    Map<String, Integer> fileToTableId = new HashMap<>();
    Status stat = Status.newBuilder().setBegin(0).setEnd(0).setInfiniteEnd(true)
            .setClosed(true).setCreatedTime(System.currentTimeMillis()).build();
    int index = 1;
    for (String file : files) {
        Mutation m = new Mutation(ReplicationSection.getRowPrefix() + file);
        m.put(ReplicationSection.COLF, new Text(Integer.toString(index)), ProtobufUtil.toValue(stat));
        bw.addMutation(m);
        fileToTableId.put(file, index);
        index++;
    }
    bw.close();
    StatusMaker statusMaker = new StatusMaker(conn, fs);
    statusMaker.setSourceTableName(sourceTable);
    statusMaker.run();
    // Print any surviving replication entries for debugging
    try (Scanner s = conn.createScanner(sourceTable, Authorizations.EMPTY)) {
        s.setRange(ReplicationSection.getRange());
        s.fetchColumnFamily(ReplicationSection.COLF);
        for (Entry<Key, Value> e : s) {
            System.out.println(e.getKey().toStringNoTruncate() + " " + e.getValue());
        }
    }
    // Closed records should have been consumed and deleted by the StatusMaker
    try (Scanner s = conn.createScanner(sourceTable, Authorizations.EMPTY)) {
        s.setRange(ReplicationSection.getRange());
        s.fetchColumnFamily(ReplicationSection.COLF);
        Assert.assertEquals(0, Iterables.size(s));
    }
}
Also used: Status (org.apache.accumulo.server.replication.proto.Replication.Status), FileStatus (org.apache.hadoop.fs.FileStatus), Scanner (org.apache.accumulo.core.client.Scanner), HashMap (java.util.HashMap), Text (org.apache.hadoop.io.Text), StatusMaker (org.apache.accumulo.master.replication.StatusMaker), Value (org.apache.accumulo.core.data.Value), BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig), BatchWriter (org.apache.accumulo.core.client.BatchWriter), Mutation (org.apache.accumulo.core.data.Mutation), Key (org.apache.accumulo.core.data.Key), Test (org.junit.Test)
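
StatusMaker reads records laid out in a fixed shape in the source table: the row is ReplicationSection.getRowPrefix() plus the WAL path, the column family is ReplicationSection.COLF, the qualifier is the table id, and the value is the serialized Status. A hedged sketch of one such closed record (the file path and table id "1" are placeholders):

    // Sketch: one closed replication record of the shape StatusMaker consumes
    String file = "hdfs://localhost:8020/accumulo/wals/tserver+port/" + UUID.randomUUID();
    Status closed = Status.newBuilder().setBegin(0).setEnd(0).setInfiniteEnd(true)
            .setClosed(true).setCreatedTime(System.currentTimeMillis()).build();
    Mutation m = new Mutation(ReplicationSection.getRowPrefix() + file);
    m.put(ReplicationSection.COLF, new Text("1"), ProtobufUtil.toValue(closed));
    // After statusMaker.run(), the test expects this closed record to be gone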

Example 73 with Status

use of org.apache.accumulo.server.replication.proto.Replication.Status in project accumulo by apache.

the class UnorderedWorkAssignerIT method createWorkForFilesNeedingIt.

@Test
public void createWorkForFilesNeedingIt() throws Exception {
    ReplicationTarget target1 = new ReplicationTarget("cluster1", "table1", Table.ID.of("1")), target2 = new ReplicationTarget("cluster1", "table2", Table.ID.of("2"));
    Text serializedTarget1 = target1.toText(), serializedTarget2 = target2.toText();
    String keyTarget1 = target1.getPeerName() + DistributedWorkQueueWorkAssignerHelper.KEY_SEPARATOR + target1.getRemoteIdentifier() + DistributedWorkQueueWorkAssignerHelper.KEY_SEPARATOR + target1.getSourceTableId(), keyTarget2 = target2.getPeerName() + DistributedWorkQueueWorkAssignerHelper.KEY_SEPARATOR + target2.getRemoteIdentifier() + DistributedWorkQueueWorkAssignerHelper.KEY_SEPARATOR + target2.getSourceTableId();
    Status.Builder builder = Status.newBuilder().setBegin(0).setEnd(0).setInfiniteEnd(true).setClosed(false).setCreatedTime(5l);
    Status status1 = builder.build();
    builder.setCreatedTime(10l);
    Status status2 = builder.build();
    // Create two mutations, both of which need replication work done
    BatchWriter bw = ReplicationTable.getBatchWriter(conn);
    String filename1 = UUID.randomUUID().toString(), filename2 = UUID.randomUUID().toString();
    String file1 = "/accumulo/wal/tserver+port/" + filename1, file2 = "/accumulo/wal/tserver+port/" + filename2;
    Mutation m = new Mutation(file1);
    WorkSection.add(m, serializedTarget1, ProtobufUtil.toValue(status1));
    bw.addMutation(m);
    m = OrderSection.createMutation(file1, status1.getCreatedTime());
    OrderSection.add(m, target1.getSourceTableId(), ProtobufUtil.toValue(status1));
    bw.addMutation(m);
    m = new Mutation(file2);
    WorkSection.add(m, serializedTarget2, ProtobufUtil.toValue(status2));
    bw.addMutation(m);
    m = OrderSection.createMutation(file2, status2.getCreatedTime());
    OrderSection.add(m, target2.getSourceTableId(), ProtobufUtil.toValue(status2));
    bw.addMutation(m);
    bw.close();
    DistributedWorkQueue workQueue = createMock(DistributedWorkQueue.class);
    HashSet<String> queuedWork = new HashSet<>();
    assigner.setQueuedWork(queuedWork);
    assigner.setWorkQueue(workQueue);
    assigner.setMaxQueueSize(Integer.MAX_VALUE);
    // Make sure we expect the invocations in the order they were created
    String key = filename1 + "|" + keyTarget1;
    workQueue.addWork(key, file1);
    expectLastCall().once();
    key = filename2 + "|" + keyTarget2;
    workQueue.addWork(key, file2);
    expectLastCall().once();
    replay(workQueue);
    assigner.createWork();
    verify(workQueue);
}
Also used: Status (org.apache.accumulo.server.replication.proto.Replication.Status), ReplicationTarget (org.apache.accumulo.core.replication.ReplicationTarget), Text (org.apache.hadoop.io.Text), BatchWriter (org.apache.accumulo.core.client.BatchWriter), Mutation (org.apache.accumulo.core.data.Mutation), DistributedWorkQueue (org.apache.accumulo.server.zookeeper.DistributedWorkQueue), HashSet (java.util.HashSet), Test (org.junit.Test)
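
The assertions hinge on the key layout the assigner hands to the distributed work queue: a target collapses to peer name, remote identifier, and source table id joined by DistributedWorkQueueWorkAssignerHelper.KEY_SEPARATOR, and the queue key prefixes that with the WAL filename and a '|'. A sketch of building one key with the same calls the test uses (the target values are placeholders):

    // Sketch: the work-queue key layout asserted above
    ReplicationTarget target = new ReplicationTarget("cluster1", "table1", Table.ID.of("1"));
    String sep = DistributedWorkQueueWorkAssignerHelper.KEY_SEPARATOR;
    String targetKey = target.getPeerName() + sep + target.getRemoteIdentifier() + sep + target.getSourceTableId();
    // filename|peer<sep>remoteId<sep>sourceTableId identifies one unit of work
    String queueKey = UUID.randomUUID().toString() + "|" + targetKey;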

Example 74 with Status

use of org.apache.accumulo.server.replication.proto.Replication.Status in project accumulo by apache.

the class UnusedWalDoesntCloseReplicationStatusIT method test.

@Test
public void test() throws Exception {
    File accumuloDir = this.getCluster().getConfig().getAccumuloDir();
    final Connector conn = getConnector();
    final String tableName = getUniqueNames(1)[0];
    conn.securityOperations().grantTablePermission("root", MetadataTable.NAME, TablePermission.WRITE);
    conn.tableOperations().create(tableName);
    final Table.ID tableId = Table.ID.of(conn.tableOperations().tableIdMap().get(tableName));
    Assert.assertNotNull("Did not find table ID", tableId);
    final int numericTableId = Integer.parseInt(tableId.canonicalID());
    final int fakeTableId = numericTableId + 1;
    conn.tableOperations().setProperty(tableName, Property.TABLE_REPLICATION.getKey(), "true");
    conn.tableOperations().setProperty(tableName, Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "1");
    // Configure a mock peer whose replica system just sleeps for 50 seconds
    conn.instanceOperations().setProperty(Property.REPLICATION_PEERS.getKey() + "cluster1", ReplicaSystemFactory.getPeerConfigurationValue(MockReplicaSystem.class, "50000"));
    FileSystem fs = FileSystem.getLocal(new Configuration());
    File tserverWalDir = new File(accumuloDir, ServerConstants.WAL_DIR + Path.SEPARATOR + "faketserver+port");
    File tserverWal = new File(tserverWalDir, UUID.randomUUID().toString());
    fs.mkdirs(new Path(tserverWalDir.getAbsolutePath()));
    // Make a fake WAL with no data in it for our real table
    FSDataOutputStream out = fs.create(new Path(tserverWal.getAbsolutePath()));
    out.write(DfsLogger.LOG_FILE_HEADER_V3.getBytes(UTF_8));
    DataOutputStream dos = new DataOutputStream(out);
    dos.writeUTF("NullCryptoModule");
    // Fake a single update WAL that has a mutation for another table
    LogFileKey key = new LogFileKey();
    LogFileValue value = new LogFileValue();
    key.event = OPEN;
    key.tserverSession = tserverWal.getAbsolutePath();
    key.filename = tserverWal.getAbsolutePath();
    key.write(out);
    value.write(out);
    key.event = LogEvents.DEFINE_TABLET;
    key.tablet = new KeyExtent(Table.ID.of(Integer.toString(fakeTableId)), null, null);
    key.seq = 1L;
    key.tid = 1;
    key.write(dos);
    value.write(dos);
    key.tablet = null;
    key.event = LogEvents.MUTATION;
    key.filename = tserverWal.getAbsolutePath();
    value.mutations = Arrays.asList(new ServerMutation(new Text("row")));
    key.write(dos);
    value.write(dos);
    key.event = LogEvents.COMPACTION_START;
    key.filename = accumuloDir.getAbsolutePath() + "/tables/" + fakeTableId + "/t-000001/A000001.rf";
    value.mutations = Collections.emptyList();
    key.write(dos);
    value.write(dos);
    key.event = LogEvents.COMPACTION_FINISH;
    value.mutations = Collections.emptyList();
    key.write(dos);
    value.write(dos);
    dos.close();
    BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
    Mutation m = new Mutation("m");
    m.put("m", "m", "M");
    bw.addMutation(m);
    bw.close();
    log.info("State of metadata table after inserting a record");
    try (Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        s.setRange(MetadataSchema.TabletsSection.getRange(tableId));
        for (Entry<Key, Value> entry : s) {
            System.out.println(entry.getKey().toStringNoTruncate() + " " + entry.getValue());
        }
    }
    try (Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        s.setRange(MetadataSchema.ReplicationSection.getRange());
        for (Entry<Key, Value> entry : s) {
            System.out.println(entry.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(Status.parseFrom(entry.getValue().get())));
        }
        log.info("Offline'ing table");
        conn.tableOperations().offline(tableName, true);
        // Add our fake WAL to the log column for this table
        String walUri = tserverWal.toURI().toString();
        KeyExtent extent = new KeyExtent(tableId, null, null);
        bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
        m = new Mutation(extent.getMetadataEntry());
        m.put(MetadataSchema.TabletsSection.LogColumnFamily.NAME, new Text("localhost:12345/" + walUri), new Value((walUri + "|1").getBytes(UTF_8)));
        bw.addMutation(m);
        // Add a replication entry for our fake WAL
        m = new Mutation(MetadataSchema.ReplicationSection.getRowPrefix() + new Path(walUri).toString());
        m.put(MetadataSchema.ReplicationSection.COLF, new Text(tableId.getUtf8()), new Value(StatusUtil.fileCreated(System.currentTimeMillis()).toByteArray()));
        bw.addMutation(m);
        bw.close();
        log.info("State of metadata after injecting WAL manually");
    }
    try (Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        s.setRange(MetadataSchema.TabletsSection.getRange(tableId));
        for (Entry<Key, Value> entry : s) {
            log.info("{} {}", entry.getKey().toStringNoTruncate(), entry.getValue());
        }
    }
    try (Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        s.setRange(MetadataSchema.ReplicationSection.getRange());
        for (Entry<Key, Value> entry : s) {
            log.info("{} {}", entry.getKey().toStringNoTruncate(), ProtobufUtil.toString(Status.parseFrom(entry.getValue().get())));
        }
        log.info("Bringing table online");
        conn.tableOperations().online(tableName, true);
        Assert.assertEquals(1, Iterables.size(conn.createScanner(tableName, Authorizations.EMPTY)));
        log.info("Table has performed recovery, state of metadata:");
    }
    try (Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        s.setRange(MetadataSchema.TabletsSection.getRange(tableId));
        for (Entry<Key, Value> entry : s) {
            log.info("{} {}", entry.getKey().toStringNoTruncate(), entry.getValue());
        }
    }
    try (Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        s.setRange(MetadataSchema.ReplicationSection.getRange());
        for (Entry<Key, Value> entry : s) {
            Status status = Status.parseFrom(entry.getValue().get());
            log.info("{} {}", entry.getKey().toStringNoTruncate(), ProtobufUtil.toString(status));
            Assert.assertFalse("Status record was closed and it should not be", status.getClosed());
        }
    }
}
Also used: Connector (org.apache.accumulo.core.client.Connector), Scanner (org.apache.accumulo.core.client.Scanner), Configuration (org.apache.hadoop.conf.Configuration), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), DataOutputStream (java.io.DataOutputStream), KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent), FileSystem (org.apache.hadoop.fs.FileSystem), BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig), Path (org.apache.hadoop.fs.Path), Status (org.apache.accumulo.server.replication.proto.Replication.Status), MetadataTable (org.apache.accumulo.core.metadata.MetadataTable), Table (org.apache.accumulo.core.client.impl.Table), ServerMutation (org.apache.accumulo.server.data.ServerMutation), Text (org.apache.hadoop.io.Text), LogFileKey (org.apache.accumulo.tserver.logger.LogFileKey), LogFileValue (org.apache.accumulo.tserver.logger.LogFileValue), Value (org.apache.accumulo.core.data.Value), BatchWriter (org.apache.accumulo.core.client.BatchWriter), Mutation (org.apache.accumulo.core.data.Mutation), File (java.io.File), Key (org.apache.accumulo.core.data.Key), Test (org.junit.Test)
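
A recurring idiom in this test is decoding a Status back out of a metadata Value for logging and assertions. Distilled from the scans above (same calls as the test, nothing new assumed):

    // Sketch: decode Status records under the metadata ReplicationSection
    try (Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        s.setRange(MetadataSchema.ReplicationSection.getRange());
        for (Entry<Key, Value> entry : s) {
            Status status = Status.parseFrom(entry.getValue().get());
            // A WAL the table never used must not come back marked closed
            Assert.assertFalse(status.getClosed());
        }
    }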

Example 75 with Status

use of org.apache.accumulo.server.replication.proto.Replication.Status in project accumulo by apache.

the class FinishedWorkUpdaterIT method chooseMinimumBeginOffsetInfiniteEnd.

@Test
public void chooseMinimumBeginOffsetInfiniteEnd() throws Exception {
    conn.securityOperations().grantTablePermission(conn.whoami(), ReplicationTable.NAME, TablePermission.READ);
    conn.securityOperations().grantTablePermission(conn.whoami(), ReplicationTable.NAME, TablePermission.WRITE);
    ReplicationTable.setOnline(conn);
    String file = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
    // @formatter:off
    Status stat1 = Status.newBuilder().setBegin(100).setEnd(1000).setClosed(true).setInfiniteEnd(true).build(),
            stat2 = Status.newBuilder().setBegin(1).setEnd(1000).setClosed(true).setInfiniteEnd(true).build(),
            stat3 = Status.newBuilder().setBegin(500).setEnd(1000).setClosed(true).setInfiniteEnd(true).build();
    ReplicationTarget target1 = new ReplicationTarget("peer1", "table1", Table.ID.of("1")),
            target2 = new ReplicationTarget("peer2", "table2", Table.ID.of("1")),
            target3 = new ReplicationTarget("peer3", "table3", Table.ID.of("1"));
    // @formatter:on
    // Create a single work record for a file to some peer
    BatchWriter bw = ReplicationTable.getBatchWriter(conn);
    Mutation m = new Mutation(file);
    WorkSection.add(m, target1.toText(), ProtobufUtil.toValue(stat1));
    WorkSection.add(m, target2.toText(), ProtobufUtil.toValue(stat2));
    WorkSection.add(m, target3.toText(), ProtobufUtil.toValue(stat3));
    bw.addMutation(m);
    bw.close();
    updater.run();
    try (Scanner s = ReplicationTable.getScanner(conn)) {
        s.setRange(Range.exact(file));
        StatusSection.limit(s);
        Entry<Key, Value> entry = Iterables.getOnlyElement(s);
        Assert.assertEquals(StatusSection.NAME, entry.getKey().getColumnFamily());
        Assert.assertEquals(target1.getSourceTableId().canonicalID(), entry.getKey().getColumnQualifier().toString());
        // We should only rely on the correct begin attribute being returned
        Status actual = Status.parseFrom(entry.getValue().get());
        Assert.assertEquals(1, actual.getBegin());
    }
}
Also used: Status (org.apache.accumulo.server.replication.proto.Replication.Status), Scanner (org.apache.accumulo.core.client.Scanner), ReplicationTarget (org.apache.accumulo.core.replication.ReplicationTarget), Value (org.apache.accumulo.core.data.Value), BatchWriter (org.apache.accumulo.core.client.BatchWriter), Mutation (org.apache.accumulo.core.data.Mutation), Key (org.apache.accumulo.core.data.Key), Test (org.junit.Test)
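
The final assertion encodes the merge rule under test: for a file whose work records are all closed with an infinite end, the surviving status record keeps the minimum begin offset across targets, here min(100, 1, 500) = 1, the earliest point any peer still needs. A sketch of that reduction over the three statuses built above (this mirrors what the test asserts; the updater's real implementation may differ):

    // Sketch: the minimum-begin reduction the test expects from the updater
    long minBegin = Long.MAX_VALUE;
    for (Status stat : Arrays.asList(stat1, stat2, stat3)) {
        minBegin = Math.min(minBegin, stat.getBegin());
    }
    // minBegin == 1, matching the combined record's asserted begin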

Aggregations

Status (org.apache.accumulo.server.replication.proto.Replication.Status): 77
Test (org.junit.Test): 57
Mutation (org.apache.accumulo.core.data.Mutation): 30
Text (org.apache.hadoop.io.Text): 29
BatchWriter (org.apache.accumulo.core.client.BatchWriter): 28
Key (org.apache.accumulo.core.data.Key): 27
Value (org.apache.accumulo.core.data.Value): 26
Scanner (org.apache.accumulo.core.client.Scanner): 21
ReplicationTarget (org.apache.accumulo.core.replication.ReplicationTarget): 20
Path (org.apache.hadoop.fs.Path): 17
HashMap (java.util.HashMap): 14
BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig): 14
Table (org.apache.accumulo.core.client.impl.Table): 14
ReplicationTable (org.apache.accumulo.core.replication.ReplicationTable): 13
AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException): 12
AccumuloException (org.apache.accumulo.core.client.AccumuloException): 11
Connector (org.apache.accumulo.core.client.Connector): 11
InvalidProtocolBufferException (com.google.protobuf.InvalidProtocolBufferException): 10
TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException): 10
DataInputStream (java.io.DataInputStream): 9