Example 36 with TableId

Use of org.apache.accumulo.core.data.TableId in project accumulo by apache.

From the class ReplicationIT, the method filesClosedAfterUnused:

@Test
public void filesClosedAfterUnused() throws Exception {
    try (AccumuloClient client = Accumulo.newClient().from(getClientProperties()).build()) {
        String table = "table";
        Map<String, String> replicate_props = new HashMap<>();
        replicate_props.put(Property.TABLE_REPLICATION.getKey(), "true");
        replicate_props.put(Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "1");
        client.tableOperations().create(table, new NewTableConfiguration().setProperties(replicate_props));
        TableId tableId = TableId.of(client.tableOperations().tableIdMap().get(table));
        assertNotNull(tableId);
        // Set up a replication peer backed by MockReplicaSystem, which just sleeps (the configured 50000 ms) instead of replicating
        client.instanceOperations().setProperty(Property.REPLICATION_PEERS.getKey() + "cluster1", ReplicaSystemFactory.getPeerConfigurationValue(MockReplicaSystem.class, "50000"));
        // Write a mutation to make a log file
        try (BatchWriter bw = client.createBatchWriter(table)) {
            Mutation m = new Mutation("one");
            m.put("", "", "");
            bw.addMutation(m);
        }
        // Write another mutation, which should cause the logger to roll over to a new WAL
        try (BatchWriter bw = client.createBatchWriter(table)) {
            Mutation m = new Mutation("three");
            m.put("", "", "");
            bw.addMutation(m);
        }
        try (Scanner s = client.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
            s.fetchColumnFamily(LogColumnFamily.NAME);
            s.setRange(TabletsSection.getRange(tableId));
            Set<String> wals = new HashSet<>();
            for (Entry<Key, Value> entry : s) {
                LogEntry logEntry = LogEntry.fromMetaWalEntry(entry);
                wals.add(new Path(logEntry.filename).toString());
            }
            log.warn("Found wals {}", wals);
            try (BatchWriter bw = client.createBatchWriter(table)) {
                Mutation m = new Mutation("three");
                byte[] bytes = new byte[1024 * 1024];
                m.put("1".getBytes(), new byte[0], bytes);
                m.put("2".getBytes(), new byte[0], bytes);
                m.put("3".getBytes(), new byte[0], bytes);
                m.put("4".getBytes(), new byte[0], bytes);
                m.put("5".getBytes(), new byte[0], bytes);
                bw.addMutation(m);
            }
            client.tableOperations().flush(table, null, null, true);
            while (!ReplicationTable.isOnline(client)) {
                sleepUninterruptibly(MILLIS_BETWEEN_REPLICATION_TABLE_ONLINE_CHECKS, TimeUnit.MILLISECONDS);
            }
            for (int i = 0; i < 10; i++) {
                try (Scanner s2 = client.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
                    s2.fetchColumnFamily(LogColumnFamily.NAME);
                    s2.setRange(TabletsSection.getRange(tableId));
                    for (Entry<Key, Value> entry : s2) {
                        log.info("{}={}", entry.getKey().toStringNoTruncate(), entry.getValue());
                    }
                }
                try (Scanner s3 = ReplicationTable.getScanner(client)) {
                    StatusSection.limit(s3);
                    Text buff = new Text();
                    boolean allReferencedLogsClosed = true;
                    int recordsFound = 0;
                    for (Entry<Key, Value> e : s3) {
                        recordsFound++;
                        StatusSection.getFile(e.getKey(), buff);
                        String file = buff.toString();
                        if (wals.contains(file)) {
                            Status stat = Status.parseFrom(e.getValue().get());
                            if (!stat.getClosed()) {
                                log.info("{} wasn't closed", file);
                                allReferencedLogsClosed = false;
                            }
                        }
                    }
                    if (recordsFound > 0 && allReferencedLogsClosed) {
                        return;
                    }
                    Thread.sleep(2000);
                } catch (RuntimeException e) {
                    Throwable cause = e.getCause();
                    if (cause instanceof AccumuloSecurityException) {
                        AccumuloSecurityException ase = (AccumuloSecurityException) cause;
                        switch(ase.getSecurityErrorCode()) {
                            case PERMISSION_DENIED:
                                // We tried to read the replication table before the GRANT went through
                                Thread.sleep(2000);
                                break;
                            default:
                                throw e;
                        }
                    } else {
                        throw e;
                    }
                }
            }
            fail("We had a file that was referenced but didn't get closed");
        }
    }
}
Also used : AccumuloClient(org.apache.accumulo.core.client.AccumuloClient) TableId(org.apache.accumulo.core.data.TableId) Scanner(org.apache.accumulo.core.client.Scanner) HashMap(java.util.HashMap) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) LogEntry(org.apache.accumulo.core.tabletserver.log.LogEntry) HashSet(java.util.HashSet) Path(org.apache.hadoop.fs.Path) Status(org.apache.accumulo.server.replication.proto.Replication.Status) Text(org.apache.hadoop.io.Text) NewTableConfiguration(org.apache.accumulo.core.client.admin.NewTableConfiguration) Value(org.apache.accumulo.core.data.Value) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Key(org.apache.accumulo.core.data.Key) Test(org.junit.Test)
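
The name-to-id lookup at the top of this test is the core TableId idiom on this page. Below is a minimal sketch of that lookup pulled into a standalone helper; the class name TableIds and the null check are illustrative additions, not code from the project.

import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.data.TableId;

public final class TableIds {

    private TableIds() {}

    // Resolve a table name to its TableId the same way the test does, via tableIdMap().
    public static TableId lookup(AccumuloClient client, String tableName) throws TableNotFoundException {
        // tableIdMap() returns a snapshot mapping table name -> table id string
        String id = client.tableOperations().tableIdMap().get(tableName);
        if (id == null) {
            throw new TableNotFoundException(null, tableName, "no id mapping for table");
        }
        // TableId.of wraps the canonical id string used in the metadata schema
        return TableId.of(id);
    }
}

The resulting TableId, not the table name, is what the test then passes to TabletsSection.getRange(tableId) so the metadata scan covers only that table's tablets.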

Example 37 with TableId

Use of org.apache.accumulo.core.data.TableId in project accumulo by apache.

From the class SequentialWorkAssignerIT, the method createWorkForFilesInCorrectOrder:

@Test
public void createWorkForFilesInCorrectOrder() throws Exception {
    ReplicationTarget target = new ReplicationTarget("cluster1", "table1", TableId.of("1"));
    Text serializedTarget = target.toText();
    // Create two mutations, both of which need replication work done
    BatchWriter bw = ReplicationTable.getBatchWriter(client);
    // We want the name of file2 to sort before file1
    String filename1 = "z_file1", filename2 = "a_file1";
    String file1 = "/accumulo/wal/tserver+port/" + filename1, file2 = "/accumulo/wal/tserver+port/" + filename2;
    // File1 was closed before file2, however
    Status stat1 = Status.newBuilder().setBegin(0).setEnd(100).setClosed(true).setInfiniteEnd(false).setCreatedTime(250).build();
    Status stat2 = Status.newBuilder().setBegin(0).setEnd(100).setClosed(true).setInfiniteEnd(false).setCreatedTime(500).build();
    Mutation m = new Mutation(file1);
    WorkSection.add(m, serializedTarget, ProtobufUtil.toValue(stat1));
    bw.addMutation(m);
    m = new Mutation(file2);
    WorkSection.add(m, serializedTarget, ProtobufUtil.toValue(stat2));
    bw.addMutation(m);
    m = OrderSection.createMutation(file1, stat1.getCreatedTime());
    OrderSection.add(m, target.getSourceTableId(), ProtobufUtil.toValue(stat1));
    bw.addMutation(m);
    m = OrderSection.createMutation(file2, stat2.getCreatedTime());
    OrderSection.add(m, target.getSourceTableId(), ProtobufUtil.toValue(stat2));
    bw.addMutation(m);
    bw.close();
    DistributedWorkQueue workQueue = createMock(DistributedWorkQueue.class);
    Map<String, Map<TableId, String>> queuedWork = new HashMap<>();
    assigner.setQueuedWork(queuedWork);
    assigner.setWorkQueue(workQueue);
    assigner.setMaxQueueSize(Integer.MAX_VALUE);
    // Make sure we expect the invocations in the correct order (accumulo is sorted)
    workQueue.addWork(DistributedWorkQueueWorkAssignerHelper.getQueueKey(filename1, target), file1);
    expectLastCall().once();
    // file2 is *not* queued because file1 must be replicated first
    replay(workQueue);
    assigner.createWork();
    verify(workQueue);
    assertEquals(1, queuedWork.size());
    assertTrue(queuedWork.containsKey("cluster1"));
    Map<TableId, String> cluster1Work = queuedWork.get("cluster1");
    assertEquals(1, cluster1Work.size());
    assertTrue(cluster1Work.containsKey(target.getSourceTableId()));
    assertEquals(DistributedWorkQueueWorkAssignerHelper.getQueueKey(filename1, target), cluster1Work.get(target.getSourceTableId()));
}
Also used : Status(org.apache.accumulo.server.replication.proto.Replication.Status) TableId(org.apache.accumulo.core.data.TableId) ReplicationTarget(org.apache.accumulo.core.replication.ReplicationTarget) HashMap(java.util.HashMap) Text(org.apache.hadoop.io.Text) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) HashMap(java.util.HashMap) Map(java.util.Map) DistributedWorkQueue(org.apache.accumulo.server.zookeeper.DistributedWorkQueue) Test(org.junit.Test)
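
The Status and ReplicationTarget values written by this test follow a fixed pattern, so here is a small sketch of just those two pieces; the wrapper class ReplicationRecords and its method names are illustrative, with the peer, remote table, and source table id supplied by the caller as in the test.

import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.replication.ReplicationTarget;
import org.apache.accumulo.server.replication.proto.Replication.Status;
import org.apache.hadoop.io.Text;

public final class ReplicationRecords {

    private ReplicationRecords() {}

    // Serialize a replication target (peer, remote table, source TableId) into the
    // Text qualifier used for the work section, as target.toText() does in the test.
    public static Text serializedTarget(String peer, String remoteTable, String sourceTableId) {
        ReplicationTarget target = new ReplicationTarget(peer, remoteTable, TableId.of(sourceTableId));
        return target.toText();
    }

    // Build a Status for a fully closed WAL segment, mirroring stat1/stat2 above.
    public static Status closedStatus(long end, long createdTime) {
        return Status.newBuilder()
            .setBegin(0)
            .setEnd(end)
            .setClosed(true)
            .setInfiniteEnd(false)
            .setCreatedTime(createdTime)
            .build();
    }
}

WorkSection.add and OrderSection.add then attach these values with ProtobufUtil.toValue, exactly as in the mutations above; the ordering behavior under test comes from the createdTime recorded in the order section.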

Example 38 with TableId

Use of org.apache.accumulo.core.data.TableId in project accumulo by apache.

From the class SequentialWorkAssignerIT, the method workAcrossTablesHappensConcurrently:

@Test
public void workAcrossTablesHappensConcurrently() throws Exception {
    ReplicationTarget target1 = new ReplicationTarget("cluster1", "table1", TableId.of("1"));
    Text serializedTarget1 = target1.toText();
    ReplicationTarget target2 = new ReplicationTarget("cluster1", "table2", TableId.of("2"));
    Text serializedTarget2 = target2.toText();
    // Create two mutations, both of which need replication work done
    BatchWriter bw = ReplicationTable.getBatchWriter(client);
    // We want the name of file2 to sort before file1
    String filename1 = "z_file1", filename2 = "a_file1";
    String file1 = "/accumulo/wal/tserver+port/" + filename1, file2 = "/accumulo/wal/tserver+port/" + filename2;
    // File1 was closed before file2, however
    Status stat1 = Status.newBuilder().setBegin(0).setEnd(100).setClosed(true).setInfiniteEnd(false).setCreatedTime(250).build();
    Status stat2 = Status.newBuilder().setBegin(0).setEnd(100).setClosed(true).setInfiniteEnd(false).setCreatedTime(500).build();
    Mutation m = new Mutation(file1);
    WorkSection.add(m, serializedTarget1, ProtobufUtil.toValue(stat1));
    bw.addMutation(m);
    m = new Mutation(file2);
    WorkSection.add(m, serializedTarget2, ProtobufUtil.toValue(stat2));
    bw.addMutation(m);
    m = OrderSection.createMutation(file1, stat1.getCreatedTime());
    OrderSection.add(m, target1.getSourceTableId(), ProtobufUtil.toValue(stat1));
    bw.addMutation(m);
    m = OrderSection.createMutation(file2, stat2.getCreatedTime());
    OrderSection.add(m, target2.getSourceTableId(), ProtobufUtil.toValue(stat2));
    bw.addMutation(m);
    bw.close();
    DistributedWorkQueue workQueue = createMock(DistributedWorkQueue.class);
    Map<String, Map<TableId, String>> queuedWork = new HashMap<>();
    assigner.setQueuedWork(queuedWork);
    assigner.setWorkQueue(workQueue);
    assigner.setMaxQueueSize(Integer.MAX_VALUE);
    // Make sure we expect the invocations in the correct order (accumulo is sorted)
    workQueue.addWork(DistributedWorkQueueWorkAssignerHelper.getQueueKey(filename1, target1), file1);
    expectLastCall().once();
    workQueue.addWork(DistributedWorkQueueWorkAssignerHelper.getQueueKey(filename2, target2), file2);
    expectLastCall().once();
    // Unlike the single-table case, both files are queued because they replicate to different source tables
    replay(workQueue);
    assigner.createWork();
    verify(workQueue);
    assertEquals(1, queuedWork.size());
    assertTrue(queuedWork.containsKey("cluster1"));
    Map<TableId, String> cluster1Work = queuedWork.get("cluster1");
    assertEquals(2, cluster1Work.size());
    assertTrue(cluster1Work.containsKey(target1.getSourceTableId()));
    assertEquals(DistributedWorkQueueWorkAssignerHelper.getQueueKey(filename1, target1), cluster1Work.get(target1.getSourceTableId()));
    assertTrue(cluster1Work.containsKey(target2.getSourceTableId()));
    assertEquals(DistributedWorkQueueWorkAssignerHelper.getQueueKey(filename2, target2), cluster1Work.get(target2.getSourceTableId()));
}
Also used : Status(org.apache.accumulo.server.replication.proto.Replication.Status) TableId(org.apache.accumulo.core.data.TableId) ReplicationTarget(org.apache.accumulo.core.replication.ReplicationTarget) HashMap(java.util.HashMap) Text(org.apache.hadoop.io.Text) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) HashMap(java.util.HashMap) Map(java.util.Map) DistributedWorkQueue(org.apache.accumulo.server.zookeeper.DistributedWorkQueue) Test(org.junit.Test)

Example 39 with TableId

Use of org.apache.accumulo.core.data.TableId in project accumulo by apache.

From the class CollectTabletStats, the method findTablets:

private static List<KeyExtent> findTablets(ClientContext context, boolean selectLocalTablets, String tableName, SortedMap<KeyExtent, String> tabletLocations) throws Exception {
    TableId tableId = context.getTableId(tableName);
    MetadataServicer.forTableId(context, tableId).getTabletLocations(tabletLocations);
    InetAddress localaddress = InetAddress.getLocalHost();
    List<KeyExtent> candidates = new ArrayList<>();
    for (Entry<KeyExtent, String> entry : tabletLocations.entrySet()) {
        String loc = entry.getValue();
        if (loc != null) {
            boolean isLocal = HostAndPort.fromString(entry.getValue()).getHost().equals(localaddress.getHostName());
            if (selectLocalTablets && isLocal) {
                candidates.add(entry.getKey());
            } else if (!selectLocalTablets && !isLocal) {
                candidates.add(entry.getKey());
            }
        }
    }
    return candidates;
}
Also used : TableId(org.apache.accumulo.core.data.TableId) ArrayList(java.util.ArrayList) InetAddress(java.net.InetAddress) KeyExtent(org.apache.accumulo.core.dataImpl.KeyExtent)
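
The selection loop in findTablets reduces to: skip unassigned tablets, decide whether each location's host is the local host, and keep the extent when that matches the requested mode. Here is a minimal sketch of that filter; the class name TabletFilter is illustrative, and a plain split on ":" stands in for the HostAndPort parsing used in the real method.

import java.net.InetAddress;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.SortedMap;
import org.apache.accumulo.core.dataImpl.KeyExtent;

public final class TabletFilter {

    private TabletFilter() {}

    // Keep only tablets whose assigned location is (or is not) on this machine.
    // Locations are "host:port" strings; unassigned tablets (null location) are skipped.
    public static List<KeyExtent> select(SortedMap<KeyExtent, String> tabletLocations,
            boolean selectLocalTablets) throws Exception {
        String localHostName = InetAddress.getLocalHost().getHostName();
        List<KeyExtent> candidates = new ArrayList<>();
        for (Map.Entry<KeyExtent, String> entry : tabletLocations.entrySet()) {
            String loc = entry.getValue();
            if (loc == null) {
                continue;
            }
            boolean isLocal = loc.split(":", 2)[0].equals(localHostName);
            if (isLocal == selectLocalTablets) {
                candidates.add(entry.getKey());
            }
        }
        return candidates;
    }
}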

Example 40 with TableId

Use of org.apache.accumulo.core.data.TableId in project accumulo by apache.

From the class ReplicationOperationsImplIT, the method waitsUntilEntriesAreReplicated:

@Test
public void waitsUntilEntriesAreReplicated() throws Exception {
    client.tableOperations().create("foo");
    TableId tableId = TableId.of(client.tableOperations().tableIdMap().get("foo"));
    String file1 = "/accumulo/wals/tserver+port/" + UUID.randomUUID(), file2 = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
    Status stat = Status.newBuilder().setBegin(0).setEnd(10000).setInfiniteEnd(false).setClosed(false).build();
    BatchWriter bw = ReplicationTable.getBatchWriter(client);
    Mutation m = new Mutation(file1);
    StatusSection.add(m, tableId, ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    m = new Mutation(file2);
    StatusSection.add(m, tableId, ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    bw.close();
    bw = client.createBatchWriter(MetadataTable.NAME);
    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
    m.put(ReplicationSection.COLF, new Text(tableId.canonical()), ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    m = new Mutation(ReplicationSection.getRowPrefix() + file2);
    m.put(ReplicationSection.COLF, new Text(tableId.canonical()), ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    bw.close();
    final AtomicBoolean done = new AtomicBoolean(false);
    final AtomicBoolean exception = new AtomicBoolean(false);
    final ReplicationOperationsImpl roi = getReplicationOperations();
    Thread t = new Thread(() -> {
        try {
            roi.drain("foo");
        } catch (Exception e) {
            log.error("Got error", e);
            exception.set(true);
        }
        done.set(true);
    });
    t.start();
    // With the records, we shouldn't be drained
    assertFalse(done.get());
    bw = client.createBatchWriter(MetadataTable.NAME);
    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
    m.putDelete(ReplicationSection.COLF, new Text(tableId.canonical()));
    bw.addMutation(m);
    bw.flush();
    assertFalse(done.get());
    m = new Mutation(ReplicationSection.getRowPrefix() + file2);
    m.putDelete(ReplicationSection.COLF, new Text(tableId.canonical()));
    bw.addMutation(m);
    bw.flush();
    bw.close();
    // Removing metadata entries doesn't change anything
    assertFalse(done.get());
    // Remove the replication entries too
    bw = ReplicationTable.getBatchWriter(client);
    m = new Mutation(file1);
    m.putDelete(StatusSection.NAME, new Text(tableId.canonical()));
    bw.addMutation(m);
    bw.flush();
    assertFalse(done.get());
    m = new Mutation(file2);
    m.putDelete(StatusSection.NAME, new Text(tableId.canonical()));
    bw.addMutation(m);
    bw.flush();
    try {
        t.join(5000);
    } catch (InterruptedException e) {
        fail("ReplicationOperations.drain did not complete");
    }
    // After both the metadata and replication table entries are removed, the drain should complete
    assertTrue("Drain never finished", done.get());
    assertFalse("Saw unexpected exception", exception.get());
}
Also used : TableId(org.apache.accumulo.core.data.TableId) Status(org.apache.accumulo.server.replication.proto.Replication.Status) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Text(org.apache.hadoop.io.Text) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) ReplicationOperationsImpl(org.apache.accumulo.core.clientImpl.ReplicationOperationsImpl) TException(org.apache.thrift.TException) Test(org.junit.Test)

Aggregations

TableId (org.apache.accumulo.core.data.TableId): 169
Text (org.apache.hadoop.io.Text): 64
HashMap (java.util.HashMap): 55
KeyExtent (org.apache.accumulo.core.dataImpl.KeyExtent): 55
ArrayList (java.util.ArrayList): 45
Test (org.junit.Test): 43
TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException): 41
Map (java.util.Map): 37
Key (org.apache.accumulo.core.data.Key): 36
AccumuloClient (org.apache.accumulo.core.client.AccumuloClient): 34
HashSet (java.util.HashSet): 31
AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException): 31
Value (org.apache.accumulo.core.data.Value): 31
IOException (java.io.IOException): 28
Scanner (org.apache.accumulo.core.client.Scanner): 28
AccumuloException (org.apache.accumulo.core.client.AccumuloException): 27
Mutation (org.apache.accumulo.core.data.Mutation): 27
List (java.util.List): 26
Range (org.apache.accumulo.core.data.Range): 24
BatchWriter (org.apache.accumulo.core.client.BatchWriter): 23