
Example 11 with LogEntry

Use of org.apache.accumulo.core.tabletserver.log.LogEntry in the accumulo project by Apache.

From class LogEntryTest, method test().

@Test
public void test() throws Exception {
    KeyExtent extent = new KeyExtent(Table.ID.of("1"), null, new Text(""));
    long ts = 12345678L;
    String server = "localhost:1234";
    String filename = "default/foo";
    LogEntry entry = new LogEntry(extent, ts, server, filename);
    assertEquals(extent, entry.extent);
    assertEquals(server, entry.server);
    assertEquals(filename, entry.filename);
    assertEquals(ts, entry.timestamp);
    assertEquals("1<; default/foo", entry.toString());
    assertEquals(new Text("log"), entry.getColumnFamily());
    assertEquals(new Text("localhost:1234/default/foo"), entry.getColumnQualifier());
    LogEntry copy = LogEntry.fromBytes(entry.toBytes());
    assertEquals(entry.toString(), copy.toString());
    Key key = new Key(new Text("1<"), new Text("log"), new Text("localhost:1234/default/foo"));
    key.setTimestamp(ts);
    LogEntry copy2 = LogEntry.fromKeyValue(key, entry.getValue());
    assertEquals(entry.toString(), copy2.toString());
    assertEquals(entry.timestamp, copy2.timestamp);
    assertEquals("foo", entry.getUniqueID());
    assertEquals("localhost:1234/default/foo", entry.getName());
    assertEquals(new Value("default/foo".getBytes()), entry.getValue());
}
Also used:
    Value (org.apache.accumulo.core.data.Value)
    Text (org.apache.hadoop.io.Text)
    KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent)
    LogEntry (org.apache.accumulo.core.tabletserver.log.LogEntry)
    Key (org.apache.accumulo.core.data.Key)
    Test (org.junit.Test)
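The Key/Value pair the test builds by hand is exactly what a metadata scan returns, so the same round trip works when reading entries back out of the metadata table. A minimal sketch of that read path, assuming a Connector named conn and the LogColumnFamily constant from MetadataSchema (a hypothetical helper, not part of the test):

import java.util.Map.Entry;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.metadata.MetadataTable;
import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.LogColumnFamily;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.core.tabletserver.log.LogEntry;

// Hypothetical helper: scan the metadata table for WAL references and rebuild
// each LogEntry with the same fromKeyValue call exercised in the test above.
static void printWalEntries(Connector conn) throws Exception {
    Scanner scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
    // restrict to the "log" column family, i.e. entry.getColumnFamily() above
    scanner.fetchColumnFamily(LogColumnFamily.NAME);
    for (Entry<Key,Value> e : scanner) {
        LogEntry le = LogEntry.fromKeyValue(e.getKey(), e.getValue());
        System.out.println(le.getName() + " @ " + le.timestamp);
    }
}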

Example 12 with LogEntry

Use of org.apache.accumulo.core.tabletserver.log.LogEntry in the accumulo project by Apache.

From class ReplicationOperationsImplIT, method inprogressReplicationRecordsBlockExecution().

@Test
public void inprogressReplicationRecordsBlockExecution() throws Exception {
    conn.tableOperations().create("foo");
    Table.ID tableId1 = Table.ID.of(conn.tableOperations().tableIdMap().get("foo"));
    String file1 = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
    Status stat = Status.newBuilder().setBegin(0).setEnd(10000).setInfiniteEnd(false).setClosed(false).build();
    BatchWriter bw = ReplicationTable.getBatchWriter(conn);
    Mutation m = new Mutation(file1);
    StatusSection.add(m, tableId1, ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    bw.close();
    LogEntry logEntry = new LogEntry(new KeyExtent(tableId1, null, null), System.currentTimeMillis(), "tserver", file1);
    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
    m.put(ReplicationSection.COLF, new Text(tableId1.getUtf8()), ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    m = new Mutation(logEntry.getRow());
    m.put(logEntry.getColumnFamily(), logEntry.getColumnQualifier(), logEntry.getValue());
    bw.addMutation(m);
    bw.close();
    final AtomicBoolean done = new AtomicBoolean(false);
    final AtomicBoolean exception = new AtomicBoolean(false);
    final ReplicationOperationsImpl roi = getReplicationOperations();
    Thread t = new Thread(new Runnable() {

        @Override
        public void run() {
            try {
                roi.drain("foo");
            } catch (Exception e) {
                log.error("Got error", e);
                exception.set(true);
            }
            done.set(true);
        }
    });
    t.start();
    // With these records in place, the drain should not complete yet
    Assert.assertFalse(done.get());
    Status newStatus = Status.newBuilder().setBegin(1000).setEnd(2000).setInfiniteEnd(false).setClosed(true).build();
    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
    m.put(ReplicationSection.COLF, new Text(tableId1.getUtf8()), ProtobufUtil.toValue(newStatus));
    bw.addMutation(m);
    bw.flush();
    // Overwriting the metadata status with a closed, but not fully replicated, one doesn't change anything
    Assert.assertFalse(done.get());
    // Update the replication table entry the same way
    bw = ReplicationTable.getBatchWriter(conn);
    m = new Mutation(file1);
    m.put(StatusSection.NAME, new Text(tableId1.getUtf8()), ProtobufUtil.toValue(newStatus));
    bw.addMutation(m);
    bw.flush();
    try {
        t.join(5000);
    } catch (InterruptedException e) {
        Assert.fail("ReplicationOperations.drain did not complete");
    }
    // Closed, but not fully replicated, records still don't allow the drain to complete
    Assert.assertFalse("Drain somehow finished", done.get());
    Assert.assertFalse("Saw unexpected exception", exception.get());
}
Also used:
    Status (org.apache.accumulo.server.replication.proto.Replication.Status)
    MetadataTable (org.apache.accumulo.core.metadata.MetadataTable)
    Table (org.apache.accumulo.core.client.impl.Table)
    ReplicationTable (org.apache.accumulo.core.replication.ReplicationTable)
    Text (org.apache.hadoop.io.Text)
    KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent)
    TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException)
    AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException)
    TException (org.apache.thrift.TException)
    AccumuloException (org.apache.accumulo.core.client.AccumuloException)
    ThriftTableOperationException (org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException)
    AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean)
    BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig)
    BatchWriter (org.apache.accumulo.core.client.BatchWriter)
    Mutation (org.apache.accumulo.core.data.Mutation)
    LogEntry (org.apache.accumulo.core.tabletserver.log.LogEntry)
    ReplicationOperationsImpl (org.apache.accumulo.core.client.impl.ReplicationOperationsImpl)
    Test (org.junit.Test)
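drain can also be scoped to an explicit set of files rather than everything currently referenced. A hedged sketch of that pattern, assuming the referencedFiles and drain(String, Set&lt;String&gt;) methods on ReplicationOperations behave as their names suggest:

import java.util.Set;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.admin.ReplicationOperations;

// Sketch: snapshot the WAL files the table currently references, then block
// until exactly those files are fully replicated. Files that start being
// referenced after the snapshot do not extend the wait.
static void drainCurrentFiles(Connector conn, String tableName) throws Exception {
    ReplicationOperations repl = conn.replicationOperations();
    Set<String> files = repl.referencedFiles(tableName);
    repl.drain(tableName, files);
}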

Example 13 with LogEntry

Use of org.apache.accumulo.core.tabletserver.log.LogEntry in the accumulo project by Apache.

From class VolumeUtil, method switchVolumes().

private static LogEntry switchVolumes(LogEntry le, List<Pair<Path, Path>> replacements) {
    String switchedPath = switchVolume(le.filename, FileType.WAL, replacements);
    int numSwitched = 0;
    if (switchedPath != null)
        numSwitched++;
    else
        switchedPath = le.filename;
    // Note: this second switchVolume lookup duplicates the one above, and
    // switchedLogs is never read afterwards; it only bumps numSwitched again
    // for the same file.
    ArrayList<String> switchedLogs = new ArrayList<>();
    String switchedLog = switchVolume(le.filename, FileType.WAL, replacements);
    if (switchedLog != null) {
        switchedLogs.add(switchedLog);
        numSwitched++;
    } else {
        switchedLogs.add(le.filename);
    }
    if (numSwitched == 0) {
        log.trace("Did not switch {}", le);
        return null;
    }
    LogEntry newLogEntry = new LogEntry(le.extent, le.timestamp, le.server, switchedPath);
    log.trace("Switched {} to {}", le, newLogEntry);
    return newLogEntry;
}
Also used:
    ArrayList (java.util.ArrayList)
    LogEntry (org.apache.accumulo.core.tabletserver.log.LogEntry)
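switchVolume itself is not shown here; conceptually it rebases a path from an old volume onto its configured replacement and returns null when no replacement applies. A simplified, hypothetical sketch of that prefix swap (the real implementation also normalizes paths and checks the FileType):

import java.util.List;
import org.apache.accumulo.core.util.Pair;
import org.apache.hadoop.fs.Path;

// Hypothetical simplification: if the file lives under an old volume, rebase
// it onto the replacement volume; otherwise return null, which the caller
// above treats as "no switch needed" and falls back to le.filename.
static String switchVolumeSketch(String path, List<Pair<Path,Path>> replacements) {
    for (Pair<Path,Path> replacement : replacements) {
        String oldPrefix = replacement.getFirst().toString();
        if (path.startsWith(oldPrefix))
            return replacement.getSecond().toString() + path.substring(oldPrefix.length());
    }
    return null;
}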

Example 14 with LogEntry

Use of org.apache.accumulo.core.tabletserver.log.LogEntry in the accumulo project by Apache.

From class MetadataTableUtil, method updateTabletVolumes().

public static void updateTabletVolumes(KeyExtent extent, List<LogEntry> logsToRemove, List<LogEntry> logsToAdd,
        List<FileRef> filesToRemove, SortedMap<FileRef, DataFileValue> filesToAdd, String newDir, ZooLock zooLock,
        AccumuloServerContext context) {
    if (extent.isRootTablet()) {
        if (newDir != null)
            throw new IllegalArgumentException("newDir not expected for " + extent);
        if (filesToRemove.size() != 0 || filesToAdd.size() != 0)
            throw new IllegalArgumentException("files not expected for " + extent);
        // add before removing in case of process death
        for (LogEntry logEntry : logsToAdd)
            addRootLogEntry(context, zooLock, logEntry);
        removeUnusedWALEntries(context, extent, logsToRemove, zooLock);
    } else {
        Mutation m = new Mutation(extent.getMetadataEntry());
        for (LogEntry logEntry : logsToRemove)
            m.putDelete(logEntry.getColumnFamily(), logEntry.getColumnQualifier());
        for (LogEntry logEntry : logsToAdd)
            m.put(logEntry.getColumnFamily(), logEntry.getColumnQualifier(), logEntry.getValue());
        for (FileRef fileRef : filesToRemove)
            m.putDelete(DataFileColumnFamily.NAME, fileRef.meta());
        for (Entry<FileRef, DataFileValue> entry : filesToAdd.entrySet())
            m.put(DataFileColumnFamily.NAME, entry.getKey().meta(), new Value(entry.getValue().encode()));
        if (newDir != null)
            ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(newDir.getBytes(UTF_8)));
        update(context, m, extent);
    }
}
Also used:
    DataFileValue (org.apache.accumulo.core.metadata.schema.DataFileValue)
    FileRef (org.apache.accumulo.server.fs.FileRef)
    Value (org.apache.accumulo.core.data.Value)
    Mutation (org.apache.accumulo.core.data.Mutation)
    LogEntry (org.apache.accumulo.core.tabletserver.log.LogEntry)
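A typical call site only touches one of the parameter groups at a time. A hypothetical sketch that swaps a single WAL reference on a tablet while leaving data files and the tablet directory untouched (assumes a server-side context and ZooLock are already in hand; import paths follow the 1.8-era layout):

import java.util.Collections;
import org.apache.accumulo.core.data.impl.KeyExtent;
import org.apache.accumulo.core.tabletserver.log.LogEntry;
import org.apache.accumulo.server.AccumuloServerContext;
import org.apache.accumulo.server.util.MetadataTableUtil;
import org.apache.accumulo.server.zookeeper.ZooLock;

// Hypothetical call site: replace oldEntry with newEntry for this tablet.
// Empty file lists and a null newDir keep the rest of the tablet metadata
// untouched, which also makes the call legal for the root tablet.
static void swapWalEntry(AccumuloServerContext context, ZooLock zooLock, KeyExtent extent,
        LogEntry oldEntry, LogEntry newEntry) {
    MetadataTableUtil.updateTabletVolumes(extent,
        Collections.singletonList(oldEntry),  // logsToRemove
        Collections.singletonList(newEntry),  // logsToAdd
        Collections.emptyList(),              // filesToRemove
        Collections.emptySortedMap(),         // filesToAdd
        null,                                 // newDir
        zooLock, context);
}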

Example 15 with LogEntry

Use of org.apache.accumulo.core.tabletserver.log.LogEntry in the accumulo project by Apache.

From class MetadataTableUtil, method removeUnusedWALEntries().

public static void removeUnusedWALEntries(AccumuloServerContext context, KeyExtent extent, final List<LogEntry> entries, ZooLock zooLock) {
    if (extent.isRootTablet()) {
        retryZooKeeperUpdate(context, zooLock, new ZooOperation() {

            @Override
            public void run(IZooReaderWriter rw) throws KeeperException, InterruptedException, IOException {
                String root = getZookeeperLogLocation();
                for (LogEntry entry : entries) {
                    String path = root + "/" + entry.getUniqueID();
                    log.debug("Removing " + path + " from zookeeper");
                    rw.recursiveDelete(path, NodeMissingPolicy.SKIP);
                }
            }
        });
    } else {
        Mutation m = new Mutation(extent.getMetadataEntry());
        for (LogEntry entry : entries) {
            m.putDelete(entry.getColumnFamily(), entry.getColumnQualifier());
        }
        update(context, zooLock, m, extent);
    }
}
Also used:
    IZooReaderWriter (org.apache.accumulo.fate.zookeeper.IZooReaderWriter)
    IOException (java.io.IOException)
    Mutation (org.apache.accumulo.core.data.Mutation)
    KeeperException (org.apache.zookeeper.KeeperException)
    LogEntry (org.apache.accumulo.core.tabletserver.log.LogEntry)

Aggregations

Classes co-occurring with LogEntry across these examples (number of uses):

LogEntry (org.apache.accumulo.core.tabletserver.log.LogEntry): 19
Path (org.apache.hadoop.fs.Path): 10
ArrayList (java.util.ArrayList): 7
Mutation (org.apache.accumulo.core.data.Mutation): 7
Value (org.apache.accumulo.core.data.Value): 7
Text (org.apache.hadoop.io.Text): 7
Scanner (org.apache.accumulo.core.client.Scanner): 6
Key (org.apache.accumulo.core.data.Key): 6
MetadataTable (org.apache.accumulo.core.metadata.MetadataTable): 6
BatchWriter (org.apache.accumulo.core.client.BatchWriter): 5
Table (org.apache.accumulo.core.client.impl.Table): 5
Test (org.junit.Test): 5
IOException (java.io.IOException): 4
BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig): 4
Connector (org.apache.accumulo.core.client.Connector): 4
DataFileValue (org.apache.accumulo.core.metadata.schema.DataFileValue): 4
KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent): 3
ReplicationTable (org.apache.accumulo.core.replication.ReplicationTable): 3
Status (org.apache.accumulo.server.replication.proto.Replication.Status): 3
File (java.io.File): 2