Example 1 with LogEntry

Use of org.apache.accumulo.core.tabletserver.log.LogEntry in project accumulo by apache.

From the class ReplicationIT, the method filesClosedAfterUnused:

@Test
public void filesClosedAfterUnused() throws Exception {
    Connector conn = getConnector();
    String table = "table";
    conn.tableOperations().create(table);
    Table.ID tableId = Table.ID.of(conn.tableOperations().tableIdMap().get(table));
    Assert.assertNotNull(tableId);
    conn.tableOperations().setProperty(table, Property.TABLE_REPLICATION.getKey(), "true");
    conn.tableOperations().setProperty(table, Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "1");
    // Configure a mock replication peer that just sleeps for 50 seconds instead of replicating
    conn.instanceOperations().setProperty(Property.REPLICATION_PEERS.getKey() + "cluster1", ReplicaSystemFactory.getPeerConfigurationValue(MockReplicaSystem.class, "50000"));
    // Write a mutation to make a log file
    BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
    Mutation m = new Mutation("one");
    m.put("", "", "");
    bw.addMutation(m);
    bw.close();
    // Write another mutation, which should make the logger roll over to a new WAL
    bw = conn.createBatchWriter(table, new BatchWriterConfig());
    m = new Mutation("three");
    m.put("", "", "");
    bw.addMutation(m);
    bw.close();
    try (Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        s.fetchColumnFamily(TabletsSection.LogColumnFamily.NAME);
        s.setRange(TabletsSection.getRange(tableId));
        Set<String> wals = new HashSet<>();
        for (Entry<Key, Value> entry : s) {
            LogEntry logEntry = LogEntry.fromKeyValue(entry.getKey(), entry.getValue());
            wals.add(new Path(logEntry.filename).toString());
        }
        log.warn("Found wals {}", wals);
        bw = conn.createBatchWriter(table, new BatchWriterConfig());
        m = new Mutation("three");
        byte[] bytes = new byte[1024 * 1024];
        m.put("1".getBytes(), new byte[0], bytes);
        m.put("2".getBytes(), new byte[0], bytes);
        m.put("3".getBytes(), new byte[0], bytes);
        m.put("4".getBytes(), new byte[0], bytes);
        m.put("5".getBytes(), new byte[0], bytes);
        bw.addMutation(m);
        bw.close();
        conn.tableOperations().flush(table, null, null, true);
        while (!ReplicationTable.isOnline(conn)) {
            sleepUninterruptibly(MILLIS_BETWEEN_REPLICATION_TABLE_ONLINE_CHECKS, TimeUnit.MILLISECONDS);
        }
        for (int i = 0; i < 10; i++) {
            try (Scanner s2 = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
                s2.fetchColumnFamily(LogColumnFamily.NAME);
                s2.setRange(TabletsSection.getRange(tableId));
                for (Entry<Key, Value> entry : s2) {
                    log.info("{}={}", entry.getKey().toStringNoTruncate(), entry.getValue());
                }
            }
            try (Scanner s3 = ReplicationTable.getScanner(conn)) {
                StatusSection.limit(s3);
                Text buff = new Text();
                boolean allReferencedLogsClosed = true;
                int recordsFound = 0;
                for (Entry<Key, Value> e : s3) {
                    recordsFound++;
                    StatusSection.getFile(e.getKey(), buff);
                    String file = buff.toString();
                    if (wals.contains(file)) {
                        Status stat = Status.parseFrom(e.getValue().get());
                        if (!stat.getClosed()) {
                            log.info("{} wasn't closed", file);
                            allReferencedLogsClosed = false;
                        }
                    }
                }
                if (recordsFound > 0 && allReferencedLogsClosed) {
                    return;
                }
                Thread.sleep(2000);
            } catch (RuntimeException e) {
                Throwable cause = e.getCause();
                if (cause instanceof AccumuloSecurityException) {
                    AccumuloSecurityException ase = (AccumuloSecurityException) cause;
                    switch(ase.getSecurityErrorCode()) {
                        case PERMISSION_DENIED:
                            // We tried to read the replication table before the GRANT went through
                            Thread.sleep(2000);
                            break;
                        default:
                            throw e;
                    }
                }
            }
        }
        Assert.fail("We had a file that was referenced but didn't get closed");
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Status (org.apache.accumulo.server.replication.proto.Replication.Status), Connector (org.apache.accumulo.core.client.Connector), Scanner (org.apache.accumulo.core.client.Scanner), MetadataTable (org.apache.accumulo.core.metadata.MetadataTable), Table (org.apache.accumulo.core.client.impl.Table), ReplicationTable (org.apache.accumulo.core.replication.ReplicationTable), Text (org.apache.hadoop.io.Text), Value (org.apache.accumulo.core.data.Value), BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig), AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException), BatchWriter (org.apache.accumulo.core.client.BatchWriter), Mutation (org.apache.accumulo.core.data.Mutation), Key (org.apache.accumulo.core.data.Key), LogEntry (org.apache.accumulo.core.tabletserver.log.LogEntry), HashSet (java.util.HashSet), Test (org.junit.Test)
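
The heart of this test is the metadata scan: WAL references for a table's tablets live in the metadata table under the log column family, and each entry decodes into a LogEntry. A minimal sketch of just that lookup, assuming the Connector conn and Table.ID tableId from the test above:

Set<String> wals = new HashSet<>();
try (Scanner scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
    // Limit the scan to this table's tablets and to the column family holding WAL references
    scanner.fetchColumnFamily(TabletsSection.LogColumnFamily.NAME);
    scanner.setRange(TabletsSection.getRange(tableId));
    for (Entry<Key, Value> entry : scanner) {
        // Decode the metadata entry into a LogEntry and normalize the WAL path
        LogEntry logEntry = LogEntry.fromKeyValue(entry.getKey(), entry.getValue());
        wals.add(new Path(logEntry.filename).toString());
    }
}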

Example 2 with LogEntry

Use of org.apache.accumulo.core.tabletserver.log.LogEntry in project accumulo by apache.

From the class TabletServer, the method recover:

public void recover(VolumeManager fs, KeyExtent extent, TableConfiguration tconf, List<LogEntry> logEntries, Set<String> tabletFiles, MutationReceiver mutationReceiver) throws IOException {
    List<Path> recoveryLogs = new ArrayList<>();
    List<LogEntry> sorted = new ArrayList<>(logEntries);
    Collections.sort(sorted, new Comparator<LogEntry>() {

        @Override
        public int compare(LogEntry e1, LogEntry e2) {
            // Long.compare avoids the int-overflow risk of the subtract-and-cast idiom
            return Long.compare(e1.timestamp, e2.timestamp);
        }
    });
    for (LogEntry entry : sorted) {
        Path recovery = null;
        Path finished = RecoveryPath.getRecoveryPath(fs, fs.getFullPath(FileType.WAL, entry.filename));
        finished = SortedLogState.getFinishedMarkerPath(finished);
        TabletServer.log.info("Looking for " + finished);
        if (fs.exists(finished)) {
            recovery = finished.getParent();
        }
        if (recovery == null)
            throw new IOException("Unable to find recovery files for extent " + extent + " logEntry: " + entry);
        recoveryLogs.add(recovery);
    }
    logger.recover(fs, extent, tconf, recoveryLogs, tabletFiles, mutationReceiver);
}
Also used: RecoveryPath (org.apache.accumulo.server.master.recovery.RecoveryPath), Path (org.apache.hadoop.fs.Path), ArrayList (java.util.ArrayList), IOException (java.io.IOException), LogEntry (org.apache.accumulo.core.tabletserver.log.LogEntry)
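
Since LogEntry exposes timestamp as a public field, the comparison above can also be written more compactly on Java 8 with Comparator.comparingLong (requires a java.util.Comparator import), which sidesteps the subtract-and-cast overflow hazard entirely. A minimal sketch:

// Sort recovery logs oldest-first by WAL timestamp
List<LogEntry> sorted = new ArrayList<>(logEntries);
sorted.sort(Comparator.comparingLong(e -> e.timestamp));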

Example 3 with LogEntry

Use of org.apache.accumulo.core.tabletserver.log.LogEntry in project accumulo by apache.

From the class VolumeUtil, the method updateTabletVolumes:

/**
 * This method does two things. First, it switches any volumes a tablet is using that are configured in instance.volumes.replacements. Second, if a tablet dir
 * is no longer configured for use, it chooses a new tablet directory.
 */
public static TabletFiles updateTabletVolumes(AccumuloServerContext context, ZooLock zooLock, VolumeManager vm, KeyExtent extent, TabletFiles tabletFiles, boolean replicate) throws IOException {
    List<Pair<Path, Path>> replacements = ServerConstants.getVolumeReplacements();
    log.trace("Using volume replacements: {}", replacements);
    List<LogEntry> logsToRemove = new ArrayList<>();
    List<LogEntry> logsToAdd = new ArrayList<>();
    List<FileRef> filesToRemove = new ArrayList<>();
    SortedMap<FileRef, DataFileValue> filesToAdd = new TreeMap<>();
    TabletFiles ret = new TabletFiles();
    for (LogEntry logEntry : tabletFiles.logEntries) {
        LogEntry switchedLogEntry = switchVolumes(logEntry, replacements);
        if (switchedLogEntry != null) {
            logsToRemove.add(logEntry);
            logsToAdd.add(switchedLogEntry);
            ret.logEntries.add(switchedLogEntry);
            log.debug("Replacing volume {} : {} -> {}", extent, logEntry.filename, switchedLogEntry.filename);
        } else {
            ret.logEntries.add(logEntry);
        }
    }
    if (extent.isRootTablet()) {
        ret.datafiles = tabletFiles.datafiles;
    } else {
        for (Entry<FileRef, DataFileValue> entry : tabletFiles.datafiles.entrySet()) {
            String metaPath = entry.getKey().meta().toString();
            String switchedPath = switchVolume(metaPath, FileType.TABLE, replacements);
            if (switchedPath != null) {
                filesToRemove.add(entry.getKey());
                FileRef switchedRef = new FileRef(switchedPath, new Path(switchedPath));
                filesToAdd.put(switchedRef, entry.getValue());
                ret.datafiles.put(switchedRef, entry.getValue());
                log.debug("Replacing volume {} : {} -> {}", extent, metaPath, switchedPath);
            } else {
                ret.datafiles.put(entry.getKey(), entry.getValue());
            }
        }
    }
    String tabletDir = tabletFiles.dir;
    String switchedDir = switchVolume(tabletDir, FileType.TABLE, replacements);
    if (switchedDir != null) {
        log.debug("Replacing volume {} : {} -> {}", extent, tabletDir, switchedDir);
        tabletDir = switchedDir;
    }
    if (logsToRemove.size() + filesToRemove.size() > 0 || switchedDir != null) {
        MetadataTableUtil.updateTabletVolumes(extent, logsToRemove, logsToAdd, filesToRemove, filesToAdd, switchedDir, zooLock, context);
        if (replicate) {
            Status status = StatusUtil.fileClosed();
            log.debug("Tablet directory switched, need to record old log files {} {}", logsToRemove, ProtobufUtil.toString(status));
            // Before deleting these logs, we need to mark them for replication
            for (LogEntry logEntry : logsToRemove) {
                ReplicationTableUtil.updateFiles(context, extent, logEntry.filename, status);
            }
        }
    }
    ret.dir = decommisionedTabletDir(context, zooLock, vm, extent, tabletDir);
    if (extent.isRootTablet()) {
        SortedMap<FileRef, DataFileValue> copy = ret.datafiles;
        ret.datafiles = new TreeMap<>();
        for (Entry<FileRef, DataFileValue> entry : copy.entrySet()) {
            ret.datafiles.put(new FileRef(new Path(ret.dir, entry.getKey().path().getName()).toString()), entry.getValue());
        }
    }
    // This method should return the exact strings that are in the metadata table
    return ret;
}
Also used: Path (org.apache.hadoop.fs.Path), Status (org.apache.accumulo.server.replication.proto.Replication.Status), FileStatus (org.apache.hadoop.fs.FileStatus), DataFileValue (org.apache.accumulo.core.metadata.schema.DataFileValue), ArrayList (java.util.ArrayList), TreeMap (java.util.TreeMap), LogEntry (org.apache.accumulo.core.tabletserver.log.LogEntry), Pair (org.apache.accumulo.core.util.Pair)
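
The replacement list comes from the instance.volumes.replacements property, which resolves to (old volume, new volume) Path pairs. Conceptually a volume switch is a prefix substitution on the path string; the sketch below is a hypothetical illustration of that idea, not the project's switchVolume, which also takes the file's FileType into account:

// Hypothetical helper illustrating the prefix substitution behind a volume switch.
// Returns null when no configured replacement applies, mirroring switchVolume's contract.
static String switchPrefix(String path, List<Pair<Path, Path>> replacements) {
    for (Pair<Path, Path> replacement : replacements) {
        String oldVolume = replacement.getFirst().toString();
        if (path.startsWith(oldVolume))
            return replacement.getSecond().toString() + path.substring(oldVolume.length());
    }
    return null;
}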

Example 4 with LogEntry

Use of org.apache.accumulo.core.tabletserver.log.LogEntry in project accumulo by apache.

From the class ZooTabletStateStore, the method unassign:

@Override
public void unassign(Collection<TabletLocationState> tablets, Map<TServerInstance, List<Path>> logsForDeadServers) throws DistributedStoreException {
    if (tablets.size() != 1)
        throw new IllegalArgumentException("There is only one root tablet");
    TabletLocationState tls = tablets.iterator().next();
    if (tls.extent.compareTo(RootTable.EXTENT) != 0)
        throw new IllegalArgumentException("You can only store the root tablet location");
    if (logsForDeadServers != null) {
        List<Path> logs = logsForDeadServers.get(tls.futureOrCurrent());
        if (logs != null) {
            for (Path entry : logs) {
                LogEntry logEntry = new LogEntry(RootTable.EXTENT, System.currentTimeMillis(), tls.futureOrCurrent().getLocation().toString(), entry.toString());
                byte[] value;
                try {
                    value = logEntry.toBytes();
                } catch (IOException ex) {
                    throw new DistributedStoreException(ex);
                }
                store.put(RootTable.ZROOT_TABLET_WALOGS + "/" + logEntry.getUniqueID(), value);
            }
        }
    }
    store.remove(RootTable.ZROOT_TABLET_LOCATION);
    store.remove(RootTable.ZROOT_TABLET_FUTURE_LOCATION);
    log.debug("unassign root tablet location");
}
Also used: Path (org.apache.hadoop.fs.Path), IOException (java.io.IOException), LogEntry (org.apache.accumulo.core.tabletserver.log.LogEntry)
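
The bytes stored under ZROOT_TABLET_WALOGS are simply the LogEntry's own serialized form, so decoding is symmetric with the getRootLogEntries example that follows. A minimal round-trip sketch, assuming an existing LogEntry named logEntry:

// Serialize for storage in ZooKeeper, then decode the stored bytes back into a LogEntry
byte[] value = logEntry.toBytes(); // may throw IOException
LogEntry roundTrip = LogEntry.fromBytes(value); // may throw IOException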

Example 5 with LogEntry

Use of org.apache.accumulo.core.tabletserver.log.LogEntry in project accumulo by apache.

From the class MetadataTableUtil, the method getRootLogEntries:

static void getRootLogEntries(final ArrayList<LogEntry> result) throws KeeperException, InterruptedException, IOException {
    IZooReaderWriter zoo = ZooReaderWriter.getInstance();
    String root = getZookeeperLogLocation();
    // There is a little race between getting the children and fetching
    // the data. The log can be removed in between.
    while (true) {
        result.clear();
        for (String child : zoo.getChildren(root)) {
            try {
                LogEntry e = LogEntry.fromBytes(zoo.getData(root + "/" + child, null));
                // upgrade from !0;!0<< -> +r<<
                e = new LogEntry(RootTable.EXTENT, 0, e.server, e.filename);
                result.add(e);
            } catch (KeeperException.NoNodeException ex) {
                continue;
            }
        }
        break;
    }
}
Also used: IZooReaderWriter (org.apache.accumulo.fate.zookeeper.IZooReaderWriter), LogEntry (org.apache.accumulo.core.tabletserver.log.LogEntry), KeeperException (org.apache.zookeeper.KeeperException)
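
A minimal usage sketch for this method, assuming it runs in the same package (the method is package-private) and the caller propagates the declared KeeperException, InterruptedException, and IOException:

// Collect the root tablet's WAL entries out of ZooKeeper
ArrayList<LogEntry> rootLogs = new ArrayList<>();
MetadataTableUtil.getRootLogEntries(rootLogs);
for (LogEntry e : rootLogs) {
    // Every entry has been rewritten against the current root extent (+r<<)
    System.out.println(e.server + " " + e.filename);
}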

Aggregations

LogEntry (org.apache.accumulo.core.tabletserver.log.LogEntry): 19
Path (org.apache.hadoop.fs.Path): 10
ArrayList (java.util.ArrayList): 7
Mutation (org.apache.accumulo.core.data.Mutation): 7
Value (org.apache.accumulo.core.data.Value): 7
Text (org.apache.hadoop.io.Text): 7
Scanner (org.apache.accumulo.core.client.Scanner): 6
Key (org.apache.accumulo.core.data.Key): 6
MetadataTable (org.apache.accumulo.core.metadata.MetadataTable): 6
BatchWriter (org.apache.accumulo.core.client.BatchWriter): 5
Table (org.apache.accumulo.core.client.impl.Table): 5
Test (org.junit.Test): 5
IOException (java.io.IOException): 4
BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig): 4
Connector (org.apache.accumulo.core.client.Connector): 4
DataFileValue (org.apache.accumulo.core.metadata.schema.DataFileValue): 4
KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent): 3
ReplicationTable (org.apache.accumulo.core.replication.ReplicationTable): 3
Status (org.apache.accumulo.server.replication.proto.Replication.Status): 3
File (java.io.File): 2