Search in sources:

Example 26 with KeyExtent

use of org.apache.accumulo.core.data.impl.KeyExtent in project accumulo by apache.

From the class WALSunnyDayIT, the method getRecoveryMarkers:

/**
 * Scans the root and metadata tables and collects, per tablet, the list of
 * write-ahead-log entries still recorded in the LogColumnFamily.
 *
 * @param c connector used to create the scanners
 * @return map from each tablet's KeyExtent to the log markers seen for it
 * @throws Exception if scanner creation or iteration fails
 */
private Map<KeyExtent, List<String>> getRecoveryMarkers(Connector c) throws Exception {
    Map<KeyExtent, List<String>> markers = new HashMap<>();
    try (Scanner rootScanner = c.createScanner(RootTable.NAME, EMPTY);
        Scanner metaScanner = c.createScanner(MetadataTable.NAME, EMPTY)) {
        // Both scanners are configured identically: restrict to the tablets
        // section, fetch log entries plus the prev-row column that marks the
        // end of each tablet's row.
        for (Scanner scanner : new Scanner[] { rootScanner, metaScanner }) {
            scanner.setRange(TabletsSection.getRange());
            scanner.fetchColumnFamily(TabletsSection.LogColumnFamily.NAME);
            TabletColumnFamily.PREV_ROW_COLUMN.fetch(scanner);
        }
        List<String> tabletLogs = new ArrayList<>();
        Iterator<Entry<Key, Value>> combined = Iterators.concat(rootScanner.iterator(), metaScanner.iterator());
        while (combined.hasNext()) {
            Entry<Key, Value> e = combined.next();
            Key k = e.getKey();
            // Accumulate log markers until the prev-row column closes the tablet.
            if (k.getColumnFamily().equals(TabletsSection.LogColumnFamily.NAME)) {
                tabletLogs.add(k.getColumnQualifier().toString());
            }
            if (TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(k) && !tabletLogs.isEmpty()) {
                markers.put(new KeyExtent(k.getRow(), e.getValue()), tabletLogs);
                tabletLogs = new ArrayList<>();
            }
        }
    }
    return markers;
}
Also used : Scanner(org.apache.accumulo.core.client.Scanner) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) KeyExtent(org.apache.accumulo.core.data.impl.KeyExtent) Entry(java.util.Map.Entry) Value(org.apache.accumulo.core.data.Value) ArrayList(java.util.ArrayList) List(java.util.List) Key(org.apache.accumulo.core.data.Key)

Example 27 with KeyExtent

use of org.apache.accumulo.core.data.impl.KeyExtent in project accumulo by apache.

From the class TabletServer, the method checkTabletMetadata:

/**
 * Validates the metadata entries for a tablet before it is loaded by this server.
 * Checks that every entry belongs to the expected metadata row, that the tablet has
 * exactly one future location matching this server, and that the prev-row, directory,
 * and time columns are present and consistent with the requested extent.
 *
 * @param extent the tablet the caller intends to load
 * @param instance this tablet server's instance; must match the future location
 * @param tabletsKeyValues metadata entries scanned for the tablet's row
 * @param metadataEntry the metadata row the entries must belong to
 * @return the directory column value on success, or null when the tablet should not
 *         be loaded here (already assigned, not assigned to us, or row/extent mismatch)
 * @throws AccumuloException when the metadata is structurally invalid (duplicate
 *         future locations, or missing prev-row/directory/time columns)
 */
static Value checkTabletMetadata(KeyExtent extent, TServerInstance instance, SortedMap<Key, Value> tabletsKeyValues, Text metadataEntry) throws AccumuloException {
    TServerInstance future = null;
    Value prevEndRow = null;
    Value dir = null;
    Value time = null;
    for (Entry<Key, Value> entry : tabletsKeyValues.entrySet()) {
        Key key = entry.getKey();
        if (!metadataEntry.equals(key.getRow())) {
            log.info("Unexpected row in tablet metadata {} {}", metadataEntry, key.getRow());
            return null;
        }
        Text cf = key.getColumnFamily();
        if (cf.equals(TabletsSection.FutureLocationColumnFamily.NAME)) {
            // More than one future location means the metadata is corrupt.
            if (future != null) {
                throw new AccumuloException("Tablet has multiple future locations " + extent);
            }
            future = new TServerInstance(entry.getValue(), key.getColumnQualifier());
        } else if (cf.equals(TabletsSection.CurrentLocationColumnFamily.NAME)) {
            // FIX: the message has two placeholders but only one argument was
            // supplied; pass the extent as well so both placeholders are filled.
            log.info("Tablet seems to be already assigned to {} {}", extent, new TServerInstance(entry.getValue(), key.getColumnQualifier()));
            return null;
        } else if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(key)) {
            prevEndRow = entry.getValue();
        } else if (TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(key)) {
            dir = entry.getValue();
        } else if (TabletsSection.ServerColumnFamily.TIME_COLUMN.hasColumns(key)) {
            time = entry.getValue();
        }
    }
    if (prevEndRow == null) {
        throw new AccumuloException("Metadata entry does not have prev row (" + metadataEntry + ")");
    } else {
        // Rebuild the extent from the metadata row and verify it matches the request.
        KeyExtent ke2 = new KeyExtent(metadataEntry, prevEndRow);
        if (!extent.equals(ke2)) {
            log.info("Tablet prev end row mismatch {} {}", extent, ke2.getPrevEndRow());
            return null;
        }
    }
    if (dir == null) {
        throw new AccumuloException("Metadata entry does not have directory (" + metadataEntry + ")");
    }
    // The old root tablet extent predates the time column, so it is exempt.
    if (time == null && !extent.equals(RootTable.OLD_EXTENT)) {
        throw new AccumuloException("Metadata entry does not have time (" + metadataEntry + ")");
    }
    if (future == null) {
        // FIX: two arguments were supplied for a single placeholder, silently
        // dropping the instance; add the second placeholder.
        log.info("The master has not assigned {} to {}", extent, instance);
        return null;
    }
    if (!instance.equals(future)) {
        log.info("Table {} has been assigned to {} which is not {}", extent, future, instance);
        return null;
    }
    return dir;
}
Also used : AccumuloException(org.apache.accumulo.core.client.AccumuloException) TKeyValue(org.apache.accumulo.core.data.thrift.TKeyValue) Value(org.apache.accumulo.core.data.Value) Text(org.apache.hadoop.io.Text) TKeyExtent(org.apache.accumulo.core.data.thrift.TKeyExtent) KeyExtent(org.apache.accumulo.core.data.impl.KeyExtent) TServerInstance(org.apache.accumulo.server.master.state.TServerInstance) Key(org.apache.accumulo.core.data.Key)

Example 28 with KeyExtent

use of org.apache.accumulo.core.data.impl.KeyExtent in project accumulo by apache.

From the class SortedLogRecovery, the method findLastStartToFinish:

/**
 * Scans one sorted write-ahead log for the given tablet and updates the
 * compaction start/finish state machine used during log recovery.
 *
 * @param reader sorted view of the log file being replayed
 * @param fileno index of this log file among the files being recovered
 * @param extent tablet whose entries are of interest
 * @param tabletFiles files currently referenced by the tablet's metadata
 * @param lastStartToFinish mutable state carried across log files
 * @return the tablet id (tid) under which this tablet's entries were logged
 * @throws EmptyMapFileException if the log contains no entries at all
 * @throws UnusedException if the log defines no tablet matching the extent
 * @throws IOException on read failures
 */
int findLastStartToFinish(MultiReader reader, int fileno, KeyExtent extent, Set<String> tabletFiles, LastStartToFinish lastStartToFinish) throws IOException, EmptyMapFileException, UnusedException {
    // Path suffixes of files the tablet still references; used below to decide
    // whether a minor compaction's output is already accounted for.
    HashSet<String> suffixes = new HashSet<>();
    for (String path : tabletFiles) suffixes.add(getPathSuffix(path));
    // Scan for tableId for this extent (should always be in the log)
    LogFileKey key = new LogFileKey();
    LogFileValue value = new LogFileValue();
    int tid = -1;
    if (!reader.next(key, value))
        throw new EmptyMapFileException();
    // A well-formed log always begins with an OPEN record.
    if (key.event != OPEN)
        throw new RuntimeException("First log entry value is not OPEN");
    // A new tserver session means any dangling COMPACTION_FINISH from the
    // previous session can never be matched — that is a recovery error.
    if (key.tserverSession.compareTo(lastStartToFinish.tserverSession) != 0) {
        if (lastStartToFinish.compactionStatus == Status.LOOKING_FOR_FINISH)
            throw new RuntimeException("COMPACTION_FINISH (without preceding COMPACTION_START) is not followed by a successful minor compaction.");
        lastStartToFinish.update(key.tserverSession);
    }
    // The root tablet may appear in older logs under its legacy extent.
    KeyExtent alternative = extent;
    if (extent.isRootTablet()) {
        alternative = RootTable.OLD_EXTENT;
    }
    LogFileKey defineKey = null;
    // for the maximum tablet id, find the minimum sequence #... may be ok to find the max seq, but just want to make the code behave like it used to
    while (reader.next(key, value)) {
        if (key.event != DEFINE_TABLET)
            break;
        if (key.tablet.equals(extent) || key.tablet.equals(alternative)) {
            if (tid != key.tid) {
                tid = key.tid;
                defineKey = key;
                // defineKey now aliases the current key object; allocate a
                // fresh one so subsequent reads don't overwrite it.
                key = new LogFileKey();
            }
        }
    }
    // No DEFINE_TABLET for this extent: the log has nothing for this tablet.
    if (tid < 0) {
        throw new UnusedException();
    }
    log.debug("Found tid, seq {} {}", tid, defineKey.seq);
    // Scan start/stop events for this tablet
    // Seek to the first COMPACTION_START at or after the tablet's definition.
    key = defineKey;
    key.event = COMPACTION_START;
    reader.seek(key);
    while (reader.next(key, value)) {
        // LogFileEntry.printEntry(entry);
        // Stop at the first record belonging to a different tablet id.
        if (key.tid != tid)
            break;
        if (key.event == COMPACTION_START) {
            // A START observed while INITIAL implies any earlier compaction completed.
            if (lastStartToFinish.compactionStatus == Status.INITIAL)
                lastStartToFinish.compactionStatus = Status.COMPLETE;
            if (key.seq <= lastStartToFinish.lastStart)
                throw new RuntimeException("Sequence numbers are not increasing for start/stop events: " + key.seq + " vs " + lastStartToFinish.lastStart);
            lastStartToFinish.update(fileno, key.seq);
            // Tablet server finished the minor compaction, but didn't remove the entry from the METADATA table.
            // NOTE(review): this debug line fires for every COMPACTION_START,
            // even when the suffix check below fails — confirm whether it was
            // meant to sit inside the following if.
            log.debug("minor compaction into {} finished, but was still in the METADATA", key.filename);
            // If the compaction's output file is still referenced by the
            // tablet, treat the compaction as already complete (-1 start).
            if (suffixes.contains(getPathSuffix(key.filename)))
                lastStartToFinish.update(-1);
        } else if (key.event == COMPACTION_FINISH) {
            if (key.seq <= lastStartToFinish.lastStart)
                throw new RuntimeException("Sequence numbers are not increasing for start/stop events: " + key.seq + " vs " + lastStartToFinish.lastStart);
            // A FINISH before any START in this session means the matching
            // START must be found in an earlier log file.
            if (lastStartToFinish.compactionStatus == Status.INITIAL)
                lastStartToFinish.compactionStatus = Status.LOOKING_FOR_FINISH;
            else if (lastStartToFinish.lastFinish > lastStartToFinish.lastStart)
                throw new RuntimeException("COMPACTION_FINISH does not have preceding COMPACTION_START event.");
            else
                lastStartToFinish.compactionStatus = Status.COMPLETE;
            lastStartToFinish.update(key.seq);
        } else
            break;
    }
    return tid;
}
Also used : LogFileValue(org.apache.accumulo.tserver.logger.LogFileValue) LogFileKey(org.apache.accumulo.tserver.logger.LogFileKey) KeyExtent(org.apache.accumulo.core.data.impl.KeyExtent) HashSet(java.util.HashSet)

Example 29 with KeyExtent

use of org.apache.accumulo.core.data.impl.KeyExtent in project accumulo by apache.

From the class LogFileKey, the method readFields:

/**
 * Deserializes this log-file key from the given input stream.
 * The first byte selects the {@code LogEvents} type; the remaining fields
 * read depend on that event type.
 *
 * @param in the stream to read from
 * @throws IOException if the event ordinal is out of range or the stream fails
 */
@Override
public void readFields(DataInput in) throws IOException {
    int value = in.readByte();
    // FIX: readByte() is signed, so a corrupted stream can yield a negative
    // ordinal; previously that slipped past the upper-bound check and crashed
    // with ArrayIndexOutOfBoundsException instead of a descriptive IOException.
    if (value < 0 || value >= LogEvents.values().length) {
        throw new IOException("Invalid LogEvent type, got ordinal " + value + ", but only know about " + LogEvents.values().length + " possible types.");
    }
    event = LogEvents.values()[value];
    switch(event) {
        case OPEN:
            // For OPEN records, tid carries the log format version.
            tid = in.readInt();
            tserverSession = in.readUTF();
            if (tid != VERSION) {
                throw new RuntimeException(String.format("Bad version number for log file: expected %d, but saw %d", VERSION, tid));
            }
            break;
        case COMPACTION_START:
            seq = in.readLong();
            tid = in.readInt();
            filename = in.readUTF();
            break;
        case DEFINE_TABLET:
            seq = in.readLong();
            tid = in.readInt();
            tablet = new KeyExtent();
            tablet.readFields(in);
            break;
        // These three events share an identical wire format: seq then tid.
        case COMPACTION_FINISH:
        case MANY_MUTATIONS:
        case MUTATION:
            seq = in.readLong();
            tid = in.readInt();
            break;
        default:
            throw new RuntimeException("Unknown log event type: " + event);
    }
}
Also used : IOException(java.io.IOException) KeyExtent(org.apache.accumulo.core.data.impl.KeyExtent)

Example 30 with KeyExtent

use of org.apache.accumulo.core.data.impl.KeyExtent in project accumulo by apache.

From the class TabletMetadata, the method convertRow:

/**
 * Builds a TabletMetadata from the key/value entries of a single metadata row.
 * All entries must share the same row; encountering a second row or a second
 * current/future location is an error.
 *
 * @param rowIter entries for exactly one metadata row
 * @param fetchedColumns the column set that was requested when scanning
 * @return the populated TabletMetadata
 * @throws IllegalArgumentException on multiple rows or duplicate locations
 */
public static TabletMetadata convertRow(Iterator<Entry<Key, Value>> rowIter, EnumSet<FetchedColumns> fetchedColumns) {
    Objects.requireNonNull(rowIter);
    TabletMetadata te = new TabletMetadata();
    Builder<String> filesBuilder = ImmutableList.builder();
    ByteSequence row = null;
    while (rowIter.hasNext()) {
        Entry<Key, Value> kv = rowIter.next();
        Key k = kv.getKey();
        Value v = kv.getValue();
        Text fam = k.getColumnFamily();
        if (row == null) {
            // First entry: derive table id and end row from the row itself.
            row = k.getRowData();
            KeyExtent ke = new KeyExtent(k.getRow(), (Text) null);
            te.endRow = ke.getEndRow();
            te.tableId = ke.getTableId();
        } else if (!row.equals(k.getRowData())) {
            throw new IllegalArgumentException("Input contains more than one row : " + row + " " + k.getRowData());
        }
        if (PREV_ROW_COLUMN.hasColumns(k)) {
            te.prevEndRow = KeyExtent.decodePrevEndRow(v);
        }
        if (fam.equals(DataFileColumnFamily.NAME)) {
            filesBuilder.add(k.getColumnQualifier().toString());
        } else if (fam.equals(CurrentLocationColumnFamily.NAME) || fam.equals(FutureLocationColumnFamily.NAME)) {
            // A tablet may have at most one current-or-future location.
            if (te.location != null) {
                throw new IllegalArgumentException("Input contains more than one location " + te.location + " " + v);
            }
            LocationType locType = fam.equals(CurrentLocationColumnFamily.NAME) ? LocationType.CURRENT : LocationType.FUTURE;
            te.location = new Location(v.toString(), k.getColumnQualifierData().toString(), locType);
        } else if (fam.equals(LastLocationColumnFamily.NAME)) {
            te.last = new Location(v.toString(), k.getColumnQualifierData().toString(), LocationType.LAST);
        }
    }
    te.files = filesBuilder.build();
    te.fetchedColumns = fetchedColumns;
    return te;
}
Also used : Value(org.apache.accumulo.core.data.Value) Text(org.apache.hadoop.io.Text) KeyExtent(org.apache.accumulo.core.data.impl.KeyExtent) ByteSequence(org.apache.accumulo.core.data.ByteSequence) Key(org.apache.accumulo.core.data.Key)

Aggregations

KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent)142 Text (org.apache.hadoop.io.Text)54 Value (org.apache.accumulo.core.data.Value)47 Key (org.apache.accumulo.core.data.Key)43 Test (org.junit.Test)41 ArrayList (java.util.ArrayList)37 HashMap (java.util.HashMap)33 Range (org.apache.accumulo.core.data.Range)32 TreeMap (java.util.TreeMap)27 Scanner (org.apache.accumulo.core.client.Scanner)27 Mutation (org.apache.accumulo.core.data.Mutation)27 TServerInstance (org.apache.accumulo.server.master.state.TServerInstance)21 HashSet (java.util.HashSet)20 IOException (java.io.IOException)19 List (java.util.List)19 Table (org.apache.accumulo.core.client.impl.Table)19 MetadataTable (org.apache.accumulo.core.metadata.MetadataTable)19 AccumuloException (org.apache.accumulo.core.client.AccumuloException)18 PartialKey (org.apache.accumulo.core.data.PartialKey)17 TKeyExtent (org.apache.accumulo.core.data.thrift.TKeyExtent)17