
Example 46 with KeyExtent

Use of org.apache.accumulo.core.data.impl.KeyExtent in project accumulo by apache.

The class TabletServerLogger, method write. It appends a batch of commit sessions to the write-ahead log without holding the log lock, records replication status for replicated tables, and only counts the attempt as successful once the write completes against a log set that has not changed underneath it.

private void write(final Collection<CommitSession> sessions, boolean mincFinish, Writer writer, Retry writeRetry) throws IOException {
    // Work very hard not to lock this during calls to the outside world
    int currentLogId = logId.get();
    boolean success = false;
    while (!success) {
        try {
            // get a reference to the loggers that no other thread can touch
            DfsLogger copy = null;
            AtomicInteger currentId = new AtomicInteger(-1);
            copy = initializeLoggers(currentId);
            currentLogId = currentId.get();
            if (currentLogId == logId.get()) {
                for (CommitSession commitSession : sessions) {
                    if (commitSession.beginUpdatingLogsUsed(copy, mincFinish)) {
                        try {
                            // Scribble out a tablet definition and then write to the metadata table
                            defineTablet(commitSession, writeRetry);
                        } finally {
                            commitSession.finishUpdatingLogsUsed();
                        }
                        // Need to release
                        KeyExtent extent = commitSession.getExtent();
                        if (ReplicationConfigurationUtil.isEnabled(extent, tserver.getTableConfiguration(extent))) {
                            Status status = StatusUtil.openWithUnknownLength(System.currentTimeMillis());
                            log.debug("Writing " + ProtobufUtil.toString(status) + " to metadata table for " + copy.getFileName());
                            // Got some new WALs, note this in the metadata table
                            ReplicationTableUtil.updateFiles(tserver, commitSession.getExtent(), copy.getFileName(), status);
                        }
                    }
                }
            }
            // Make sure that the logs haven't changed out from underneath our copy
            if (currentLogId == logId.get()) {
                // write the mutation to the logs
                LoggerOperation lop = writer.write(copy);
                lop.await();
                // double-check: did the log set change?
                success = (currentLogId == logId.get());
            }
        } catch (DfsLogger.LogClosedException ex) {
            writeRetry.logRetry(log, "Logs closed while writing", ex);
        } catch (Exception t) {
            writeRetry.logRetry(log, "Failed to write to WAL", t);
            try {
                // Backoff
                writeRetry.waitForNextAttempt();
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new RuntimeException(e);
            }
        } finally {
            writeRetry.useRetry();
        }
        // Some sort of write failure occurred. Grab the write lock and reset the logs.
        // But since multiple threads will attempt it, only attempt the reset when
        // the logs haven't changed.
        final int finalCurrent = currentLogId;
        if (!success) {
            testLockAndRun(logIdLock, new TestCallWithWriteLock() {

                @Override
                boolean test() {
                    return finalCurrent == logId.get();
                }

                @Override
                void withWriteLock() throws IOException {
                    close();
                    closeForReplication(sessions);
                }
            });
        }
    }
    // if the log gets too big or too old, reset it .. grab the write lock first
    // event, tid, seq overhead
    logSizeEstimate.addAndGet(4 * 3);
    testLockAndRun(logIdLock, new TestCallWithWriteLock() {

        @Override
        boolean test() {
            return (logSizeEstimate.get() > maxSize) || ((System.currentTimeMillis() - createTime) > maxAge);
        }

        @Override
        void withWriteLock() throws IOException {
            close();
            closeForReplication(sessions);
        }
    });
}
Also used: Status (org.apache.accumulo.server.replication.proto.Replication.Status), CommitSession (org.apache.accumulo.tserver.tablet.CommitSession), IOException (java.io.IOException), KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent), LoggerOperation (org.apache.accumulo.tserver.log.DfsLogger.LoggerOperation), AtomicInteger (java.util.concurrent.atomic.AtomicInteger)
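
The core of write is an optimistic retry: it snapshots logId before touching the logs, performs the write without holding a lock, and only counts the attempt as a success if the snapshot still matches afterward. A minimal standalone sketch of that pattern, with hypothetical attemptWrite and resetLogs stand-ins that are not part of the Accumulo API:

import java.util.concurrent.atomic.AtomicInteger;

class OptimisticLogWriter {

    private final AtomicInteger logId = new AtomicInteger();

    void write(Runnable attemptWrite, Runnable resetLogs) {
        boolean success = false;
        while (!success) {
            // remember which log set we started with
            int snapshot = logId.get();
            // write without holding a lock
            attemptWrite.run();
            // only succeed if no other thread swapped the log set underneath us
            success = (snapshot == logId.get());
            if (!success) {
                // mirror the testLockAndRun step: one thread resets, the rest re-check
                resetLogs.run();
            }
        }
    }
}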

Example 47 with KeyExtent

Use of org.apache.accumulo.core.data.impl.KeyExtent in project accumulo by apache.

The class LogFileKey, method readFields. It deserializes a log file key by reading a one-byte event tag and then the fields that event carries; a DEFINE_TABLET event includes a serialized KeyExtent.

@Override
public void readFields(DataInput in) throws IOException {
    int value = in.readByte();
    // guard both bounds: readByte() is signed, so a corrupt tag can be negative
    if (value < 0 || value >= LogEvents.values().length) {
        throw new IOException("Invalid LogEvent type, got ordinal " + value + ", but only know about " + LogEvents.values().length + " possible types.");
    }
    event = LogEvents.values()[value];
    switch(event) {
        case OPEN:
            tid = in.readInt();
            tserverSession = in.readUTF();
            if (tid != VERSION) {
                throw new RuntimeException(String.format("Bad version number for log file: expected %d, but saw %d", VERSION, tid));
            }
            break;
        case COMPACTION_FINISH:
            seq = in.readLong();
            tid = in.readInt();
            break;
        case COMPACTION_START:
            seq = in.readLong();
            tid = in.readInt();
            filename = in.readUTF();
            break;
        case DEFINE_TABLET:
            seq = in.readLong();
            tid = in.readInt();
            tablet = new KeyExtent();
            tablet.readFields(in);
            break;
        case MANY_MUTATIONS:
            seq = in.readLong();
            tid = in.readInt();
            break;
        case MUTATION:
            seq = in.readLong();
            tid = in.readInt();
            break;
        default:
            throw new RuntimeException("Unknown log event type: " + event);
    }
}
Also used: IOException (java.io.IOException), KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent)
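
readFields follows the standard Hadoop Writable pattern: a one-byte event tag selects which fields follow, and any matching write method must emit those fields in exactly the same order. A simplified, self-contained illustration of that shape (this is not the real LogFileKey, just a made-up record with the same structure):

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.Writable;

class EventRecord implements Writable {

    enum Event { OPEN, MUTATION }

    Event event;
    long seq;
    int tid;

    @Override
    public void write(DataOutput out) throws IOException {
        // the one-byte tag selects the field layout that follows
        out.writeByte(event.ordinal());
        if (event == Event.MUTATION) {
            out.writeLong(seq);
            out.writeInt(tid);
        }
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        int tag = in.readByte();
        if (tag < 0 || tag >= Event.values().length) {
            throw new IOException("Invalid event tag: " + tag);
        }
        event = Event.values()[tag];
        // fields must be read back in exactly the order write() emitted them
        if (event == Event.MUTATION) {
            seq = in.readLong();
            tid = in.readInt();
        }
    }
}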

Example 48 with KeyExtent

Use of org.apache.accumulo.core.data.impl.KeyExtent in project accumulo by apache.

The class MetadataLocationObtainer, method lookupTablets. It scans metadata tablets on the given tablet server, decodes the whole-row-encoded results, invalidates the parent locator's cache for any failed extents, and returns the tablet locations found.

@Override
public List<TabletLocation> lookupTablets(ClientContext context, String tserver, Map<KeyExtent, List<Range>> tabletsRanges, TabletLocator parent) throws AccumuloSecurityException, AccumuloException {
    final TreeMap<Key, Value> results = new TreeMap<>();
    ResultReceiver rr = new ResultReceiver() {

        @Override
        public void receive(List<Entry<Key, Value>> entries) {
            for (Entry<Key, Value> entry : entries) {
                try {
                    results.putAll(WholeRowIterator.decodeRow(entry.getKey(), entry.getValue()));
                } catch (IOException e) {
                    throw new RuntimeException(e);
                }
            }
        }
    };
    ScannerOptions opts = null;
    try (SettableScannerOptions unsetOpts = new SettableScannerOptions()) {
        opts = unsetOpts.setColumns(locCols);
    }
    Map<KeyExtent, List<Range>> unscanned = new HashMap<>();
    Map<KeyExtent, List<Range>> failures = new HashMap<>();
    try {
        TabletServerBatchReaderIterator.doLookup(context, tserver, tabletsRanges, failures, unscanned, rr, columns, opts, Authorizations.EMPTY);
        if (failures.size() > 0) {
            // invalidate extents in parents cache
            if (log.isTraceEnabled())
                log.trace("lookupTablets failed for {} extents", failures.size());
            parent.invalidateCache(failures.keySet());
        }
    } catch (IOException e) {
        log.trace("lookupTablets failed server={}", tserver, e);
        parent.invalidateCache(context.getInstance(), tserver);
    } catch (AccumuloServerException e) {
        log.trace("lookupTablets failed server={}", tserver, e);
        throw e;
    }
    return MetadataLocationObtainer.getMetadataLocationEntries(results).getLocations();
}
Also used: HashMap (java.util.HashMap), IOException (java.io.IOException), TreeMap (java.util.TreeMap), KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent), AccumuloServerException (org.apache.accumulo.core.client.impl.AccumuloServerException), Value (org.apache.accumulo.core.data.Value), ArrayList (java.util.ArrayList), List (java.util.List), ScannerOptions (org.apache.accumulo.core.client.impl.ScannerOptions), Key (org.apache.accumulo.core.data.Key), PartialKey (org.apache.accumulo.core.data.PartialKey), ResultReceiver (org.apache.accumulo.core.client.impl.TabletServerBatchReaderIterator.ResultReceiver)
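
doLookup hands each metadata row back packed into a single key/value pair, which is why receive() runs WholeRowIterator.decodeRow on every entry. A minimal round trip through the same encoding, with made-up row and column values:

import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.SortedMap;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.user.WholeRowIterator;
import org.apache.hadoop.io.Text;

public class WholeRowRoundTrip {

    public static void main(String[] args) throws Exception {
        Key loc = new Key(new Text("2;m"), new Text("loc"), new Text("s1"));
        Key file = new Key(new Text("2;m"), new Text("file"), new Text("/t-0001/A.rf"));
        Value locVal = new Value("host:9997".getBytes(StandardCharsets.UTF_8));
        Value fileVal = new Value("1000,10".getBytes(StandardCharsets.UTF_8));
        // pack one row's columns into a single key/value pair, as the server side does
        Value packed = WholeRowIterator.encodeRow(Arrays.asList(loc, file), Arrays.asList(locVal, fileVal));
        // unpack on the client side, which is exactly what receive() does above
        SortedMap<Key, Value> row = WholeRowIterator.decodeRow(new Key(new Text("2;m")), packed);
        row.forEach((k, v) -> System.out.println(k + " -> " + v));
    }
}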

Example 49 with KeyExtent

Use of org.apache.accumulo.core.data.impl.KeyExtent in project accumulo by apache.

The class TabletMetadata, method convertRow. It folds the entries of a single metadata-table row into a TabletMetadata, deriving the table id and end row from a KeyExtent built on the row itself.

public static TabletMetadata convertRow(Iterator<Entry<Key, Value>> rowIter, EnumSet<FetchedColumns> fetchedColumns) {
    Objects.requireNonNull(rowIter);
    TabletMetadata te = new TabletMetadata();
    Builder<String> filesBuilder = ImmutableList.builder();
    ByteSequence row = null;
    while (rowIter.hasNext()) {
        Entry<Key, Value> kv = rowIter.next();
        Key k = kv.getKey();
        Value v = kv.getValue();
        Text fam = k.getColumnFamily();
        if (row == null) {
            row = k.getRowData();
            KeyExtent ke = new KeyExtent(k.getRow(), (Text) null);
            te.endRow = ke.getEndRow();
            te.tableId = ke.getTableId();
        } else if (!row.equals(k.getRowData())) {
            throw new IllegalArgumentException("Input contains more than one row : " + row + " " + k.getRowData());
        }
        if (PREV_ROW_COLUMN.hasColumns(k)) {
            te.prevEndRow = KeyExtent.decodePrevEndRow(v);
        }
        if (fam.equals(DataFileColumnFamily.NAME)) {
            filesBuilder.add(k.getColumnQualifier().toString());
        } else if (fam.equals(CurrentLocationColumnFamily.NAME)) {
            if (te.location != null) {
                throw new IllegalArgumentException("Input contains more than one location " + te.location + " " + v);
            }
            te.location = new Location(v.toString(), k.getColumnQualifierData().toString(), LocationType.CURRENT);
        } else if (fam.equals(FutureLocationColumnFamily.NAME)) {
            if (te.location != null) {
                throw new IllegalArgumentException("Input contains more than one location " + te.location + " " + v);
            }
            te.location = new Location(v.toString(), k.getColumnQualifierData().toString(), LocationType.FUTURE);
        } else if (fam.equals(LastLocationColumnFamily.NAME)) {
            te.last = new Location(v.toString(), k.getColumnQualifierData().toString(), LocationType.LAST);
        }
    }
    te.files = filesBuilder.build();
    te.fetchedColumns = fetchedColumns;
    return te;
}
Also used: Value (org.apache.accumulo.core.data.Value), Text (org.apache.hadoop.io.Text), KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent), ByteSequence (org.apache.accumulo.core.data.ByteSequence), Key (org.apache.accumulo.core.data.Key)
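
The KeyExtent constructor used above does the real work of the first branch: a metadata row such as 2;m flattens the table id and end row into one Text value, and new KeyExtent(row, (Text) null) splits them apart again. A small demonstration with a made-up row (the default tablet of a table uses the table id followed by < instead):

import org.apache.accumulo.core.data.impl.KeyExtent;
import org.apache.hadoop.io.Text;

public class MetadataRowDemo {

    public static void main(String[] args) {
        // metadata row format: <tableId>;<endRow>
        KeyExtent ke = new KeyExtent(new Text("2;m"), (Text) null);
        System.out.println(ke.getTableId()); // 2
        System.out.println(ke.getEndRow()); // m
    }
}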

Example 50 with KeyExtent

Use of org.apache.accumulo.core.data.impl.KeyExtent in project accumulo by apache.

The class ChaoticLoadBalancer, method balance. It migrates tablets to randomly chosen servers, treating a server as under capacity while its tablet count stays at or below 1.2 times the cluster average.

@Override
public long balance(SortedMap<TServerInstance, TabletServerStatus> current, Set<KeyExtent> migrations, List<TabletMigration> migrationsOut) {
    Map<TServerInstance, Long> numTablets = new HashMap<>();
    List<TServerInstance> underCapacityTServer = new ArrayList<>();
    if (!migrations.isEmpty()) {
        outstandingMigrations.migrations = migrations;
        constraintNotMet(outstandingMigrations);
        return 100;
    }
    resetBalancerErrors();
    boolean moveMetadata = r.nextInt(4) == 0;
    long totalTablets = 0;
    for (Entry<TServerInstance, TabletServerStatus> e : current.entrySet()) {
        long tabletCount = 0;
        for (TableInfo ti : e.getValue().getTableMap().values()) {
            tabletCount += ti.tablets;
        }
        numTablets.put(e.getKey(), tabletCount);
        underCapacityTServer.add(e.getKey());
        totalTablets += tabletCount;
    }
    // totalTablets is fuzzy due to asynchronicity of the stats
    // *1.2 to handle fuzziness, and prevent locking for 'perfect' balancing scenarios
    long avg = (long) Math.ceil(((double) totalTablets) / current.size() * 1.2);
    for (Entry<TServerInstance, TabletServerStatus> e : current.entrySet()) {
        for (String tableId : e.getValue().getTableMap().keySet()) {
            Table.ID id = Table.ID.of(tableId);
            if (!moveMetadata && MetadataTable.ID.equals(id))
                continue;
            try {
                for (TabletStats ts : getOnlineTabletsForTable(e.getKey(), id)) {
                    KeyExtent ke = new KeyExtent(ts.extent);
                    int index = r.nextInt(underCapacityTServer.size());
                    TServerInstance dest = underCapacityTServer.get(index);
                    if (dest.equals(e.getKey()))
                        continue;
                    migrationsOut.add(new TabletMigration(ke, e.getKey(), dest));
                    if (numTablets.put(dest, numTablets.get(dest) + 1) > avg)
                        underCapacityTServer.remove(index);
                    if (numTablets.put(e.getKey(), numTablets.get(e.getKey()) - 1) <= avg && !underCapacityTServer.contains(e.getKey()))
                        underCapacityTServer.add(e.getKey());
                    // We can get some craziness with only 1 tserver, so lets make sure there's always an option!
                    if (underCapacityTServer.isEmpty())
                        underCapacityTServer.addAll(numTablets.keySet());
                }
            } catch (ThriftSecurityException e1) {
                // Shouldn't happen, but carry on if it does
                log.debug("Encountered ThriftSecurityException.  This should not happen.  Carrying on anyway.", e1);
            } catch (TException e1) {
                // Shouldn't happen, but carry on if it does
                log.debug("Encountered TException.  This should not happen.  Carrying on anyway.", e1);
            }
        }
    }
    return 100;
}
Also used: TException (org.apache.thrift.TException), Table (org.apache.accumulo.core.client.impl.Table), MetadataTable (org.apache.accumulo.core.metadata.MetadataTable), TabletMigration (org.apache.accumulo.server.master.state.TabletMigration), HashMap (java.util.HashMap), TabletStats (org.apache.accumulo.core.tabletserver.thrift.TabletStats), ArrayList (java.util.ArrayList), KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent), ThriftSecurityException (org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException), TServerInstance (org.apache.accumulo.server.master.state.TServerInstance), TableInfo (org.apache.accumulo.core.master.thrift.TableInfo), TabletServerStatus (org.apache.accumulo.core.master.thrift.TabletServerStatus)
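
The only real arithmetic in balance is the capacity threshold: a server counts as under capacity while it holds at most ceil(totalTablets / servers * 1.2) tablets, the 1.2 factor giving 20% headroom for stale, asynchronously gathered counts. A tiny check of the formula with made-up numbers:

public class ChaoticThreshold {

    public static void main(String[] args) {
        long totalTablets = 1000;
        int servers = 7;
        // same formula as balance(): 20% headroom over the true per-server average
        long avg = (long) Math.ceil(((double) totalTablets) / servers * 1.2);
        System.out.println(avg); // 172 = ceil(142.86 * 1.2)
    }
}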

Aggregations

KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent): 219
Test (org.junit.Test): 84
Text (org.apache.hadoop.io.Text): 82
Value (org.apache.accumulo.core.data.Value): 67
ArrayList (java.util.ArrayList): 63
Key (org.apache.accumulo.core.data.Key): 59
HashMap (java.util.HashMap): 50
Mutation (org.apache.accumulo.core.data.Mutation): 40
Scanner (org.apache.accumulo.core.client.Scanner): 39
Range (org.apache.accumulo.core.data.Range): 39
TreeMap (java.util.TreeMap): 37
TServerInstance (org.apache.accumulo.server.master.state.TServerInstance): 36
Table (org.apache.accumulo.core.client.impl.Table): 34
HashSet (java.util.HashSet): 30
List (java.util.List): 29
TKeyExtent (org.apache.accumulo.core.data.thrift.TKeyExtent): 29
Connector (org.apache.accumulo.core.client.Connector): 28
IOException (java.io.IOException): 27
MetadataTable (org.apache.accumulo.core.metadata.MetadataTable): 25
DataFileValue (org.apache.accumulo.core.metadata.schema.DataFileValue): 25