use of org.apache.accumulo.core.data.impl.KeyExtent in project accumulo by apache.
the class TabletServerLogger method write.
private void write(final Collection<CommitSession> sessions, boolean mincFinish, Writer writer, Retry writeRetry) throws IOException {
  // Work very hard not to lock this during calls to the outside world
  int currentLogId = logId.get();
  boolean success = false;
  while (!success) {
    try {
      // get a reference to the loggers that no other thread can touch
      DfsLogger copy = null;
      AtomicInteger currentId = new AtomicInteger(-1);
      copy = initializeLoggers(currentId);
      currentLogId = currentId.get();
      if (currentLogId == logId.get()) {
        for (CommitSession commitSession : sessions) {
          if (commitSession.beginUpdatingLogsUsed(copy, mincFinish)) {
            try {
              // Scribble out a tablet definition and then write to the metadata table
              defineTablet(commitSession, writeRetry);
            } finally {
              commitSession.finishUpdatingLogsUsed();
            }
            // Need to release
            KeyExtent extent = commitSession.getExtent();
            if (ReplicationConfigurationUtil.isEnabled(extent, tserver.getTableConfiguration(extent))) {
              Status status = StatusUtil.openWithUnknownLength(System.currentTimeMillis());
              log.debug("Writing " + ProtobufUtil.toString(status) + " to metadata table for " + copy.getFileName());
              // Got some new WALs, note this in the metadata table
              ReplicationTableUtil.updateFiles(tserver, commitSession.getExtent(), copy.getFileName(), status);
            }
          }
        }
      }
      // Make sure that the logs haven't changed out from underneath our copy
      if (currentLogId == logId.get()) {
        // write the mutation to the logs
        LoggerOperation lop = writer.write(copy);
        lop.await();
        // double-check: did the log set change?
        success = (currentLogId == logId.get());
      }
    } catch (DfsLogger.LogClosedException ex) {
      writeRetry.logRetry(log, "Logs closed while writing", ex);
    } catch (Exception t) {
      writeRetry.logRetry(log, "Failed to write to WAL", t);
      try {
        // Backoff
        writeRetry.waitForNextAttempt();
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new RuntimeException(e);
      }
    } finally {
      writeRetry.useRetry();
    }
    // Some sort of write failure occurred. Grab the write lock and reset the logs.
    // But since multiple threads will attempt it, only attempt the reset when
    // the logs haven't changed.
    final int finalCurrent = currentLogId;
    if (!success) {
      testLockAndRun(logIdLock, new TestCallWithWriteLock() {

        @Override
        boolean test() {
          return finalCurrent == logId.get();
        }

        @Override
        void withWriteLock() throws IOException {
          close();
          closeForReplication(sessions);
        }
      });
    }
  }
  // if the log gets too big or too old, reset it .. grab the write lock first
  // event, tid, seq overhead
  logSizeEstimate.addAndGet(4 * 3);
  testLockAndRun(logIdLock, new TestCallWithWriteLock() {

    @Override
    boolean test() {
      return (logSizeEstimate.get() > maxSize) || ((System.currentTimeMillis() - createTime) > maxAge);
    }

    @Override
    void withWriteLock() throws IOException {
      close();
      closeForReplication(sessions);
    }
  });
}
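The method above leans on a test-then-act idiom: check a condition cheaply, and only take the write lock when a reset is actually needed, re-testing under the lock because several threads may race to perform the same reset. Below is a minimal, self-contained sketch of that idiom; the class and method names are illustrative stand-ins, not the Accumulo implementation of testLockAndRun.

import java.util.concurrent.locks.ReentrantReadWriteLock;

// Illustrative sketch of the test-then-act idiom used by testLockAndRun above.
class LockPattern {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

  interface TestCall {
    boolean test(); // cheap check, safe under the read lock
    void withWriteLock() throws Exception; // expensive reset, needs the write lock
  }

  void testLockAndRun(TestCall call) throws Exception {
    lock.readLock().lock();
    try {
      if (!call.test())
        return; // nothing to do; the write lock was never taken
    } finally {
      lock.readLock().unlock();
    }
    lock.writeLock().lock();
    try {
      // re-test: another thread may have already performed the reset
      if (call.test())
        call.withWriteLock();
    } finally {
      lock.writeLock().unlock();
    }
  }
}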
use of org.apache.accumulo.core.data.impl.KeyExtent in project accumulo by apache.
the class LogFileKey method readFields.
@Override
public void readFields(DataInput in) throws IOException {
  int value = in.readByte();
  if (value >= LogEvents.values().length) {
    throw new IOException("Invalid LogEvent type, got ordinal " + value + ", but only know about " + LogEvents.values().length + " possible types.");
  }
  event = LogEvents.values()[value];
  switch (event) {
    case OPEN:
      tid = in.readInt();
      tserverSession = in.readUTF();
      if (tid != VERSION) {
        throw new RuntimeException(String.format("Bad version number for log file: expected %d, but saw %d", VERSION, tid));
      }
      break;
    case COMPACTION_FINISH:
      seq = in.readLong();
      tid = in.readInt();
      break;
    case COMPACTION_START:
      seq = in.readLong();
      tid = in.readInt();
      filename = in.readUTF();
      break;
    case DEFINE_TABLET:
      seq = in.readLong();
      tid = in.readInt();
      tablet = new KeyExtent();
      tablet.readFields(in);
      break;
    case MANY_MUTATIONS:
      seq = in.readLong();
      tid = in.readInt();
      break;
    case MUTATION:
      seq = in.readLong();
      tid = in.readInt();
      break;
    default:
      throw new RuntimeException("Unknown log event type: " + event);
  }
}
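readFields decodes a one-byte event ordinal followed by event-specific fields. The hypothetical round trip below shows that layout with a stand-in enum, mirroring the seq-then-tid order of the DEFINE_TABLET branch above (minus the KeyExtent); DemoEvent and the surrounding class are illustrative assumptions, not Accumulo's actual LogFileKey format.

import java.io.*;

// Hypothetical round trip of an ordinal-tagged record: one byte for the event
// ordinal, then event-specific fields such as seq and tid.
enum DemoEvent { OPEN, DEFINE_TABLET }

public class OrdinalTagDemo {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bytes);
    out.writeByte(DemoEvent.DEFINE_TABLET.ordinal()); // event tag
    out.writeLong(42L); // seq
    out.writeInt(7); // tid
    out.flush();

    DataInput in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
    int value = in.readByte();
    // also reject negative ordinals, since readByte() can return -128..127
    if (value < 0 || value >= DemoEvent.values().length)
      throw new IOException("Invalid event ordinal " + value);
    DemoEvent event = DemoEvent.values()[value];
    long seq = in.readLong();
    int tid = in.readInt();
    System.out.println(event + " seq=" + seq + " tid=" + tid);
  }
}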
use of org.apache.accumulo.core.data.impl.KeyExtent in project accumulo by apache.
the class MetadataLocationObtainer method lookupTablets.
@Override
public List<TabletLocation> lookupTablets(ClientContext context, String tserver, Map<KeyExtent, List<Range>> tabletsRanges, TabletLocator parent) throws AccumuloSecurityException, AccumuloException {
  final TreeMap<Key, Value> results = new TreeMap<>();
  ResultReceiver rr = new ResultReceiver() {

    @Override
    public void receive(List<Entry<Key, Value>> entries) {
      for (Entry<Key, Value> entry : entries) {
        try {
          results.putAll(WholeRowIterator.decodeRow(entry.getKey(), entry.getValue()));
        } catch (IOException e) {
          throw new RuntimeException(e);
        }
      }
    }
  };
  ScannerOptions opts = null;
  try (SettableScannerOptions unsetOpts = new SettableScannerOptions()) {
    opts = unsetOpts.setColumns(locCols);
  }
  Map<KeyExtent, List<Range>> unscanned = new HashMap<>();
  Map<KeyExtent, List<Range>> failures = new HashMap<>();
  try {
    TabletServerBatchReaderIterator.doLookup(context, tserver, tabletsRanges, failures, unscanned, rr, columns, opts, Authorizations.EMPTY);
    if (failures.size() > 0) {
      // invalidate extents in the parent's cache
      if (log.isTraceEnabled())
        log.trace("lookupTablets failed for {} extents", failures.size());
      parent.invalidateCache(failures.keySet());
    }
  } catch (IOException e) {
    log.trace("lookupTablets failed server={}", tserver, e);
    parent.invalidateCache(context.getInstance(), tserver);
  } catch (AccumuloServerException e) {
    log.trace("lookupTablets failed server={}", tserver, e);
    throw e;
  }
  return MetadataLocationObtainer.getMetadataLocationEntries(results).getLocations();
}
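The ResultReceiver above assumes the scan is wrapped with WholeRowIterator, so each received entry is an entire metadata row packed into a single Key/Value pair. Here is a small sketch of the unpacking step using the same WholeRowIterator.decodeRow call as the snippet; printRow is a hypothetical helper, not part of Accumulo.

import java.io.IOException;
import java.util.Map;
import java.util.SortedMap;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.user.WholeRowIterator;

// Sketch: unpack one whole-row-encoded entry back into its individual columns,
// as the ResultReceiver above does before merging them into its TreeMap.
class DecodeRowSketch {
  static void printRow(Key rowKey, Value rowValue) throws IOException {
    SortedMap<Key, Value> columns = WholeRowIterator.decodeRow(rowKey, rowValue);
    for (Map.Entry<Key, Value> col : columns.entrySet())
      System.out.println(col.getKey() + " -> " + col.getValue());
  }
}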
use of org.apache.accumulo.core.data.impl.KeyExtent in project accumulo by apache.
the class TabletMetadata method convertRow.
public static TabletMetadata convertRow(Iterator<Entry<Key, Value>> rowIter, EnumSet<FetchedColumns> fetchedColumns) {
  Objects.requireNonNull(rowIter);
  TabletMetadata te = new TabletMetadata();
  Builder<String> filesBuilder = ImmutableList.builder();
  ByteSequence row = null;
  while (rowIter.hasNext()) {
    Entry<Key, Value> kv = rowIter.next();
    Key k = kv.getKey();
    Value v = kv.getValue();
    Text fam = k.getColumnFamily();
    if (row == null) {
      row = k.getRowData();
      KeyExtent ke = new KeyExtent(k.getRow(), (Text) null);
      te.endRow = ke.getEndRow();
      te.tableId = ke.getTableId();
    } else if (!row.equals(k.getRowData())) {
      throw new IllegalArgumentException("Input contains more than one row : " + row + " " + k.getRowData());
    }
    if (PREV_ROW_COLUMN.hasColumns(k)) {
      te.prevEndRow = KeyExtent.decodePrevEndRow(v);
    }
    if (fam.equals(DataFileColumnFamily.NAME)) {
      filesBuilder.add(k.getColumnQualifier().toString());
    } else if (fam.equals(CurrentLocationColumnFamily.NAME)) {
      if (te.location != null) {
        throw new IllegalArgumentException("Input contains more than one location " + te.location + " " + v);
      }
      te.location = new Location(v.toString(), k.getColumnQualifierData().toString(), LocationType.CURRENT);
    } else if (fam.equals(FutureLocationColumnFamily.NAME)) {
      if (te.location != null) {
        throw new IllegalArgumentException("Input contains more than one location " + te.location + " " + v);
      }
      te.location = new Location(v.toString(), k.getColumnQualifierData().toString(), LocationType.FUTURE);
    } else if (fam.equals(LastLocationColumnFamily.NAME)) {
      te.last = new Location(v.toString(), k.getColumnQualifierData().toString(), LocationType.LAST);
    }
  }
  te.files = filesBuilder.build();
  te.fetchedColumns = fetchedColumns;
  return te;
}
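convertRow recovers the table id and end row by parsing the metadata row itself, via the same KeyExtent(Text, Text) constructor seen above. A hedged sketch of that row encoding, assuming a flattened extent of the form tableId;endRow (the example table id and end row are made up):

import org.apache.accumulo.core.data.impl.KeyExtent;
import org.apache.hadoop.io.Text;

// Sketch of the metadata-row parsing convertRow relies on: a metadata row is
// "<tableId>;<endRow>" (or "<tableId><" for the default tablet), so building a
// KeyExtent from the row alone, with a null prevEndRow, recovers both parts.
class MetadataRowSketch {
  public static void main(String[] args) {
    KeyExtent ke = new KeyExtent(new Text("3a;m"), (Text) null);
    System.out.println(ke.getTableId()); // 3a
    System.out.println(ke.getEndRow()); // m
  }
}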
use of org.apache.accumulo.core.data.impl.KeyExtent in project accumulo by apache.
the class ChaoticLoadBalancer method balance.
@Override
public long balance(SortedMap<TServerInstance, TabletServerStatus> current, Set<KeyExtent> migrations, List<TabletMigration> migrationsOut) {
  Map<TServerInstance, Long> numTablets = new HashMap<>();
  List<TServerInstance> underCapacityTServer = new ArrayList<>();
  if (!migrations.isEmpty()) {
    outstandingMigrations.migrations = migrations;
    constraintNotMet(outstandingMigrations);
    return 100;
  }
  resetBalancerErrors();
  boolean moveMetadata = r.nextInt(4) == 0;
  long totalTablets = 0;
  for (Entry<TServerInstance, TabletServerStatus> e : current.entrySet()) {
    long tabletCount = 0;
    for (TableInfo ti : e.getValue().getTableMap().values()) {
      tabletCount += ti.tablets;
    }
    numTablets.put(e.getKey(), tabletCount);
    underCapacityTServer.add(e.getKey());
    totalTablets += tabletCount;
  }
  // totalTablets is fuzzy due to asynchronicity of the stats
  // *1.2 to handle fuzziness, and prevent locking for 'perfect' balancing scenarios
  long avg = (long) Math.ceil(((double) totalTablets) / current.size() * 1.2);
  for (Entry<TServerInstance, TabletServerStatus> e : current.entrySet()) {
    for (String tableId : e.getValue().getTableMap().keySet()) {
      Table.ID id = Table.ID.of(tableId);
      if (!moveMetadata && MetadataTable.ID.equals(id))
        continue;
      try {
        for (TabletStats ts : getOnlineTabletsForTable(e.getKey(), id)) {
          KeyExtent ke = new KeyExtent(ts.extent);
          int index = r.nextInt(underCapacityTServer.size());
          TServerInstance dest = underCapacityTServer.get(index);
          if (dest.equals(e.getKey()))
            continue;
          migrationsOut.add(new TabletMigration(ke, e.getKey(), dest));
          if (numTablets.put(dest, numTablets.get(dest) + 1) > avg)
            underCapacityTServer.remove(index);
          if (numTablets.put(e.getKey(), numTablets.get(e.getKey()) - 1) <= avg && !underCapacityTServer.contains(e.getKey()))
            underCapacityTServer.add(e.getKey());
          // We can get some craziness with only 1 tserver, so let's make sure there's always an option!
          if (underCapacityTServer.isEmpty())
            underCapacityTServer.addAll(numTablets.keySet());
        }
      } catch (ThriftSecurityException e1) {
        // Shouldn't happen, but carry on if it does
        log.debug("Encountered ThriftSecurityException. This should not happen. Carrying on anyway.", e1);
      } catch (TException e1) {
        // Shouldn't happen, but carry on if it does
        log.debug("Encountered TException. This should not happen. Carrying on anyway.", e1);
      }
    }
  }
  return 100;
}
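Both capacity checks above exploit the fact that Map.put returns the previous value, so the comparison against avg is made on the count as it was before the increment or decrement. A standalone demonstration of that idiom (the map contents and names are illustrative):

import java.util.HashMap;
import java.util.Map;

// Sketch of the Map.put idiom used twice in balance() above: put() returns the
// PREVIOUS value, so "numTablets.put(dest, n + 1) > avg" compares the count
// before the increment, even though the map now holds the incremented count.
class PutIdiomDemo {
  public static void main(String[] args) {
    Map<String, Long> numTablets = new HashMap<>();
    numTablets.put("tserver1", 5L);
    long avg = 5;
    // increments the stored count to 6, but compares the old value (5) to avg
    boolean wasOverAvg = numTablets.put("tserver1", numTablets.get("tserver1") + 1) > avg;
    System.out.println(wasOverAvg); // false: the old count was 5, not > 5
    System.out.println(numTablets.get("tserver1")); // 6
  }
}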