Use of org.apache.accumulo.core.client.TableOfflineException in project accumulo by apache.
The class TableOperationsImpl, method splitRangeByTablets.
@Override
public Set<Range> splitRangeByTablets(String tableName, Range range, int maxSplits)
    throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
  checkArgument(tableName != null, "tableName is null");
  checkArgument(range != null, "range is null");
  if (maxSplits < 1)
    throw new IllegalArgumentException("maximum splits must be >= 1");
  if (maxSplits == 1)
    return Collections.singleton(range);

  Random random = new Random();
  Map<String,Map<KeyExtent,List<Range>>> binnedRanges = new HashMap<>();
  Table.ID tableId = Tables.getTableId(context.getInstance(), tableName);
  TabletLocator tl = TabletLocator.getLocator(context, tableId);
  // it's possible that the cache could contain complete, but old information about a table's
  // tablets... so clear it
  tl.invalidateCache();
  while (!tl.binRanges(context, Collections.singletonList(range), binnedRanges).isEmpty()) {
    // binRanges returns the ranges it could not bin; before retrying, distinguish a deleted
    // or offline table from a transient failure
    if (!Tables.exists(context.getInstance(), tableId))
      throw new TableDeletedException(tableId.canonicalID());
    if (Tables.getTableState(context.getInstance(), tableId) == TableState.OFFLINE)
      throw new TableOfflineException(context.getInstance(), tableId.canonicalID());
    log.warn("Unable to locate bins for specified range. Retrying.");
    // sleep randomly between 100 and 200ms
    sleepUninterruptibly(100 + random.nextInt(100), TimeUnit.MILLISECONDS);
    binnedRanges.clear();
    tl.invalidateCache();
  }

  // group key extents to get <= maxSplits
  LinkedList<KeyExtent> unmergedExtents = new LinkedList<>();
  List<KeyExtent> mergedExtents = new ArrayList<>();

  for (Map<KeyExtent,List<Range>> map : binnedRanges.values())
    unmergedExtents.addAll(map.keySet());

  // the sort method is efficient for a linked list
  Collections.sort(unmergedExtents);

  while (unmergedExtents.size() + mergedExtents.size() > maxSplits) {
    if (unmergedExtents.size() >= 2) {
      // merge adjacent extents pairwise until the total fits within maxSplits
      KeyExtent first = unmergedExtents.removeFirst();
      KeyExtent second = unmergedExtents.removeFirst();
      first.setEndRow(second.getEndRow());
      mergedExtents.add(first);
    } else {
      // fewer than two unmerged extents remain; start another merge pass over the merged list
      mergedExtents.addAll(unmergedExtents);
      unmergedExtents.clear();
      unmergedExtents.addAll(mergedExtents);
      mergedExtents.clear();
    }
  }

  mergedExtents.addAll(unmergedExtents);

  Set<Range> ranges = new HashSet<>();
  for (KeyExtent k : mergedExtents)
    ranges.add(k.toDataRange().clip(range));

  return ranges;
}
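
For context, a minimal caller-side sketch follows; the Connector and the table name "mytable" are assumptions for illustration, not taken from the source above. splitRangeByTablets is exposed through the public TableOperations interface; it retries internally while ranges cannot be binned, but propagates TableOfflineException, an unchecked exception, once the table is found to be offline.

// Sketch only: "conn" and "mytable" are hypothetical.
void scanTabletAligned(Connector conn)
    throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
  try {
    // ask for at most 10 tablet-aligned sub-ranges covering rows "a" through "z"
    Set<Range> chunks = conn.tableOperations()
        .splitRangeByTablets("mytable", new Range("a", "z"), 10);
    for (Range chunk : chunks) {
      // hand each chunk to its own scanner or worker thread
    }
  } catch (TableOfflineException e) {
    // unchecked: the table exists but is offline; bring it online before retrying
  }
}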
Use of org.apache.accumulo.core.client.TableOfflineException in project accumulo by apache.
The class ConditionalWriterImpl, method queue.
private void queue(List<QCMutation> mutations) {
  List<QCMutation> failures = new ArrayList<>();
  Map<String,TabletServerMutations<QCMutation>> binnedMutations = new HashMap<>();

  try {
    locator.binMutations(context, mutations, binnedMutations, failures);

    // if every mutation failed to bin, check whether the table was deleted or taken offline
    if (failures.size() == mutations.size()) {
      if (!Tables.exists(context.getInstance(), tableId))
        throw new TableDeletedException(tableId.canonicalID());
      else if (Tables.getTableState(context.getInstance(), tableId) == TableState.OFFLINE)
        throw new TableOfflineException(context.getInstance(), tableId.canonicalID());
    }
  } catch (Exception e) {
    // report the exception through each mutation's Result instead of throwing it here
    for (QCMutation qcm : mutations)
      qcm.queueResult(new Result(e, qcm, null));

    // do not want to queue anything that was put in before binMutations() failed
    failures.clear();
    binnedMutations.clear();
  }

  if (failures.size() > 0)
    queueRetry(failures, null);

  for (Entry<String,TabletServerMutations<QCMutation>> entry : binnedMutations.entrySet()) {
    queue(entry.getKey(), entry.getValue());
  }
}
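
Because queue() is private and swallows the exception, a caller never sees TableOfflineException at write() time; it is attached to each mutation's Result and rethrown when the status is read. A minimal caller sketch, where the Connector, table name, and cell values are assumptions for illustration:

// Sketch only: "conn", "mytable", and the cell values are hypothetical.
void writeConditionally(Connector conn) throws TableNotFoundException {
  ConditionalWriter writer =
      conn.createConditionalWriter("mytable", new ConditionalWriterConfig());
  // mutate row1 only if fam:qual is currently absent
  ConditionalMutation cm = new ConditionalMutation("row1", new Condition("fam", "qual"));
  cm.put("fam", "qual", "value");
  ConditionalWriter.Result result = writer.write(cm);
  try {
    System.out.println(result.getStatus());
  } catch (AccumuloException | AccumuloSecurityException e) {
    // an exception recorded by queue() above (e.g. a wrapped TableOfflineException)
    // resurfaces here, when the status is read, rather than at write() time
  }
  writer.close();
}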
Use of org.apache.accumulo.core.client.TableOfflineException in project gora by apache.
The class AccumuloStore, method getPartitions.
@Override
public List<PartitionQuery<K,T>> getPartitions(Query<K,T> query) throws GoraException {
  try {
    TabletLocator tl;
    if (conn instanceof MockConnector)
      tl = new MockTabletLocator();
    else
      tl = TabletLocator.getLocator(
          new ClientContext(conn.getInstance(), credentials,
              AccumuloConfiguration.getTableConfiguration(conn,
                  Tables.getTableId(conn.getInstance(), mapping.tableName))),
          new Text(Tables.getTableId(conn.getInstance(), mapping.tableName)));

    Map<String,Map<KeyExtent,List<Range>>> binnedRanges = new HashMap<>();

    tl.invalidateCache();

    while (tl.binRanges(
        new ClientContext(conn.getInstance(), credentials,
            AccumuloConfiguration.getTableConfiguration(conn,
                Tables.getTableId(conn.getInstance(), mapping.tableName))),
        Collections.singletonList(createRange(query)), binnedRanges).size() > 0) {
      // TODO log?
      // same pattern as the Accumulo snippets above: distinguish a deleted or
      // offline table from a transient binning failure before retrying
      if (!Tables.exists(conn.getInstance(),
          Tables.getTableId(conn.getInstance(), mapping.tableName)))
        throw new TableDeletedException(
            Tables.getTableId(conn.getInstance(), mapping.tableName));
      else if (Tables.getTableState(conn.getInstance(),
          Tables.getTableId(conn.getInstance(), mapping.tableName)) == TableState.OFFLINE)
        throw new TableOfflineException(conn.getInstance(),
            Tables.getTableId(conn.getInstance(), mapping.tableName));
      UtilWaitThread.sleep(100);
      tl.invalidateCache();
    }

    List<PartitionQuery<K,T>> ret = new ArrayList<>();

    Text startRow = null;
    Text endRow = null;

    if (query.getStartKey() != null)
      startRow = new Text(toBytes(query.getStartKey()));

    if (query.getEndKey() != null)
      endRow = new Text(toBytes(query.getEndKey()));

    // hadoop expects hostnames, accumulo keeps track of IPs... so need to convert
    HashMap<String,String> hostNameCache = new HashMap<>();

    for (Entry<String,Map<KeyExtent,List<Range>>> entry : binnedRanges.entrySet()) {
      String ip = entry.getKey().split(":", 2)[0];
      String location = hostNameCache.get(ip);
      if (location == null) {
        InetAddress inetAddress = InetAddress.getByName(ip);
        location = inetAddress.getHostName();
        hostNameCache.put(ip, location);
      }

      Map<KeyExtent,List<Range>> tablets = entry.getValue();
      for (KeyExtent ke : tablets.keySet()) {
        K startKey = null;
        if (startRow == null || !ke.contains(startRow)) {
          // the query start falls outside this tablet; start just after the previous end row
          if (ke.getPrevEndRow() != null) {
            startKey = followingKey(encoder, getKeyClass(), getBytes(ke.getPrevEndRow()));
          }
        } else {
          startKey = fromBytes(getKeyClass(), getBytes(startRow));
        }

        K endKey = null;
        if (endRow == null || !ke.contains(endRow)) {
          // the query end falls outside this tablet; end at the last key the tablet can hold
          if (ke.getEndRow() != null)
            endKey = lastPossibleKey(encoder, getKeyClass(), getBytes(ke.getEndRow()));
        } else {
          endKey = fromBytes(getKeyClass(), getBytes(endRow));
        }

        // one partition per tablet, located at the hosting tablet server
        PartitionQueryImpl<K,T> pqi = new PartitionQueryImpl<>(query, startKey, endKey, location);
        pqi.setConf(getConf());
        ret.add(pqi);
      }
    }

    return ret;
  } catch (Exception e) {
    throw new GoraException(e);
  }
}
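
A minimal usage sketch for getPartitions follows; the DataStore, the generated persistent class WebPage, and the process() callback are assumptions for illustration. Each partition is the original query restricted to one tablet's key range, tagged with the hosting tablet server's hostname so a scheduler can place work near the data.

// Sketch only: "store", WebPage, and process() are hypothetical.
void scanPartitions(DataStore<String,WebPage> store) throws Exception {
  Query<String,WebPage> query = store.newQuery();
  List<PartitionQuery<String,WebPage>> partitions = store.getPartitions(query);
  for (PartitionQuery<String,WebPage> partition : partitions) {
    // partition.getLocations() names the tablet server hosting this key range
    Result<String,WebPage> rows = store.execute(partition);
    while (rows.next()) {
      process(rows.getKey(), rows.get()); // process() is a hypothetical callback
    }
  }
}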