use of org.apache.accumulo.core.dataImpl.KeyExtent in project accumulo by apache.
The class ThriftClientHandler, method getTabletStats:
@Override
public List<TabletStats> getTabletStats(TInfo tinfo, TCredentials credentials, String tableId) {
  List<TabletStats> result = new ArrayList<>();
  TableId text = TableId.of(tableId);
  KeyExtent start = new KeyExtent(text, new Text(), null);
  for (Entry<KeyExtent, Tablet> entry : server.getOnlineTablets().tailMap(start).entrySet()) {
    KeyExtent ke = entry.getKey();
    if (ke.tableId().compareTo(text) == 0) {
      Tablet tablet = entry.getValue();
      TabletStats stats = tablet.getTabletStats();
      stats.extent = ke.toThrift();
      stats.ingestRate = tablet.ingestRate();
      stats.queryRate = tablet.queryRate();
      stats.splitCreationTime = tablet.getSplitCreationTime();
      stats.numEntries = tablet.getNumEntries();
      result.add(stats);
    }
  }
  return result;
}
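The tailMap trick above relies on KeyExtent's natural ordering, which compares the table id first and then the end row. A minimal sketch, using two hypothetical table ids, of why an empty end row works as a lower bound and why the tableId() check inside the loop is still needed:

import java.util.TreeMap;

import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.hadoop.io.Text;

public class KeyExtentTailMapSketch {
  public static void main(String[] args) {
    // Hypothetical stand-in for the tablet server's online-tablet map.
    TreeMap<KeyExtent, String> online = new TreeMap<>();
    online.put(new KeyExtent(TableId.of("2"), new Text("m"), null), "2;m");
    online.put(new KeyExtent(TableId.of("2"), null, new Text("m")), "2<");
    online.put(new KeyExtent(TableId.of("3"), null, null), "3<");

    // An empty end row sorts before every real end row, so this is the lowest
    // possible extent for table 2.
    KeyExtent start = new KeyExtent(TableId.of("2"), new Text(), null);

    // tailMap(start) begins at table 2 but runs on into table 3,
    // which is why the loop above still checks tableId().
    online.tailMap(start).forEach((ke, name) -> {
      if (ke.tableId().equals(TableId.of("2")))
        System.out.println(name); // prints "2;m" then "2<"
    });
  }
}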
use of org.apache.accumulo.core.dataImpl.KeyExtent in project accumulo by apache.
The class TabletServer, method splitTablet:
TreeMap<KeyExtent, TabletData> splitTablet(Tablet tablet, byte[] splitPoint) throws IOException {
  long t1 = System.currentTimeMillis();
  TreeMap<KeyExtent, TabletData> tabletInfo = tablet.split(splitPoint);
  if (tabletInfo == null) {
    return null;
  }
  log.info("Starting split: {}", tablet.getExtent());
  statsKeeper.incrementStatusSplit();
  long start = System.currentTimeMillis();
  Tablet[] newTablets = new Tablet[2];
  Entry<KeyExtent, TabletData> first = tabletInfo.firstEntry();
  TabletResourceManager newTrm0 = resourceManager.createTabletResourceManager(first.getKey(),
      getTableConfiguration(first.getKey()));
  newTablets[0] = new Tablet(TabletServer.this, first.getKey(), newTrm0, first.getValue());
  Entry<KeyExtent, TabletData> last = tabletInfo.lastEntry();
  TabletResourceManager newTrm1 = resourceManager.createTabletResourceManager(last.getKey(),
      getTableConfiguration(last.getKey()));
  newTablets[1] = new Tablet(TabletServer.this, last.getKey(), newTrm1, last.getValue());
  // roll tablet stats over into tablet server's statsKeeper object as historical data
  statsKeeper.saveMajorMinorTimes(tablet.getTabletStats());
  // lose the reference to the old tablet and open two new ones
  onlineTablets.split(tablet.getExtent(), newTablets[0], newTablets[1]);
  // tell the manager
  enqueueManagerMessage(new SplitReportMessage(tablet.getExtent(), newTablets[0].getExtent(),
      new Text("/" + newTablets[0].getDirName()), newTablets[1].getExtent(),
      new Text("/" + newTablets[1].getDirName())));
  statsKeeper.updateTime(Operation.SPLIT, start, false);
  long t2 = System.currentTimeMillis();
  log.info("Tablet split: {} size0 {} size1 {} time {}ms", tablet.getExtent(),
      newTablets[0].estimateTabletSize(), newTablets[1].estimateTabletSize(), (t2 - t1));
  return tabletInfo;
}
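For orientation, the two TreeMap keys returned by the split relate to the original extent and the split point roughly as sketched below. This is illustrative only (assumed variable names; the real keys are built inside Tablet.split), but it shows why firstEntry() and lastEntry() pick up the low and high children in that order:

// Illustrative sketch, not the Tablet.split implementation.
KeyExtent parent = new KeyExtent(TableId.of("1"), new Text("z"), new Text("a"));
Text splitRow = new Text("m");
KeyExtent low = new KeyExtent(parent.tableId(), splitRow, parent.prevEndRow());  // rows (a, m]
KeyExtent high = new KeyExtent(parent.tableId(), parent.endRow(), splitRow);     // rows (m, z]
// low.compareTo(high) < 0, so low is firstEntry() and high is lastEntry() in the TreeMap.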
use of org.apache.accumulo.core.dataImpl.KeyExtent in project accumulo by apache.
The class ListTabletsCommandTest, method builderTest:
@Test
public void builderTest() {
  TableId id = TableId.of("123");
  Text startRow = new Text("a");
  Text endRow = new Text("z");
  KeyExtent ke = new KeyExtent(id, endRow, startRow);
  TabletMetadata.Location loc =
      new TabletMetadata.Location("localhost", "", TabletMetadata.LocationType.CURRENT);
  ListTabletsCommand.TabletRowInfo.Factory factory =
      new ListTabletsCommand.TabletRowInfo.Factory("aName", ke).numFiles(1).numWalLogs(2)
          .numEntries(3).size(4).status(TabletState.HOSTED.toString()).location(loc)
          .tableExists(true);
  ListTabletsCommand.TabletRowInfo info = factory.build();
  assertEquals("aName", info.tableName);
  assertEquals(1, info.numFiles);
  assertEquals(2, info.numWalLogs);
  assertEquals("3", info.getNumEntries(false));
  assertEquals(3, info.numEntries);
  assertEquals("4", info.getSize(false));
  assertEquals(4, info.size);
  assertEquals("HOSTED", info.status);
  assertEquals("CURRENT:localhost", info.location);
  assertEquals(TableId.of("123"), info.tableId);
  assertEquals(startRow + " " + endRow, info.getTablet());
  assertTrue(info.tableExists);
}
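Note that the KeyExtent constructor takes (tableId, endRow, prevEndRow), so new KeyExtent(id, endRow, startRow) describes the tablet holding rows after "a" up to and including "z". A minimal sketch of that semantics (illustrative assertions, not part of the original test):

KeyExtent ke = new KeyExtent(TableId.of("123"), new Text("z"), new Text("a"));
assertTrue(ke.contains(new Text("m")));   // rows in (a, z] belong to this extent
assertFalse(ke.contains(new Text("a")));  // the previous end row itself is excluded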
use of org.apache.accumulo.core.dataImpl.KeyExtent in project accumulo by apache.
The class AccumuloRecordReader, method getSplits:
public static List<InputSplit> getSplits(JobContext context, Class<?> callingClass) throws IOException {
  validateOptions(context, callingClass);
  LinkedList<InputSplit> splits = new LinkedList<>();
  try (AccumuloClient client = createClient(context, callingClass);
      var clientContext = ((ClientContext) client)) {
    Map<String, InputTableConfig> tableConfigs =
        InputConfigurator.getInputTableConfigs(callingClass, context.getConfiguration());
    for (Map.Entry<String, InputTableConfig> tableConfigEntry : tableConfigs.entrySet()) {
      String tableName = tableConfigEntry.getKey();
      InputTableConfig tableConfig = tableConfigEntry.getValue();
      TableId tableId;
      // resolve table name to id once, and use id from this point forward
      try {
        tableId = clientContext.getTableId(tableName);
      } catch (TableNotFoundException e) {
        throw new IOException(e);
      }
      boolean batchScan = InputConfigurator.isBatchScan(callingClass, context.getConfiguration());
      boolean supportBatchScan = !(tableConfig.isOfflineScan()
          || tableConfig.shouldUseIsolatedScanners() || tableConfig.shouldUseLocalIterators());
      if (batchScan && !supportBatchScan)
        throw new IllegalArgumentException(
            "BatchScanner optimization not available for offline scan, isolated, or local iterators");
      boolean autoAdjust = tableConfig.shouldAutoAdjustRanges();
      if (batchScan && !autoAdjust)
        throw new IllegalArgumentException(
            "AutoAdjustRanges must be enabled when using BatchScanner optimization");
      List<Range> ranges =
          autoAdjust ? Range.mergeOverlapping(tableConfig.getRanges()) : tableConfig.getRanges();
      if (ranges.isEmpty()) {
        ranges = new ArrayList<>(1);
        ranges.add(new Range());
      }
      // get the metadata information for these ranges
      Map<String, Map<KeyExtent, List<Range>>> binnedRanges = new HashMap<>();
      TabletLocator tl;
      try {
        if (tableConfig.isOfflineScan()) {
          binnedRanges = binOfflineTable(context, tableId, ranges, callingClass);
          while (binnedRanges == null) {
            // Some tablets were still online, try again
            // sleep randomly between 100 and 200 ms
            sleepUninterruptibly(100 + random.nextInt(100), TimeUnit.MILLISECONDS);
            binnedRanges = binOfflineTable(context, tableId, ranges, callingClass);
          }
        } else {
          tl = InputConfigurator.getTabletLocator(callingClass, context.getConfiguration(), tableId);
          // it's possible that the cache could contain complete, but old, information about a
          // table's tablets, so clear it
          tl.invalidateCache();
          while (!tl.binRanges(clientContext, ranges, binnedRanges).isEmpty()) {
            clientContext.requireNotDeleted(tableId);
            clientContext.requireNotOffline(tableId, tableName);
            binnedRanges.clear();
            log.warn("Unable to locate bins for specified ranges. Retrying.");
            // sleep randomly between 100 and 200 ms
            sleepUninterruptibly(100 + random.nextInt(100), TimeUnit.MILLISECONDS);
            tl.invalidateCache();
          }
        }
      } catch (TableOfflineException | TableNotFoundException | AccumuloException
          | AccumuloSecurityException e) {
        throw new IOException(e);
      }
      // This code either adds one split per range-location pair (when ranges are not
      // auto-adjusted), or clips each range to its tablet and adds one split per clipped range.
      // Map from Range to list of locations; only used when ranges are not auto-adjusted.
      HashMap<Range, ArrayList<String>> splitsToAdd = null;
      if (!autoAdjust)
        splitsToAdd = new HashMap<>();
      HashMap<String, String> hostNameCache = new HashMap<>();
      for (Map.Entry<String, Map<KeyExtent, List<Range>>> tserverBin : binnedRanges.entrySet()) {
        String ip = tserverBin.getKey().split(":", 2)[0];
        String location = hostNameCache.get(ip);
        if (location == null) {
          InetAddress inetAddress = InetAddress.getByName(ip);
          location = inetAddress.getCanonicalHostName();
          hostNameCache.put(ip, location);
        }
        for (Map.Entry<KeyExtent, List<Range>> extentRanges : tserverBin.getValue().entrySet()) {
          Range ke = extentRanges.getKey().toDataRange();
          if (batchScan) {
            // group ranges by tablet to be read by a BatchScanner
            ArrayList<Range> clippedRanges = new ArrayList<>();
            for (Range r : extentRanges.getValue())
              clippedRanges.add(ke.clip(r));
            BatchInputSplit split =
                new BatchInputSplit(tableName, tableId, clippedRanges, new String[] {location});
            SplitUtils.updateSplit(split, tableConfig);
            splits.add(split);
          } else {
            // not grouping by tablet
            for (Range r : extentRanges.getValue()) {
              if (autoAdjust) {
                // divide ranges into smaller ranges, based on the tablets
                RangeInputSplit split = new RangeInputSplit(tableName, tableId.canonical(),
                    ke.clip(r), new String[] {location});
                SplitUtils.updateSplit(split, tableConfig);
                split.setOffline(tableConfig.isOfflineScan());
                split.setIsolatedScan(tableConfig.shouldUseIsolatedScanners());
                split.setUsesLocalIterators(tableConfig.shouldUseLocalIterators());
                splits.add(split);
              } else {
                // don't divide ranges
                ArrayList<String> locations = splitsToAdd.get(r);
                if (locations == null)
                  locations = new ArrayList<>(1);
                locations.add(location);
                splitsToAdd.put(r, locations);
              }
            }
          }
        }
      }
      if (!autoAdjust)
        for (Map.Entry<Range, ArrayList<String>> entry : splitsToAdd.entrySet()) {
          RangeInputSplit split = new RangeInputSplit(tableName, tableId.canonical(),
              entry.getKey(), entry.getValue().toArray(new String[0]));
          SplitUtils.updateSplit(split, tableConfig);
          split.setOffline(tableConfig.isOfflineScan());
          split.setIsolatedScan(tableConfig.shouldUseIsolatedScanners());
          split.setUsesLocalIterators(tableConfig.shouldUseLocalIterators());
          splits.add(split);
        }
    }
  }
  return splits;
}
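The binnedRanges structure that the loop above walks is keyed by tablet server location ("host:port") and then by tablet extent. A minimal sketch with hypothetical values, showing the shape of the data and the clipping a batch-scan split performs:

Map<String, Map<KeyExtent, List<Range>>> binnedRanges = new HashMap<>();
KeyExtent extent = new KeyExtent(TableId.of("1"), new Text("m"), null);
binnedRanges.computeIfAbsent("10.0.0.5:9997", k -> new HashMap<>())
    .computeIfAbsent(extent, k -> new ArrayList<>()).add(new Range("a", "c"));
// For a batch scan, each range is clipped to the tablet's row range before being
// packed into a BatchInputSplit for that tablet's location.
Range clipped = extent.toDataRange().clip(new Range("a", "c"));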
use of org.apache.accumulo.core.dataImpl.KeyExtent in project accumulo by apache.
The class LogEntryTest, method test:
@Test
public void test() throws Exception {
  KeyExtent extent = new KeyExtent(TableId.of("1"), null, null);
  long ts = 12345678L;
  String filename = "default/foo";
  LogEntry entry = new LogEntry(extent, ts, filename);
  assertEquals(extent.toMetaRow(), entry.getRow());
  assertEquals(filename, entry.filename);
  assertEquals(ts, entry.timestamp);
  assertEquals("1< default/foo", entry.toString());
  assertEquals(new Text("log"), entry.getColumnFamily());
  assertEquals(new Text("-/default/foo"), entry.getColumnQualifier());
  @SuppressWarnings("removal")
  LogEntry copy = LogEntry.fromBytes(entry.toBytes());
  assertEquals(entry.toString(), copy.toString());
  Key key = new Key(new Text("1<"), new Text("log"), new Text("localhost:1234/default/foo"));
  key.setTimestamp(ts);
  var mapEntry = new Entry<Key, Value>() {
    @Override
    public Key getKey() {
      return key;
    }

    @Override
    public Value getValue() {
      return entry.getValue();
    }

    @Override
    public Value setValue(Value value) {
      throw new UnsupportedOperationException();
    }
  };
  LogEntry copy2 = LogEntry.fromMetaWalEntry(mapEntry);
  assertEquals(entry.toString(), copy2.toString());
  assertEquals(entry.timestamp, copy2.timestamp);
  assertEquals("foo", entry.getUniqueID());
  assertEquals("-/default/foo", entry.getColumnQualifier().toString());
  assertEquals(new Value("default/foo"), entry.getValue());
}
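The row and string assertions above follow directly from how a KeyExtent is encoded as a metadata row: a null end row (the default tablet) is rendered as the table id followed by '<', and a bounded tablet as the table id, ';', and the end row. A minimal sketch:

KeyExtent defaultTablet = new KeyExtent(TableId.of("1"), null, null);
KeyExtent boundedTablet = new KeyExtent(TableId.of("1"), new Text("m"), null);
System.out.println(defaultTablet.toMetaRow()); // 1<
System.out.println(boundedTablet.toMetaRow()); // 1;m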