Example use of org.apache.accumulo.core.clientImpl.bulk.Bulk.Files in the Apache Accumulo project:
the method loadFiles from the class LoadFiles.
/**
 * Make asynchronous load calls to each overlapping Tablet in the bulk mapping. Return a sleep
 * time to isReady based on a factor of the TabletServer with the most Tablets. This method will
 * scan the metadata table getting Tablet range and location information. It will return 0 when
 * all files have been loaded.
 *
 * @param tableId table receiving the bulk import
 * @param bulkDir directory containing the files to load
 * @param loadMapIter iterator over the extent-to-files bulk mapping
 * @param manager manager providing the server context for metadata scans
 * @param tid fate transaction id for this bulk import
 * @return milliseconds isReady should sleep before retrying, or 0 when done
 */
private long loadFiles(TableId tableId, Path bulkDir, LoadMappingIterator loadMapIter,
    Manager manager, long tid) throws Exception {
  PeekingIterator<Map.Entry<KeyExtent,Bulk.Files>> mappingIter =
      new PeekingIterator<>(loadMapIter);

  // Peek at the first mapping entry so the metadata scan can start at the first
  // extent's previous end row instead of the beginning of the table.
  Text firstRow = mappingIter.peek().getKey().prevEndRow();
  Iterator<TabletMetadata> tabletIter =
      TabletsMetadata.builder(manager.getContext()).forTable(tableId)
          .overlapping(firstRow, null).checkConsistency()
          .fetch(PREV_ROW, LOCATION, LOADED).build().iterator();

  // Online and offline tables require different load strategies.
  Loader loader = (bulkInfo.tableState == TableState.ONLINE) ? new OnlineLoader()
      : new OfflineLoader();
  loader.start(bulkDir, manager, tid, bulkInfo.setTime);

  long scanStart = System.currentTimeMillis();
  while (mappingIter.hasNext()) {
    Map.Entry<KeyExtent,Bulk.Files> entry = mappingIter.next();
    loader.load(findOverlappingTablets(entry.getKey(), tabletIter), entry.getValue());
  }

  long sleepTime = loader.finish();
  if (sleepTime > 0) {
    // Scale the sleep with how long the metadata scan took (capped at 30s) so that
    // slow scans do not cause isReady to poll too aggressively.
    long scanTime = Math.min(System.currentTimeMillis() - scanStart, 30000);
    sleepTime = Math.max(sleepTime, scanTime * 2);
  }
  return sleepTime;
}
Example use of org.apache.accumulo.core.clientImpl.bulk.Bulk.Files in the Apache Accumulo project:
the method testRemap from the class BulkSerializeTest.
/**
 * Verifies that getUpdatedLoadMapping applies the rename map to a serialized load
 * mapping: every file name in the round-tripped mapping should carry the "N" prefix
 * recorded in the rename file.
 */
@Test
public void testRemap() throws Exception {
  TableId tableId = TableId.of("3");
  SortedMap<KeyExtent,Bulk.Files> mapping = generateMapping(tableId);

  // Build the expected post-rename mapping and the old-name -> new-name map.
  SortedMap<KeyExtent,Bulk.Files> expectedMapping = new TreeMap<>();
  Map<String,String> renames = new HashMap<>();
  for (Map.Entry<KeyExtent,Bulk.Files> entry : mapping.entrySet()) {
    Files renamedFiles = new Files();
    for (FileInfo info : entry.getValue()) {
      String newName = "N" + info.name;
      renamedFiles.add(new FileInfo(newName, info.estSize, info.estEntries));
      renames.put(info.name, newName);
    }
    expectedMapping.put(entry.getKey(), renamedFiles);
  }

  // Serialize the original mapping and the rename map to in-memory buffers.
  ByteArrayOutputStream mappingBytes = new ByteArrayOutputStream();
  ByteArrayOutputStream renameBytes = new ByteArrayOutputStream();
  BulkSerialize.writeRenameMap(renames, "/some/dir", p -> renameBytes);
  BulkSerialize.writeLoadMapping(mapping, "/some/dir", p -> mappingBytes);

  // Serve each buffer back based on which file is requested.
  Input input = p -> {
    String fileName = p.getName();
    if (fileName.equals(Constants.BULK_LOAD_MAPPING)) {
      return new ByteArrayInputStream(mappingBytes.toByteArray());
    }
    if (fileName.equals(Constants.BULK_RENAME_FILE)) {
      return new ByteArrayInputStream(renameBytes.toByteArray());
    }
    throw new IllegalArgumentException("bad path " + p);
  };

  try (LoadMappingIterator lmi =
      BulkSerialize.getUpdatedLoadMapping("/some/dir", tableId, input)) {
    SortedMap<KeyExtent,Bulk.Files> actual = new TreeMap<>();
    while (lmi.hasNext()) {
      Map.Entry<KeyExtent,Bulk.Files> e = lmi.next();
      actual.put(e.getKey(), e.getValue());
    }
    assertEquals(expectedMapping, actual);
  }
}
End of aggregated usage examples.