Use of org.apache.accumulo.core.dataImpl.KeyExtent in project accumulo by apache.
From the class PopulateMetadata, the method call():
@Override
public Repo<Manager> call(long tid, Manager env) throws Exception {
  KeyExtent extent = new KeyExtent(tableInfo.getTableId(), null, null);
  MetadataTableUtil.addTablet(extent, ServerColumnFamily.DEFAULT_TABLET_DIR_NAME,
      env.getContext(), tableInfo.getTimeType(), env.getManagerLock());

  if (tableInfo.getInitialSplitSize() > 0) {
    SortedSet<Text> splits = Utils.getSortedSetFromFile(env, tableInfo.getSplitPath(), true);
    SortedSet<Text> dirs = Utils.getSortedSetFromFile(env, tableInfo.getSplitDirsPath(), false);
    Map<Text,Text> splitDirMap = createSplitDirectoryMap(splits, dirs);
    try (BatchWriter bw = env.getContext().createBatchWriter(MetadataTable.NAME)) {
      writeSplitsToMetadataTable(env.getContext(), tableInfo.getTableId(), splits, splitDirMap,
          tableInfo.getTimeType(), env.getManagerLock(), bw);
    }
  }
  return new FinishCreateTable(tableInfo);
}
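A KeyExtent built with a null end row and a null previous end row, as above, spans the entire table. A minimal sketch of the constructor's semantics; the table id "1" and split point "m" are illustrative assumptions, not taken from the snippet:

import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.hadoop.io.Text;

public class KeyExtentSketch {
  public static void main(String[] args) {
    // The whole-table extent used by PopulateMetadata: endRow and prevEndRow are both null.
    KeyExtent wholeTable = new KeyExtent(TableId.of("1"), null, null);

    // A table split at row "m" yields two extents: (-inf, "m"] and ("m", +inf).
    KeyExtent first = new KeyExtent(TableId.of("1"), new Text("m"), null);
    KeyExtent second = new KeyExtent(TableId.of("1"), null, new Text("m"));

    System.out.println(wholeTable.toMetaRow());         // metadata row "1<" (default tablet)
    System.out.println(first.toMetaRow());              // metadata row "1;m"
    System.out.println(second.isPreviousExtent(first)); // true: second directly follows first
  }
}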
Use of org.apache.accumulo.core.dataImpl.KeyExtent in project accumulo by apache.
From the class BulkFailureIT, the method runTest():
/**
 * This test verifies two things. First, it ensures that after a bulk imported file is compacted,
 * subsequent import requests are ignored. Second, it ensures that after the bulk import
 * transaction is canceled, import requests fail. The public API for bulk import cannot be used
 * for this test; internal (non-public API) RPCs and ZooKeeper state are manipulated directly.
 * This is the only way to interleave compactions with multiple, duplicate import RPC requests.
 */
protected void runTest(String table, long fateTxid, Loader loader) throws Exception {
  try (AccumuloClient c = Accumulo.newClient().from(getClientProps()).build()) {
    SortedMap<Key,Value> testData = createTestData();
    FileSystem fs = getCluster().getFileSystem();
    String testFile = createTestFile(fateTxid, testData, fs);
    c.tableOperations().create(table);
    String tableId = c.tableOperations().tableIdMap().get(table);

    // The table has no splits, so this extent corresponds to the table's single tablet.
    KeyExtent extent = new KeyExtent(TableId.of(tableId), null, null);

    ServerContext asCtx = getServerContext();
    ZooArbitrator.start(asCtx, Constants.BULK_ARBITRATOR_TYPE, fateTxid);

    VolumeManager vm = asCtx.getVolumeManager();

    // move the file into a directory for the table and rename the file to something unique
    String bulkDir = BulkImport.prepareBulkImport(asCtx, vm, testFile, TableId.of(tableId), fateTxid);

    // determine the file's new name and path
    FileStatus status = fs.listStatus(new Path(bulkDir))[0];
    Path bulkLoadPath = fs.makeQualified(status.getPath());

    // Directly ask the tablet to load the file.
    loader.load(fateTxid, asCtx, extent, bulkLoadPath, status.getLen(), false);

    assertEquals(Set.of(bulkLoadPath), getFiles(c, extent));
    assertEquals(Set.of(bulkLoadPath), getLoaded(c, extent));
    assertEquals(testData, readTable(table, c));

    // Compact the bulk imported file. Subsequent requests to load the file should be ignored.
    c.tableOperations().compact(table, new CompactionConfig().setWait(true));

    Set<Path> tabletFiles = getFiles(c, extent);
    assertFalse(tabletFiles.contains(bulkLoadPath));
    assertEquals(1, tabletFiles.size());
    assertEquals(Set.of(bulkLoadPath), getLoaded(c, extent));
    assertEquals(testData, readTable(table, c));

    // this request should be ignored by the tablet
    loader.load(fateTxid, asCtx, extent, bulkLoadPath, status.getLen(), false);

    assertEquals(tabletFiles, getFiles(c, extent));
    assertEquals(Set.of(bulkLoadPath), getLoaded(c, extent));
    assertEquals(testData, readTable(table, c));

    // take the table offline and online to ensure the tablet reads the load flags from the
    // metadata table when it loads
    c.tableOperations().offline(table, true);
    c.tableOperations().online(table, true);

    // this request should be ignored by the tablet
    loader.load(fateTxid, asCtx, extent, bulkLoadPath, status.getLen(), false);

    assertEquals(tabletFiles, getFiles(c, extent));
    assertEquals(Set.of(bulkLoadPath), getLoaded(c, extent));
    assertEquals(testData, readTable(table, c));

    // After this, all load requests should fail.
    ZooArbitrator.stop(asCtx, Constants.BULK_ARBITRATOR_TYPE, fateTxid);

    c.securityOperations().grantTablePermission(c.whoami(), MetadataTable.NAME, TablePermission.WRITE);
    BatchDeleter bd = c.createBatchDeleter(MetadataTable.NAME, Authorizations.EMPTY, 1);
    bd.setRanges(Collections.singleton(extent.toMetaRange()));
    bd.fetchColumnFamily(BulkFileColumnFamily.NAME);
    bd.delete();

    loader.load(fateTxid, asCtx, extent, bulkLoadPath, status.getLen(), true);

    assertEquals(tabletFiles, getFiles(c, extent));
    assertEquals(Set.of(), getLoaded(c, extent));
    assertEquals(testData, readTable(table, c));
  }
}
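The Loader parameter's definition is not shown here. Judging only from the call sites above, it is a callback that issues the internal load RPC; a plausible shape, inferred from those call sites rather than copied from the Accumulo source:

import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.server.ServerContext;
import org.apache.hadoop.fs.Path;

// Inferred from the call sites above, not taken from the Accumulo source: the last
// argument appears to signal whether the load request is expected to fail.
interface Loader {
  void load(long txid, ServerContext context, KeyExtent extent, Path path, long size,
      boolean expectFailure) throws Exception;
}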
Use of org.apache.accumulo.core.dataImpl.KeyExtent in project accumulo by apache.
From the class CollectTabletStats, the method findTablets():
private static List<KeyExtent> findTablets(ClientContext context, boolean selectLocalTablets,
    String tableName, SortedMap<KeyExtent,String> tabletLocations) throws Exception {
  TableId tableId = context.getTableId(tableName);
  MetadataServicer.forTableId(context, tableId).getTabletLocations(tabletLocations);

  InetAddress localaddress = InetAddress.getLocalHost();
  List<KeyExtent> candidates = new ArrayList<>();

  for (Entry<KeyExtent,String> entry : tabletLocations.entrySet()) {
    String loc = entry.getValue();
    if (loc != null) {
      boolean isLocal = HostAndPort.fromString(loc).getHost().equals(localaddress.getHostName());

      if (selectLocalTablets && isLocal) {
        candidates.add(entry.getKey());
      } else if (!selectLocalTablets && !isLocal) {
        candidates.add(entry.getKey());
      }
    }
  }
  return candidates;
}
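A minimal sketch of how this helper might be invoked from within the same class; the wrapper method name and the assumption that a ClientContext is available are illustrative, not part of the snippet:

import java.util.List;
import java.util.SortedMap;
import java.util.TreeMap;
import org.apache.accumulo.core.clientImpl.ClientContext;
import org.apache.accumulo.core.dataImpl.KeyExtent;

// Illustrative only: collects the extents of tablets hosted on the local machine.
static List<KeyExtent> localTabletsFor(ClientContext context, String tableName) throws Exception {
  // findTablets also fills this map with every tablet's assigned location as a side effect.
  SortedMap<KeyExtent,String> tabletLocations = new TreeMap<>();
  return findTablets(context, true, tableName, tabletLocations);
}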
Use of org.apache.accumulo.core.dataImpl.KeyExtent in project accumulo by apache.
From the class InputConfigurator, the method binOffline():
public static Map<String,Map<KeyExtent,List<Range>>> binOffline(TableId tableId,
    List<Range> ranges, ClientContext context) throws AccumuloException, TableNotFoundException {
  Map<String,Map<KeyExtent,List<Range>>> binnedRanges = new HashMap<>();

  if (context.getTableState(tableId) != TableState.OFFLINE) {
    context.clearTableListCache();
    if (context.getTableState(tableId) != TableState.OFFLINE) {
      throw new AccumuloException(
          "Table is online tableId:" + tableId + " cannot scan table in offline mode ");
    }
  }

  for (Range range : ranges) {
    Text startRow;

    if (range.getStartKey() != null)
      startRow = range.getStartKey().getRow();
    else
      startRow = new Text();

    Range metadataRange =
        new Range(new KeyExtent(tableId, startRow, null).toMetaRow(), true, null, false);
    Scanner scanner = context.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
    TabletColumnFamily.PREV_ROW_COLUMN.fetch(scanner);
    scanner.fetchColumnFamily(LastLocationColumnFamily.NAME);
    scanner.fetchColumnFamily(CurrentLocationColumnFamily.NAME);
    scanner.fetchColumnFamily(FutureLocationColumnFamily.NAME);
    scanner.setRange(metadataRange);

    RowIterator rowIter = new RowIterator(scanner);
    KeyExtent lastExtent = null;

    while (rowIter.hasNext()) {
      Iterator<Map.Entry<Key,Value>> row = rowIter.next();
      String last = "";
      KeyExtent extent = null;
      String location = null;

      while (row.hasNext()) {
        Map.Entry<Key,Value> entry = row.next();
        Key key = entry.getKey();

        if (key.getColumnFamily().equals(LastLocationColumnFamily.NAME)) {
          last = entry.getValue().toString();
        }

        if (key.getColumnFamily().equals(CurrentLocationColumnFamily.NAME)
            || key.getColumnFamily().equals(FutureLocationColumnFamily.NAME)) {
          location = entry.getValue().toString();
        }

        if (TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(key)) {
          extent = KeyExtent.fromMetaPrevRow(entry);
        }
      }

      if (location != null)
        return null;

      if (!extent.tableId().equals(tableId)) {
        throw new AccumuloException("Saw unexpected table Id " + tableId + " " + extent);
      }

      if (lastExtent != null && !extent.isPreviousExtent(lastExtent)) {
        throw new AccumuloException(" " + lastExtent + " is not previous extent " + extent);
      }

      binnedRanges.computeIfAbsent(last, k -> new HashMap<>())
          .computeIfAbsent(extent, k -> new ArrayList<>()).add(range);

      if (extent.endRow() == null
          || range.afterEndKey(new Key(extent.endRow()).followingKey(PartialKey.ROW))) {
        break;
      }

      lastExtent = extent;
    }
  }
  return binnedRanges;
}
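The returned map groups ranges first by the tablet's last-known location, then by extent; a null return means a tablet still had a location and the caller should retry. A sketch of how a caller might walk the result; the method name printBins is a made-up illustration:

import java.util.List;
import java.util.Map;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.dataImpl.KeyExtent;

// Illustrative only: prints each last-known location with its extents and range counts.
static void printBins(Map<String,Map<KeyExtent,List<Range>>> binnedRanges) {
  if (binnedRanges == null) {
    // binOffline returned null because a tablet still had a location; the table is
    // not fully offline yet, so the caller should wait and retry.
    System.out.println("table not fully offline yet, retry");
    return;
  }
  binnedRanges.forEach((lastLocation, byExtent) -> byExtent.forEach((extent, rangeList) ->
      System.out.println(lastLocation + " -> " + extent + " (" + rangeList.size() + " ranges)")));
}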
Use of org.apache.accumulo.core.dataImpl.KeyExtent in project accumulo by apache.
From the class BulkImporter, the method processFailures():
private Set<Path> processFailures(Map<Path,List<KeyExtent>> completeFailures) {
  // we should check if map file was not assigned to any tablets, then we
  // should just move it; not currently being done?
  Set<Entry<Path,List<KeyExtent>>> es = completeFailures.entrySet();

  if (completeFailures.isEmpty())
    return Collections.emptySet();

  log.debug("The following map files failed ");
  for (Entry<Path,List<KeyExtent>> entry : es) {
    List<KeyExtent> extents = entry.getValue();
    for (KeyExtent keyExtent : extents) {
      log.debug("\t{} -> {}", entry.getKey(), keyExtent);
    }
  }
  return Collections.emptySet();
}