Usage example of org.apache.accumulo.core.dataImpl.KeyExtent in the Apache Accumulo project: class PopulateMetadataTable, method call().
// FATE step: reads the serialized metadata entries out of the export zip and
// re-populates the accumulo.metadata table for the newly imported table,
// re-keying each entry to the new table id, remapping data-file names via the
// per-directory mapping files, and assigning a fresh directory to each tablet.
// Returns the next step (MoveExportedFiles) on success.
@Override
public Repo<Manager> call(long tid, Manager manager) throws Exception {
Path path = new Path(tableInfo.exportFile);
VolumeManager fs = manager.getVolumeManager();
// try-with-resources guarantees the batch writer is flushed/closed and the
// zip stream released on every exit path, including the IOException below.
try (BatchWriter mbw = manager.getContext().createBatchWriter(MetadataTable.NAME);
ZipInputStream zis = new ZipInputStream(fs.open(path))) {
// old file name -> new (volume-qualified) path, aggregated across all import dirs
Map<String, String> fileNameMappings = new HashMap<>();
for (ImportedTableInfo.DirectoryMapping dm : tableInfo.directories) {
log.info("importDir is " + dm.importDir);
// mappings are prefixed with the proper volume information, e.g:
// hdfs://localhost:8020/path/to/accumulo/tables/...
readMappingFile(fs, tableInfo, dm.importDir, fileNameMappings);
}
// scan the zip for the single metadata entry; other entries are skipped
ZipEntry zipEntry;
while ((zipEntry = zis.getNextEntry()) != null) {
if (zipEntry.getName().equals(Constants.EXPORT_METADATA_FILE)) {
DataInputStream in = new DataInputStream(new BufferedInputStream(zis));
// key/val are reused across iterations; readFields overwrites them in place
Key key = new Key();
Value val = new Value();
Mutation m = null;
Text currentRow = null;
int dirCount = 0;
// the entry is a stream of (Key, Value) pairs with no explicit count;
// the loop exits via the PREV_ROW check on the last row (see below)
while (true) {
key.readFields(in);
val.readFields(in);
// exported keys carry the ORIGINAL table id; keep only the end row and
// re-key the entry under this import's new table id
Text endRow = KeyExtent.fromMetaRow(key.getRow()).endRow();
Text metadataRow = new KeyExtent(tableInfo.tableId, endRow, null).toMetaRow();
Text cq;
if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
// data-file qualifiers reference files by old name; translate via the
// mapping built above, failing the FATE op if a file is missing
String oldName = new Path(key.getColumnQualifier().toString()).getName();
String newName = fileNameMappings.get(oldName);
if (newName == null) {
throw new AcceptableThriftTableOperationException(tableInfo.tableId.canonical(), tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER, "File " + oldName + " does not exist in import dir");
}
cq = new Text(newName);
} else {
cq = key.getColumnQualifier();
}
// row boundary: flush the previous tablet's mutation and start a new one
if (m == null || !currentRow.equals(metadataRow)) {
if (m != null) {
mbw.addMutation(m);
}
// Make a unique directory inside the table's dir. Cannot import multiple tables
// into one table, so don't need to use unique allocator
String tabletDir = new String(FastFormat.toZeroPaddedString(dirCount++, 8, 16, Constants.CLONE_PREFIX_BYTES), UTF_8);
m = new Mutation(metadataRow);
ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(tabletDir));
currentRow = metadataRow;
}
m.put(key.getColumnFamily(), cq, val);
// the default (last) tablet has a null end row; its PREV_ROW column is the
// final entry in the export stream, so flush and stop reading
if (endRow == null && TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(key)) {
mbw.addMutation(m);
// its the last column in the last row
break;
}
}
break;
}
}
return new MoveExportedFiles(tableInfo);
} catch (IOException ioe) {
log.warn("{}", ioe.getMessage(), ioe);
throw new AcceptableThriftTableOperationException(tableInfo.tableId.canonical(), tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER, "Error reading " + path + " " + ioe.getMessage());
}
}
Usage example of org.apache.accumulo.core.dataImpl.KeyExtent in the Apache Accumulo project: class PrepBulkImportTest, method testException().
@Test
public void testException() throws Exception {
  // Fixed pool of rows that are never tablet boundaries in any load range.
  Set<String> nonBoundaryRows = Set.of("a", "c", "q", "t", "x");
  for (List<KeyExtent> loadRanges : powerSet(nke(null, "b"), nke("b", "m"), nke("m", "r"), nke("r", "v"), nke("v", null))) {
    if (loadRanges.isEmpty()) {
      continue;
    }
    // Collect every boundary row (prev end row and end row) of the load ranges.
    Set<String> boundaryRows = new HashSet<>();
    for (KeyExtent extent : loadRanges) {
      Text prevEndRow = extent.prevEndRow();
      if (prevEndRow != null) {
        boundaryRows.add(prevEndRow.toString());
      }
      Text endRow = extent.endRow();
      if (endRow != null) {
        boundaryRows.add(endRow.toString());
      }
    }
    for (String dropped : boundaryRows) {
      Set<String> withoutOne = new HashSet<>(boundaryRows);
      withoutOne.remove(dropped);
      // test will all but one of the rows in the load mapping
      for (Set<String> extraRows : Sets.powerSet(nonBoundaryRows)) {
        runExceptionTest(loadRanges, createExtents(Iterables.concat(withoutOne, extraRows)));
      }
    }
    if (boundaryRows.size() > 1) {
      // test with none of the rows in the load mapping
      for (Set<String> extraRows : Sets.powerSet(nonBoundaryRows)) {
        runExceptionTest(loadRanges, createExtents(extraRows));
      }
    }
  }
}
Usage example of org.apache.accumulo.core.dataImpl.KeyExtent in the Apache Accumulo project: class PrepBulkImportTest, method nke().
// Builds a KeyExtent for test table "1" with the given boundary rows;
// a null argument means that side of the extent is open-ended.
KeyExtent nke(String prev, String end) {
  Text prevEndRow = null;
  if (prev != null) {
    prevEndRow = new Text(prev);
  }
  Text endRow = null;
  if (end != null) {
    endRow = new Text(end);
  }
  return new KeyExtent(TableId.of("1"), endRow, prevEndRow);
}
Usage example of org.apache.accumulo.core.dataImpl.KeyExtent in the Apache Accumulo project: class PrepBulkImportTest, method createLoadMappingIter().
// Serializes the given extent -> space-separated-file-names mapping and reads
// it back through a LoadMappingIterator, mimicking how a real bulk-import load
// plan is produced and consumed.
private LoadMappingIterator createLoadMappingIter(Map<KeyExtent, String> loadRanges) throws IOException {
  SortedMap<KeyExtent, Bulk.Files> mapping = new TreeMap<>();
  for (Map.Entry<KeyExtent, String> entry : loadRanges.entrySet()) {
    Bulk.Files extentFiles = new Bulk.Files();
    long seq = 0L;
    for (String fileName : entry.getValue().split(" ")) {
      seq++;
      // use the running counter as both the file's size and entry count
      extentFiles.add(new Bulk.FileInfo(fileName, seq, seq));
    }
    mapping.put(entry.getKey(), extentFiles);
  }
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  BulkSerialize.writeLoadMapping(mapping, "/some/dir", p -> baos);
  ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());
  return BulkSerialize.readLoadMapping("/some/dir", TableId.of("1"), p -> bais);
}
Usage example of org.apache.accumulo.core.dataImpl.KeyExtent in the Apache Accumulo project: class CleanUp, method isReady().
// FATE readiness check for table deletion: returns 0 once every tablet of the
// table is unassigned, otherwise a 50 ms retry delay.
@Override
public long isReady(long tid, Manager manager) throws Exception {
  // Require a full manager cycle since this step was created before checking.
  if (!manager.hasCycled(creationTime)) {
    return 50;
  }
  Range tableRange = new KeyExtent(tableId, null, null).toMetaRange();
  Scanner scanner = manager.getContext().createScanner(MetadataTable.NAME, Authorizations.EMPTY);
  MetaDataTableScanner.configureScanner(scanner, manager);
  scanner.setRange(tableRange);
  for (Entry<Key, Value> entry : scanner) {
    TabletLocationState locationState = MetaDataTableScanner.createTabletLocationState(entry.getKey(), entry.getValue());
    TabletState state = locationState.getState(manager.onlineTabletServers());
    if (state != TabletState.UNASSIGNED) {
      // This code will even wait on tablets that are assigned to dead tablets servers. This is
      // intentional because the manager may make metadata writes for these tablets. See #587
      log.debug("Still waiting for table({}) to be deleted; Target tablet state: UNASSIGNED, Current tablet state: {}, locationState: {}", tableId, state, locationState);
      return 50;
    }
  }
  return 0;
}
Aggregations