Use of org.apache.accumulo.core.data.TableId in project accumulo by apache.
The class TableZooHelper, method _getTableIdDetectNamespaceNotFound.
/**
* Lookup table ID in ZK. If not found, clears cache and tries again.
*/
public TableId _getTableIdDetectNamespaceNotFound(String tableName)
    throws NamespaceNotFoundException, TableNotFoundException {
  TableId tableId = getTableMap().getNameToIdMap().get(tableName);
  if (tableId == null) {
    // maybe the table exists, but the cache was not updated yet...
    // so try to clear the cache and check again
    clearTableListCache();
    tableId = getTableMap().getNameToIdMap().get(tableName);
    if (tableId == null) {
      String namespace = TableNameUtil.qualify(tableName).getFirst();
      if (Namespaces.getNameToIdMap(context).containsKey(namespace))
        throw new TableNotFoundException(null, tableName, null);
      else
        throw new NamespaceNotFoundException(null, namespace, null);
    }
  }
  return tableId;
}
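A minimal usage sketch follows, assuming a TableZooHelper instance has already been constructed for the current client; the helper variable, the describeLookup wrapper, and the messages are illustrative and not part of the Accumulo source.

// Hypothetical caller: distinguishes "table missing" from "namespace missing"
// when resolving a qualified table name such as "myns.mytable".
static String describeLookup(TableZooHelper helper, String tableName) {
  try {
    TableId id = helper._getTableIdDetectNamespaceNotFound(tableName);
    return "resolved to table id " + id.canonical();
  } catch (TableNotFoundException e) {
    return "namespace exists, but table '" + tableName + "' does not";
  } catch (NamespaceNotFoundException e) {
    return "namespace of '" + tableName + "' does not exist";
  }
}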
Use of org.apache.accumulo.core.data.TableId in project accumulo by apache.
The class Merge, method getSizeIterator.
protected Iterator<Size> getSizeIterator(AccumuloClient client, String tablename, Text start,
    Text end) throws MergeException {
  // open up metadata, walk through the tablets.
  TableId tableId;
  TabletsMetadata tablets;
  try {
    ClientContext context = (ClientContext) client;
    tableId = context.getTableId(tablename);
    tablets = TabletsMetadata.builder(context).scanMetadataTable()
        .overRange(new KeyExtent(tableId, end, start).toMetaRange()).fetch(FILES, PREV_ROW)
        .build();
  } catch (Exception e) {
    throw new MergeException(e);
  }
  return tablets.stream().map(tm -> {
    long size = tm.getFilesMap().values().stream().mapToLong(DataFileValue::getSize).sum();
    return new Size(tm.getExtent(), size);
  }).iterator();
}
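A short sketch of how the returned iterator might be consumed, assuming a subclass or same-package caller (the method is protected); the client and goalSize variables and the printout are illustrative, not from the Accumulo source.

// Hypothetical driver: totals the tablet sizes in a range to decide whether
// the range is small enough to merge. Assumes Size exposes a public 'size'
// field, as its constructor above suggests.
Iterator<Size> sizes = getSizeIterator(client, "mytable", null, null);
long totalBytes = 0;
int tabletCount = 0;
while (sizes.hasNext()) {
  totalBytes += sizes.next().size;
  tabletCount++;
}
if (totalBytes < goalSize) {
  System.out.printf("range spans %d tablets totaling %d bytes; merge candidate%n",
      tabletCount, totalBytes);
}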
Use of org.apache.accumulo.core.data.TableId in project accumulo by apache.
The class BulkSerializeTest, method testRemap.
@Test
public void testRemap() throws Exception {
  TableId tableId = TableId.of("3");
  SortedMap<KeyExtent, Bulk.Files> mapping = generateMapping(tableId);
  SortedMap<KeyExtent, Bulk.Files> newNameMapping = new TreeMap<>();
  Map<String, String> nameMap = new HashMap<>();
  mapping.forEach((extent, files) -> {
    Files newFiles = new Files();
    files.forEach(fi -> {
      newFiles.add(new FileInfo("N" + fi.name, fi.estSize, fi.estEntries));
      nameMap.put(fi.name, "N" + fi.name);
    });
    newNameMapping.put(extent, newFiles);
  });
  ByteArrayOutputStream mappingBaos = new ByteArrayOutputStream();
  ByteArrayOutputStream nameBaos = new ByteArrayOutputStream();
  BulkSerialize.writeRenameMap(nameMap, "/some/dir", p -> nameBaos);
  BulkSerialize.writeLoadMapping(mapping, "/some/dir", p -> mappingBaos);
  Input input = p -> {
    if (p.getName().equals(Constants.BULK_LOAD_MAPPING)) {
      return new ByteArrayInputStream(mappingBaos.toByteArray());
    } else if (p.getName().equals(Constants.BULK_RENAME_FILE)) {
      return new ByteArrayInputStream(nameBaos.toByteArray());
    } else {
      throw new IllegalArgumentException("bad path " + p);
    }
  };
  try (LoadMappingIterator lmi =
      BulkSerialize.getUpdatedLoadMapping("/some/dir", tableId, input)) {
    SortedMap<KeyExtent, Bulk.Files> actual = new TreeMap<>();
    lmi.forEachRemaining(e -> actual.put(e.getKey(), e.getValue()));
    assertEquals(newNameMapping, actual);
  }
}
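The generateMapping helper used by these tests is not shown in this excerpt. A plausible sketch of such a helper, built only from the constructors visible above, might look like the following; all file names, sizes, and the "m" split point are made up.

// Hypothetical stand-in for the test's generateMapping helper: two adjacent
// extents of the given table, each mapped to a couple of fabricated rfiles.
static SortedMap<KeyExtent, Bulk.Files> generateMapping(TableId tableId) {
  SortedMap<KeyExtent, Bulk.Files> mapping = new TreeMap<>();
  Files firstTabletFiles = new Files();
  firstTabletFiles.add(new FileInfo("f1.rf", 100, 10));
  firstTabletFiles.add(new FileInfo("f2.rf", 200, 20));
  mapping.put(new KeyExtent(tableId, new Text("m"), null), firstTabletFiles);
  Files lastTabletFiles = new Files();
  lastTabletFiles.add(new FileInfo("f3.rf", 300, 30));
  mapping.put(new KeyExtent(tableId, null, new Text("m")), lastTabletFiles);
  return mapping;
}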
Use of org.apache.accumulo.core.data.TableId in project accumulo by apache.
The class BulkSerializeTest, method writeReadLoadMapping.
@Test
public void writeReadLoadMapping() throws Exception {
  TableId tableId = TableId.of("3");
  SortedMap<KeyExtent, Bulk.Files> mapping = generateMapping(tableId);
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  BulkSerialize.writeLoadMapping(mapping, "/some/dir", p -> baos);
  ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());
  try (LoadMappingIterator lmi = BulkSerialize.readLoadMapping("/some/dir", tableId, p -> bais)) {
    SortedMap<KeyExtent, Bulk.Files> readMapping = new TreeMap<>();
    lmi.forEachRemaining(e -> readMapping.put(e.getKey(), e.getValue()));
    assertEquals(mapping, readMapping);
  }
}
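If the read-back entries need to be inspected rather than compared wholesale, the iterator can also be walked directly. A sketch, using only the accessors visible in these tests (the fresh ByteArrayInputStream and the printout are illustrative):

// Hypothetical follow-up to the round trip above: walk the entries one by one,
// printing each extent and the rfiles mapped to it.
ByteArrayInputStream bais2 = new ByteArrayInputStream(baos.toByteArray());
try (LoadMappingIterator lmi2 = BulkSerialize.readLoadMapping("/some/dir", tableId, p -> bais2)) {
  lmi2.forEachRemaining(entry -> {
    KeyExtent extent = entry.getKey();
    entry.getValue().forEach(fi ->
        System.out.println(extent + " <- " + fi.name + " (" + fi.estSize + " bytes)"));
  });
}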
Use of org.apache.accumulo.core.data.TableId in project accumulo by apache.
The class ReplicationSchemaTest, method extractTableId.
@Test
public void extractTableId() {
  TableId tableId = TableId.of("1");
  Key k = new Key(new Text("foo"), StatusSection.NAME, new Text(tableId.canonical()));
  assertEquals(tableId, StatusSection.getTableId(k));
}
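Outside the test, such keys would typically come from scanning replication status entries rather than being built by hand. A sketch assuming a scanner over those entries is already configured (the scanner variable and the printout are illustrative):

// Hypothetical consumer: extracts the table id from each status-section key
// returned by an already-configured scanner.
for (Map.Entry<Key, Value> entry : scanner) {
  TableId id = StatusSection.getTableId(entry.getKey());
  System.out.println("replication status entry for table " + id.canonical());
}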