Use of org.apache.accumulo.core.dataImpl.KeyExtent in project accumulo by apache.
The class TabletLocatorImplTest, method testBinMutations3.
@Test
public void testBinMutations3() throws Exception {
    // three tablet table
    KeyExtent ke1 = nke("foo", "h", null);
    KeyExtent ke2 = nke("foo", "t", "h");
    KeyExtent ke3 = nke("foo", null, "t");

    TabletLocatorImpl metaCache = createLocators("foo", ke1, "l1", ke2, "l2", ke3, "l3");

    List<Mutation> ml = nml(nm("a", "cf1:cq1=v1", "cf1:cq2=v2"), nm("i", "cf1:cq1=v3", "cf1:cq2=v4"));
    Map<String,Map<KeyExtent,List<String>>> emb = cemb(nol("a", "l1", ke1), nol("i", "l2", ke2));
    runTest(metaCache, ml, emb);

    ml = nml(nm("a", "cf1:cq1=v1", "cf1:cq2=v2"));
    emb = cemb(nol("a", "l1", ke1));
    runTest(metaCache, ml, emb);

    ml = nml(nm("a", "cf1:cq1=v1", "cf1:cq2=v2"), nm("a", "cf1:cq3=v3"));
    emb = cemb(nol("a", "l1", ke1), nol("a", "l1", ke1));
    runTest(metaCache, ml, emb);

    ml = nml(nm("a", "cf1:cq1=v1", "cf1:cq2=v2"), nm("w", "cf1:cq3=v3"));
    emb = cemb(nol("a", "l1", ke1), nol("w", "l3", ke3));
    runTest(metaCache, ml, emb);

    ml = nml(nm("a", "cf1:cq1=v1", "cf1:cq2=v2"), nm("w", "cf1:cq3=v3"), nm("z", "cf1:cq4=v4"));
    emb = cemb(nol("a", "l1", ke1), nol("w", "l3", ke3), nol("z", "l3", ke3));
    runTest(metaCache, ml, emb);

    // mutations on the tablet boundary rows "h" and "t" (end rows are inclusive)
    ml = nml(nm("h", "cf1:cq1=v1", "cf1:cq2=v2"), nm("t", "cf1:cq1=v1", "cf1:cq2=v2"));
    emb = cemb(nol("h", "l1", ke1), nol("t", "l2", ke2));
    runTest(metaCache, ml, emb);
}
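The single-letter helpers above are shorthand the test class defines for itself. A minimal sketch of what the nke ("new KeyExtent") helper presumably looks like, assuming the standard KeyExtent constructor, where a null end row or previous end row marks the last or first tablet of the table:

import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.hadoop.io.Text;

// Hypothetical sketch of the nke test helper: builds a KeyExtent from a table
// name, end row, and previous end row, where null means an open boundary.
static KeyExtent nke(String table, String endRow, String prevEndRow) {
    return new KeyExtent(TableId.of(table),
        endRow == null ? null : new Text(endRow),
        prevEndRow == null ? null : new Text(prevEndRow));
}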
Use of org.apache.accumulo.core.dataImpl.KeyExtent in project accumulo by apache.
The class TabletLocatorImplTest, method testAccumulo1248.
@Test
public void testAccumulo1248() {
    TServers tservers = new TServers();
    TabletLocatorImpl metaCache = createLocators(tservers, "tserver1", "tserver2", "foo");

    KeyExtent ke1 = nke("foo", null, null);

    // Set two locations for a tablet; this is not supposed to happen. The metadata
    // cache should throw an exception if it sees this rather than caching one of
    // the locations.
    setLocation(tservers, "tserver2", MTE, ke1, "L1", "I1");
    setLocation(tservers, "tserver2", MTE, ke1, "L2", "I2");

    var e = assertThrows(IllegalStateException.class,
        () -> metaCache.locateTablet(context, new Text("a"), false, false));
    assertTrue(e.getMessage().startsWith("Tablet has multiple locations : "));
}
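A hypothetical sketch of the fail-fast behavior this test exercises: when a metadata read yields more than one location for the same extent, the cache should refuse to pick one arbitrarily. The method name and signature here are illustrative, not the actual TabletLocatorImpl internals:

import java.util.List;
import org.apache.accumulo.core.dataImpl.KeyExtent;

// Illustrative check: treat a tablet with multiple live locations as corrupt
// metadata state rather than caching an arbitrary location.
static String requireSingleLocation(KeyExtent extent, List<String> locations) {
    if (locations.size() > 1) {
        throw new IllegalStateException("Tablet has multiple locations : " + extent);
    }
    return locations.get(0);
}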
Use of org.apache.accumulo.core.dataImpl.KeyExtent in project accumulo by apache.
The class TabletLocatorImplTest, method createMetaCacheKE.
static TreeMap<KeyExtent, TabletLocation> createMetaCacheKE(Object... data) {
    TreeMap<KeyExtent, TabletLocation> mcke = new TreeMap<>();

    for (int i = 0; i < data.length; i += 2) {
        KeyExtent ke = (KeyExtent) data[i];
        String loc = (String) data[i + 1];
        mcke.put(ke, new TabletLocation(ke, loc, "1"));
    }

    return mcke;
}
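Callers pass alternating (KeyExtent, location) varargs pairs; an odd-length argument list or a non-String location fails at runtime on the casts above. For example, using the nke helper:

// Builds a two-tablet metadata cache; "l1" and "l2" are tablet server locations.
TreeMap<KeyExtent, TabletLocation> mcke =
    createMetaCacheKE(nke("foo", "h", null), "l1", nke("foo", null, "h"), "l2");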
Use of org.apache.accumulo.core.dataImpl.KeyExtent in project accumulo by apache.
The class BulkImport, method load.
@Override
public void load() throws TableNotFoundException, IOException, AccumuloException, AccumuloSecurityException {
    TableId tableId = context.getTableId(tableName);

    FileSystem fs = VolumeConfiguration.fileSystemForPath(dir, context.getHadoopConf());
    Path srcPath = checkPath(fs, dir);

    SortedMap<KeyExtent, Bulk.Files> mappings;
    TableOperationsImpl tableOps = new TableOperationsImpl(context);

    int maxTablets = 0;
    for (var prop : tableOps.getProperties(tableName)) {
        if (prop.getKey().equals(Property.TABLE_BULK_MAX_TABLETS.getKey())) {
            maxTablets = Integer.parseInt(prop.getValue());
            break;
        }
    }

    Retry retry = Retry.builder().infiniteRetries().retryAfter(100, MILLISECONDS)
        .incrementBy(100, MILLISECONDS).maxWait(2, MINUTES).backOffFactor(1.5)
        .logInterval(3, MINUTES).createRetry();

    // retry if a merge occurs
    boolean shouldRetry = true;
    while (shouldRetry) {
        if (plan == null) {
            mappings = computeMappingFromFiles(fs, tableId, srcPath, maxTablets);
        } else {
            mappings = computeMappingFromPlan(fs, tableId, srcPath, maxTablets);
        }

        if (mappings.isEmpty()) {
            if (ignoreEmptyDir) {
                log.info("Attempted to import files from empty directory - {}. Zero files imported", srcPath);
                return;
            } else {
                throw new IllegalArgumentException("Attempted to import zero files from " + srcPath);
            }
        }

        BulkSerialize.writeLoadMapping(mappings, srcPath.toString(), fs::create);

        List<ByteBuffer> args = Arrays.asList(ByteBuffer.wrap(tableId.canonical().getBytes(UTF_8)),
            ByteBuffer.wrap(srcPath.toString().getBytes(UTF_8)),
            ByteBuffer.wrap((setTime + "").getBytes(UTF_8)));

        try {
            tableOps.doBulkFateOperation(args, tableName);
            shouldRetry = false;
        } catch (AccumuloBulkMergeException ae) {
            if (plan != null) {
                checkPlanForSplits(ae);
            }
            try {
                retry.waitForNextAttempt();
            } catch (InterruptedException e) {
                throw new RuntimeException(e);
            }
            log.info("{}. Retrying bulk import to {}", ae.getMessage(), tableName);
        }
    }
}
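For context, this load() is what ultimately runs when a client drives the public bulk import builder. A typical invocation looks roughly like the sketch below; the properties file path, source directory, and table name are placeholders:

import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;

// Client-side entry point that ends up in BulkImport.load(); all literal
// strings below are placeholders.
try (AccumuloClient client = Accumulo.newClient()
        .from("/path/to/accumulo-client.properties").build()) {
    client.tableOperations().importDirectory("/tmp/bulk-load-dir").to("mytable").load();
}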
Use of org.apache.accumulo.core.dataImpl.KeyExtent in project accumulo by apache.
The class BulkImport, method estimateSizes.
public static Map<KeyExtent, Long> estimateSizes(AccumuloConfiguration acuConf, Path mapFile,
    long fileSize, Collection<KeyExtent> extents, FileSystem ns, Cache<String, Long> fileLenCache,
    CryptoService cs) throws IOException {

    if (extents.size() == 1) {
        return Collections.singletonMap(extents.iterator().next(), fileSize);
    }

    long totalIndexEntries = 0;
    Map<KeyExtent, MLong> counts = new TreeMap<>();
    for (KeyExtent keyExtent : extents) {
        counts.put(keyExtent, new MLong(0));
    }

    Text row = new Text();

    FileSKVIterator index = FileOperations.getInstance().newIndexReaderBuilder()
        .forFile(mapFile.toString(), ns, ns.getConf(), cs).withTableConfiguration(acuConf)
        .withFileLenCache(fileLenCache).build();

    try {
        while (index.hasTop()) {
            Key key = index.getTopKey();
            totalIndexEntries++;
            key.getRow(row);

            // TODO this could use a binary search
            for (Entry<KeyExtent, MLong> entry : counts.entrySet()) {
                if (entry.getKey().contains(row)) {
                    entry.getValue().l++;
                }
            }

            index.next();
        }
    } finally {
        try {
            if (index != null) {
                index.close();
            }
        } catch (IOException e) {
            log.debug("Failed to close " + mapFile, e);
        }
    }

    Map<KeyExtent, Long> results = new TreeMap<>();
    for (KeyExtent keyExtent : extents) {
        double numEntries = counts.get(keyExtent).l;
        if (numEntries == 0) {
            numEntries = 1;
        }
        long estSize = (long) ((numEntries / totalIndexEntries) * fileSize);
        results.put(keyExtent, estSize);
    }

    return results;
}
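The estimate assigns each extent a share of the file size proportional to the fraction of index entries whose rows it contains, with a floor of one entry so no extent is estimated at zero. A quick worked example of the arithmetic:

// An extent matching 25 of 100 index entries in a 40 MiB file is estimated
// at one quarter of the file, i.e. 10 MiB.
long fileSize = 40L * 1024 * 1024;
long totalIndexEntries = 100;
double numEntries = 25;
long estSize = (long) ((numEntries / totalIndexEntries) * fileSize); // 10485760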