Use of org.apache.accumulo.core.metadata.schema.DataFileValue in project accumulo by apache.
The class MetadataBatchScanTest, method main.
public static void main(String[] args) throws Exception {
  ClientOpts opts = new ClientOpts();
  opts.parseArgs(MetadataBatchScanTest.class.getName(), args);
  // connects to a hardcoded local test instance rather than to the parsed options
  Instance inst = new ZooKeeperInstance(ClientConfiguration.create().withInstance("acu14").withZkHosts("localhost"));
  final Connector connector = inst.getConnector(opts.getPrincipal(), opts.getToken());
  // generate 99,999 unique random split points
  TreeSet<Long> splits = new TreeSet<>();
  Random r = new Random(42);
  while (splits.size() < 99999) {
    splits.add((r.nextLong() & 0x7fffffffffffffffL) % 1000000000000L);
  }
  // build a contiguous set of extents covering the whole table
  Table.ID tid = Table.ID.of("8");
  Text per = null;
  ArrayList<KeyExtent> extents = new ArrayList<>();
  for (Long split : splits) {
    Text er = new Text(String.format("%012d", split));
    KeyExtent ke = new KeyExtent(tid, er, per);
    per = er;
    extents.add(ke);
  }
  extents.add(new KeyExtent(tid, null, per));
  if (args[0].equals("write")) {
    // write a location entry for each extent to the metadata table
    BatchWriter bw = connector.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    for (KeyExtent extent : extents) {
      Mutation mut = extent.getPrevRowUpdateMutation();
      new TServerInstance(HostAndPort.fromParts("192.168.1.100", 4567), "DEADBEEF").putLocation(mut);
      bw.addMutation(mut);
    }
    bw.close();
  } else if (args[0].equals("writeFiles")) {
    // write a directory entry and five file entries for each extent
    BatchWriter bw = connector.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    for (KeyExtent extent : extents) {
      Mutation mut = new Mutation(extent.getMetadataEntry());
      String dir = "/t-" + UUID.randomUUID();
      TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(mut, new Value(dir.getBytes(UTF_8)));
      for (int i = 0; i < 5; i++) {
        mut.put(DataFileColumnFamily.NAME, new Text(dir + "/00000_0000" + i + ".map"), new DataFileValue(10000, 1000000).encodeAsValue());
      }
      bw.addMutation(mut);
    }
    bw.close();
  } else if (args[0].equals("scan")) {
    int numThreads = Integer.parseInt(args[1]);
    final int numLoop = Integer.parseInt(args[2]);
    int numLookups = Integer.parseInt(args[3]);
    // pick numLookups distinct extents and scan their metadata ranges concurrently
    HashSet<Integer> indexes = new HashSet<>();
    while (indexes.size() < numLookups) {
      indexes.add(r.nextInt(extents.size()));
    }
    final List<Range> ranges = new ArrayList<>();
    for (Integer i : indexes) {
      ranges.add(extents.get(i).toMetadataRange());
    }
    Thread[] threads = new Thread[numThreads];
    for (int i = 0; i < threads.length; i++) {
      threads[i] = new Thread(new Runnable() {
        @Override
        public void run() {
          try {
            System.out.println(runScanTest(connector, numLoop, ranges));
          } catch (Exception e) {
            log.error("Exception while running scan test.", e);
          }
        }
      });
    }
    long t1 = System.currentTimeMillis();
    for (Thread thread : threads) {
      thread.start();
    }
    for (Thread thread : threads) {
      thread.join();
    }
    long t2 = System.currentTimeMillis();
    System.out.printf("tt : %6.2f%n", (t2 - t1) / 1000.0);
  } else {
    throw new IllegalArgumentException();
  }
}
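The writeFiles branch above stores a DataFileValue of 10,000 bytes and 1,000,000 entries for each synthetic file. A minimal sketch of the encode/decode round trip, assuming only the DataFileValue API visible in these snippets (the byte-array constructor parses the form produced by encodeAsValue()):

import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.metadata.schema.DataFileValue;

public class DataFileValueRoundTrip {
  public static void main(String[] args) {
    // size in bytes and estimated entry count, as written in the writeFiles branch
    DataFileValue dfv = new DataFileValue(10000, 1000000);
    // encodeAsValue() wraps the encoded bytes in a Value suitable for a metadata mutation
    Value v = dfv.encodeAsValue();
    // decoding: the byte-array constructor parses the encoded form back out
    DataFileValue decoded = new DataFileValue(v.get());
    System.out.println(decoded.getSize() + " " + decoded.getNumEntries());
  }
}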
Use of org.apache.accumulo.core.metadata.schema.DataFileValue in project accumulo by apache.
The class SplitRecoveryIT, method verifySame.
private void verifySame(SortedMap<FileRef,DataFileValue> datafileSizes, SortedMap<FileRef,DataFileValue> fixedDatafileSizes) throws Exception {
  // the two maps must contain exactly the same set of files...
  if (!datafileSizes.keySet().containsAll(fixedDatafileSizes.keySet()) || !fixedDatafileSizes.keySet().containsAll(datafileSizes.keySet())) {
    throw new Exception("Key sets not the same " + datafileSizes.keySet() + " != " + fixedDatafileSizes.keySet());
  }
  // ...and each file must map to an equal DataFileValue
  for (Entry<FileRef,DataFileValue> entry : datafileSizes.entrySet()) {
    DataFileValue dfv = entry.getValue();
    DataFileValue otherDfv = fixedDatafileSizes.get(entry.getKey());
    if (!dfv.equals(otherDfv)) {
      throw new Exception(entry.getKey() + " dfv not equal " + dfv + " " + otherDfv);
    }
  }
}
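A note on the value comparison: DataFileValue equality is by stored statistics (size and entry count), so values created separately but encoding the same numbers compare equal. A small illustrative sketch under that assumption:

import org.apache.accumulo.core.metadata.schema.DataFileValue;

public class DataFileValueEquality {
  public static void main(String[] args) {
    DataFileValue a = new DataFileValue(10000, 1000000);
    DataFileValue b = new DataFileValue(10000, 1000000);
    // verifySame relies on value equality, not object identity
    System.out.println(a.equals(b)); // expected: true
  }
}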
Use of org.apache.accumulo.core.metadata.schema.DataFileValue in project accumulo by apache.
The class TwoTierCompactionStrategy, method calculateTotalSize.
/**
 * Calculates the total size of the input files in the compaction plan.
 */
private Long calculateTotalSize(MajorCompactionRequest request, CompactionPlan plan) {
  long totalSize = 0;
  Map<FileRef,DataFileValue> allFiles = request.getFiles();
  // sum the on-disk size of every file the plan will compact
  for (FileRef fileRef : plan.inputFiles) {
    totalSize += allFiles.get(fileRef).getSize();
  }
  return totalSize;
}
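The returned total drives TwoTierCompactionStrategy's choice of write parameters for the compaction. A hedged sketch of enabling the strategy on a table; the strategy class name and the "file.large.compress.*" option keys match the strategy in the tserver module as I understand it, but verify them against your Accumulo version, and the helper below is hypothetical:

import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.Connector;

public class TwoTierSetup {
  // hypothetical helper, not part of the Accumulo API
  static void enable(Connector conn, String table) throws AccumuloException, AccumuloSecurityException {
    conn.tableOperations().setProperty(table, "table.majc.compaction.strategy",
        "org.apache.accumulo.tserver.compaction.strategies.TwoTierCompactionStrategy");
    // once calculateTotalSize exceeds the threshold, compact with the large-file codec instead
    conn.tableOperations().setProperty(table,
        "table.majc.compaction.strategy.opts.file.large.compress.threshold", "1G");
    conn.tableOperations().setProperty(table,
        "table.majc.compaction.strategy.opts.file.large.compress.type", "gz");
  }
}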
Use of org.apache.accumulo.core.metadata.schema.DataFileValue in project accumulo by apache.
The class TooManyDeletesCompactionStrategy, method gatherInformation.
@Override
public void gatherInformation(MajorCompactionRequest request) throws IOException {
  super.gatherInformation(request);
  // only consider DeletesSummarizer output generated with default (empty) options
  Predicate<SummarizerConfiguration> summarizerPredicate = conf -> conf.getClassName().equals(DeletesSummarizer.class.getName()) && conf.getOptions().isEmpty();
  long total = 0;
  long deletes = 0;
  for (Entry<FileRef,DataFileValue> entry : request.getFiles().entrySet()) {
    Collection<Summary> summaries = request.getSummaries(Collections.singleton(entry.getKey()), summarizerPredicate);
    if (summaries.size() == 1) {
      Summary summary = summaries.iterator().next();
      total += summary.getStatistics().get(TOTAL_STAT);
      deletes += summary.getStatistics().get(DELETES_STAT);
    } else {
      long numEntries = entry.getValue().getNumEntries();
      if (numEntries == 0 && !proceed_bns) {
        shouldCompact = false;
        return;
      } else {
        // no summary data, so use Accumulo's estimate of the total entries in the file
        total += numEntries;
      }
    }
  }
  long nonDeletes = total - deletes;
  // if nonDeletes is negative, that is clear evidence the estimates are off, so do not compact
  if (nonDeletes >= 0) {
    double ratio = deletes / (double) nonDeletes;
    shouldCompact = ratio >= threshold;
  } else {
    shouldCompact = false;
  }
}
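gatherInformation only finds summary data if the table's files were written with the DeletesSummarizer enabled with no options, matching the predicate above. A hedged sketch of that table setup; the strategy class name is an assumption to check against your Accumulo version, and the helper is hypothetical:

import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.TableExistsException;
import org.apache.accumulo.core.client.admin.NewTableConfiguration;
import org.apache.accumulo.core.client.summary.SummarizerConfiguration;
import org.apache.accumulo.core.client.summary.summarizers.DeletesSummarizer;

public class TooManyDeletesSetup {
  // hypothetical helper, not part of the Accumulo API
  static void createTable(Connector conn, String table)
      throws AccumuloException, AccumuloSecurityException, TableExistsException {
    // enable the summarizer with no options so the predicate in gatherInformation matches
    NewTableConfiguration ntc = new NewTableConfiguration()
        .enableSummarization(SummarizerConfiguration.builder(DeletesSummarizer.class).build());
    conn.tableOperations().create(table, ntc);
    conn.tableOperations().setProperty(table, "table.majc.compaction.strategy",
        "org.apache.accumulo.tserver.compaction.strategies.TooManyDeletesCompactionStrategy");
  }
}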
Use of org.apache.accumulo.core.metadata.schema.DataFileValue in project accumulo by apache.
The class DatafileManager, method reserveMergingMinorCompactionFile.
FileRef reserveMergingMinorCompactionFile() {
  if (mergingMinorCompactionFile != null)
    throw new IllegalStateException("Tried to reserve merging minor compaction file when already reserved : " + mergingMinorCompactionFile);
  if (tablet.getExtent().isRootTablet())
    return null;
  int maxFiles = tablet.getTableConfiguration().getMaxFilesPerTablet();
  // a major compaction is running and the tablet is exactly at the file limit; the major
  // compaction will reduce the file count, so there is no need to merge
  if (majorCompactingFiles.size() > 0 && datafileSizes.size() == maxFiles)
    return null;
  if (datafileSizes.size() >= maxFiles) {
    // find the smallest file that is not already being major compacted
    long maxFileSize = Long.MAX_VALUE;
    long maxMergingMinorCompactionFileSize = ConfigurationTypeHelper.getFixedMemoryAsBytes(tablet.getTableConfiguration().get(Property.TABLE_MINC_MAX_MERGE_FILE_SIZE));
    if (maxMergingMinorCompactionFileSize > 0) {
      maxFileSize = maxMergingMinorCompactionFileSize;
    }
    long min = maxFileSize;
    FileRef minName = null;
    for (Entry<FileRef,DataFileValue> entry : datafileSizes.entrySet()) {
      if (entry.getValue().getSize() <= min && !majorCompactingFiles.contains(entry.getKey())) {
        min = entry.getValue().getSize();
        minName = entry.getKey();
      }
    }
    if (minName == null)
      return null;
    mergingMinorCompactionFile = minName;
    return minName;
  }
  return null;
}
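The merge-size cap read through Property.TABLE_MINC_MAX_MERGE_FILE_SIZE can be tuned per table. A hedged sketch, assuming the property key resolves to "table.compaction.minor.merge.file.size.max" (with the default of 0 leaving candidate file size unlimited, as the code above implies); the helper is hypothetical:

import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.Connector;

public class MincMergeTuning {
  // hypothetical helper, not part of the Accumulo API
  static void capMergeFileSize(Connector conn, String table)
      throws AccumuloException, AccumuloSecurityException {
    // only files at or below 100M become candidates for a merging minor compaction
    conn.tableOperations().setProperty(table,
        "table.compaction.minor.merge.file.size.max", "100M");
  }
}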