Usage example of org.apache.accumulo.core.metadata.schema.DataFileValue in the Apache Accumulo project.
From the class MetadataTableUtil, method splitDatafiles.
/**
 * Partitions a tablet's data files between the two children of a split at {@code midRow}.
 *
 * <p>For each file whose first and last rows are known, the file is assigned wholly to the
 * high child (first row after midRow) or wholly to the low child (last row at or before
 * midRow). Otherwise the file's size and entry count are apportioned between both children
 * using {@code splitRatio} (floor for the low side, ceil for the high side).
 *
 * @param midRow split point row
 * @param splitRatio fraction of a straddling file attributed to the low child
 * @param firstAndLastRows known first/last rows per file; files absent here are apportioned
 * @param datafiles files of the tablet being split, with their sizes
 * @param lowDatafileSizes output: files/sizes for the low child
 * @param highDatafileSizes output: files/sizes for the high child
 * @param highDatafilesToRemove output: files that must be removed from the high child
 */
public static void splitDatafiles(Text midRow, double splitRatio, Map<FileRef, FileUtil.FileInfo> firstAndLastRows, SortedMap<FileRef, DataFileValue> datafiles, SortedMap<FileRef, DataFileValue> lowDatafileSizes, SortedMap<FileRef, DataFileValue> highDatafileSizes, List<FileRef> highDatafilesToRemove) {
for (Entry<FileRef, DataFileValue> datafile : datafiles.entrySet()) {
FileRef ref = datafile.getKey();
DataFileValue dfv = datafile.getValue();
FileUtil.FileInfo rowInfo = firstAndLastRows.get(ref);
if (rowInfo != null && rowInfo.getFirstRow().compareTo(midRow) > 0) {
// entire file lies above the split point: high child only
highDatafileSizes.put(ref, new DataFileValue(dfv.getSize(), dfv.getNumEntries(), dfv.getTime()));
} else if (rowInfo != null && rowInfo.getLastRow().compareTo(midRow) <= 0) {
// entire file lies at or below the split point: low child only
lowDatafileSizes.put(ref, new DataFileValue(dfv.getSize(), dfv.getNumEntries(), dfv.getTime()));
highDatafilesToRemove.add(ref);
} else {
// rows unknown, or the file straddles midRow: apportion by splitRatio
long lowSize = (long) Math.floor(dfv.getSize() * splitRatio);
long lowEntries = (long) Math.floor(dfv.getNumEntries() * splitRatio);
lowDatafileSizes.put(ref, new DataFileValue(lowSize, lowEntries, dfv.getTime()));
long highSize = (long) Math.ceil(dfv.getSize() * (1.0 - splitRatio));
long highEntries = (long) Math.ceil(dfv.getNumEntries() * (1.0 - splitRatio));
highDatafileSizes.put(ref, new DataFileValue(highSize, highEntries, dfv.getTime()));
}
}
}
Usage example of org.apache.accumulo.core.metadata.schema.DataFileValue in the Apache Accumulo project.
From the class MetadataTableUtil, method getDataFileSizes.
/**
 * Reads the data-file column family for the given tablet from the metadata table and
 * returns a sorted map of file reference to its stored {@link DataFileValue}.
 *
 * @param extent tablet whose metadata row is scanned
 * @param context client context used to open the metadata scanner
 * @return sorted map of file reference to size/entry-count metadata
 * @throws IOException if the volume manager cannot be obtained
 */
public static SortedMap<FileRef, DataFileValue> getDataFileSizes(KeyExtent extent, ClientContext context) throws IOException {
TreeMap<FileRef, DataFileValue> result = new TreeMap<>();
try (Scanner scanner = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY)) {
scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
Text metadataRow = extent.getMetadataEntry();
VolumeManager volumeManager = VolumeManagerImpl.get();
// restrict the scan to the data-file column family of this tablet's row
Key stopKey = new Key(metadataRow, DataFileColumnFamily.NAME, new Text("")).followingKey(PartialKey.ROW_COLFAM);
scanner.setRange(new Range(new Key(metadataRow), stopKey));
for (Entry<Key, Value> entry : scanner) {
// defensive: stop as soon as the scan leaves this tablet's row
if (!entry.getKey().getRow().equals(metadataRow)) {
break;
}
result.put(new FileRef(volumeManager, entry.getKey()), new DataFileValue(entry.getValue().get()));
}
}
return result;
}
Usage example of org.apache.accumulo.core.metadata.schema.DataFileValue in the Apache Accumulo project.
From the class MetadataTableUtil, method updateTabletDataFile.
/**
 * Records a bulk import's data files in a tablet's metadata row: each file gets a data-file
 * entry with its estimated size and a bulk-file entry tagged with the transaction id, and the
 * tablet's time column is updated, all in a single mutation.
 *
 * @param tid bulk import transaction id, stored in the bulk-file column
 * @param extent tablet whose metadata row is updated
 * @param estSizes estimated size/entry-count per imported file
 * @param time new value for the tablet's time column
 * @param context client context used to apply the mutation
 * @param zooLock lock proving this server may update the metadata table
 */
public static void updateTabletDataFile(long tid, KeyExtent extent, Map<FileRef, DataFileValue> estSizes, String time, ClientContext context, ZooLock zooLock) {
Mutation mutation = new Mutation(extent.getMetadataEntry());
byte[] tidBytes = Long.toString(tid).getBytes(UTF_8);
estSizes.forEach((ref, dfv) -> {
Text metaFile = ref.meta();
mutation.put(DataFileColumnFamily.NAME, metaFile, new Value(dfv.encode()));
mutation.put(TabletsSection.BulkFileColumnFamily.NAME, metaFile, new Value(tidBytes));
});
TabletsSection.ServerColumnFamily.TIME_COLUMN.put(mutation, new Value(time.getBytes(UTF_8)));
update(context, zooLock, mutation, extent);
}
Usage example of org.apache.accumulo.core.metadata.schema.DataFileValue in the Apache Accumulo project.
From the test class CloneIT, method createTablet.
/**
 * Builds a metadata mutation describing a tablet: its prev-row marker, time and directory
 * columns, and a single data file with a fixed {@code DataFileValue(10, 200)}.
 *
 * @param tid table id of the tablet
 * @param endRow end row, or null for the last tablet
 * @param prevRow previous end row, or null for the first tablet
 * @param dir tablet directory stored in the directory column
 * @param file data file name stored in the data-file column family
 * @return mutation ready to be written to the metadata table
 */
private static Mutation createTablet(String tid, String endRow, String prevRow, String dir, String file) throws Exception {
KeyExtent ke = new KeyExtent(Table.ID.of(tid), endRow == null ? null : new Text(endRow), prevRow == null ? null : new Text(prevRow));
Mutation mut = ke.getPrevRowUpdateMutation();
// use an explicit charset: no-arg getBytes() depends on the platform default encoding
TabletsSection.ServerColumnFamily.TIME_COLUMN.put(mut, new Value("M0".getBytes(java.nio.charset.StandardCharsets.UTF_8)));
TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(mut, new Value(dir.getBytes(java.nio.charset.StandardCharsets.UTF_8)));
mut.put(DataFileColumnFamily.NAME.toString(), file, new DataFileValue(10, 200).encodeAsString());
return mut;
}
Usage example of org.apache.accumulo.core.metadata.schema.DataFileValue in the Apache Accumulo project.
From the test class CloneIT, method testFilesChange.
/**
 * Verifies clone consistency checking when the source tablet's file set changes between
 * clone initialization and the first check: the first checkClone pass must report one
 * unresolved tablet, the second pass must succeed, and the clone must end up referencing
 * only the replacement file.
 */
@Test
public void testFilesChange() throws Exception {
Connector conn = getConnector();
String tableName = getUniqueNames(1)[0];
conn.tableOperations().create(tableName);
KeyExtent ke = new KeyExtent(Table.ID.of("0"), null, null);
Mutation mut = ke.getPrevRowUpdateMutation();
// use an explicit charset: no-arg getBytes() depends on the platform default encoding
TabletsSection.ServerColumnFamily.TIME_COLUMN.put(mut, new Value("M0".getBytes(java.nio.charset.StandardCharsets.UTF_8)));
TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(mut, new Value("/default_tablet".getBytes(java.nio.charset.StandardCharsets.UTF_8)));
mut.put(DataFileColumnFamily.NAME.toString(), "/default_tablet/0_0.rf", new DataFileValue(1, 200).encodeAsString());
// close both writers so buffered mutations are committed and resources released
try (BatchWriter bw1 = conn.createBatchWriter(tableName, new BatchWriterConfig());
BatchWriter bw2 = conn.createBatchWriter(tableName, new BatchWriterConfig())) {
bw1.addMutation(mut);
bw1.flush();
MetadataTableUtil.initializeClone(tableName, Table.ID.of("0"), Table.ID.of("1"), conn, bw2);
// swap the source tablet's file after the clone was initialized
Mutation mut2 = new Mutation(ke.getMetadataEntry());
mut2.putDelete(DataFileColumnFamily.NAME.toString(), "/default_tablet/0_0.rf");
mut2.put(DataFileColumnFamily.NAME.toString(), "/default_tablet/1_0.rf", new DataFileValue(2, 300).encodeAsString());
bw1.addMutation(mut2);
bw1.flush();
// first pass notices the changed file set, second pass converges
int rc = MetadataTableUtil.checkClone(tableName, Table.ID.of("0"), Table.ID.of("1"), conn, bw2);
assertEquals(1, rc);
rc = MetadataTableUtil.checkClone(tableName, Table.ID.of("0"), Table.ID.of("1"), conn, bw2);
assertEquals(0, rc);
}
HashSet<String> files = new HashSet<>();
try (Scanner scanner = conn.createScanner(tableName, Authorizations.EMPTY)) {
scanner.setRange(new KeyExtent(Table.ID.of("1"), null, null).toMetadataRange());
for (Entry<Key, Value> entry : scanner) {
if (entry.getKey().getColumnFamily().equals(DataFileColumnFamily.NAME))
files.add(entry.getKey().getColumnQualifier().toString());
}
}
// the clone must reference only the replacement file, rewritten relative to table 0
assertEquals(1, files.size());
assertTrue(files.contains("../0/default_tablet/1_0.rf"));
}
End of aggregated DataFileValue usage examples.