Use of org.apache.accumulo.core.metadata.schema.DataFileValue in project accumulo by apache:
class MetadataTableUtil, method updateTabletVolumes.
/**
 * Updates the WAL entries, data files, and (optionally) the directory recorded in the metadata
 * for the given tablet. New log entries are added before old ones are removed so that a process
 * death part-way through never leaves the tablet without a needed WAL reference.
 *
 * <p>The root tablet's metadata lives in ZooKeeper rather than the metadata table, so for it only
 * log changes are permitted and they are applied via the ZooKeeper helpers; all other tablets are
 * updated with a single metadata-table mutation.
 *
 * @param extent tablet whose metadata is updated
 * @param logsToRemove WAL entries to delete from the tablet's metadata
 * @param logsToAdd WAL entries to add to the tablet's metadata
 * @param filesToRemove data files to delete (must be empty for the root tablet)
 * @param filesToAdd data files and their sizes to add (must be empty for the root tablet)
 * @param newDir new tablet directory, or null to leave it unchanged (must be null for root)
 * @param zooLock lock proving this server may mutate tablet metadata
 * @param context server context used to apply the update
 * @throws IllegalArgumentException if file or directory changes are requested for the root tablet
 */
public static void updateTabletVolumes(KeyExtent extent, List<LogEntry> logsToRemove, List<LogEntry> logsToAdd, List<FileRef> filesToRemove, SortedMap<FileRef, DataFileValue> filesToAdd, String newDir, ZooLock zooLock, AccumuloServerContext context) {
  if (extent.isRootTablet()) {
    if (newDir != null) {
      throw new IllegalArgumentException("newDir not expected for " + extent);
    }
    if (!filesToRemove.isEmpty() || !filesToAdd.isEmpty()) {
      throw new IllegalArgumentException("files not expected for " + extent);
    }
    // add before removing in case of process death
    for (LogEntry logEntry : logsToAdd) {
      addRootLogEntry(context, zooLock, logEntry);
    }
    removeUnusedWALEntries(context, extent, logsToRemove, zooLock);
  } else {
    // all changes for a non-root tablet go into one atomic mutation on its metadata row
    Mutation m = new Mutation(extent.getMetadataEntry());
    for (LogEntry logEntry : logsToRemove) {
      m.putDelete(logEntry.getColumnFamily(), logEntry.getColumnQualifier());
    }
    for (LogEntry logEntry : logsToAdd) {
      m.put(logEntry.getColumnFamily(), logEntry.getColumnQualifier(), logEntry.getValue());
    }
    for (FileRef fileRef : filesToRemove) {
      m.putDelete(DataFileColumnFamily.NAME, fileRef.meta());
    }
    for (Entry<FileRef, DataFileValue> entry : filesToAdd.entrySet()) {
      m.put(DataFileColumnFamily.NAME, entry.getKey().meta(), new Value(entry.getValue().encode()));
    }
    if (newDir != null) {
      ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(newDir.getBytes(UTF_8)));
    }
    update(context, m, extent);
  }
}
Use of org.apache.accumulo.core.metadata.schema.DataFileValue in project accumulo by apache:
class MetadataTableUtil, method getFileAndLogEntries.
/**
 * Collects the write-ahead log entries and data files (with their recorded sizes) for a tablet.
 * The root tablet is special-cased: its WALs come from ZooKeeper and its files are listed
 * directly from the volume, with sizes reported as zero. Every other tablet is read from its row
 * in the root or metadata table.
 *
 * @param context client context used to scan the system table
 * @param extent tablet whose logs and files are fetched
 * @return a pair of the tablet's log entries and a sorted map of its files to their sizes
 * @throws RuntimeException if the scan returns an unexpected row or column family
 */
public static Pair<List<LogEntry>, SortedMap<FileRef, DataFileValue>> getFileAndLogEntries(ClientContext context, KeyExtent extent) throws KeeperException, InterruptedException, IOException {
  ArrayList<LogEntry> logEntries = new ArrayList<>();
  TreeMap<FileRef, DataFileValue> fileSizes = new TreeMap<>();
  VolumeManager volMgr = VolumeManagerImpl.get();
  if (extent.isRootTablet()) {
    getRootLogEntries(logEntries);
    Path rootDir = new Path(getRootTabletDir());
    for (FileStatus status : volMgr.listStatus(rootDir)) {
      String pathStr = status.getPath().toString();
      // skip in-progress temporary files
      if (pathStr.endsWith("_tmp")) {
        continue;
      }
      fileSizes.put(new FileRef(pathStr, status.getPath()), new DataFileValue(0, 0));
    }
  } else {
    // a non-root tablet's metadata lives in the root table (for metadata tablets) or the
    // metadata table (for user tablets)
    Table.ID systemTableToCheck = extent.isMeta() ? RootTable.ID : MetadataTable.ID;
    try (Scanner scanner = new ScannerImpl(context, systemTableToCheck, Authorizations.EMPTY)) {
      scanner.fetchColumnFamily(LogColumnFamily.NAME);
      scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
      scanner.setRange(extent.toMetadataRange());
      for (Entry<Key, Value> entry : scanner) {
        Key key = entry.getKey();
        if (!key.getRow().equals(extent.getMetadataEntry())) {
          throw new RuntimeException("Unexpected row " + key.getRow() + " expected " + extent.getMetadataEntry());
        }
        if (key.getColumnFamily().equals(LogColumnFamily.NAME)) {
          logEntries.add(LogEntry.fromKeyValue(key, entry.getValue()));
        } else if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
          fileSizes.put(new FileRef(volMgr, key), new DataFileValue(entry.getValue().get()));
        } else {
          throw new RuntimeException("Unexpected col fam " + key.getColumnFamily());
        }
      }
    }
  }
  return new Pair<>(logEntries, fileSizes);
}
Use of org.apache.accumulo.core.metadata.schema.DataFileValue in project accumulo by apache:
class MetadataConstraintsTest, method testBulkFileCheck.
/**
 * Verifies the bulk-load constraint (violation code 8): a bulk-file column referencing an
 * inactive transaction id, or a bulk-file column without a matching data-file column, is a
 * violation — except for mutations that look like a split or a tablet load, which are allowed
 * through. Repeated check/assert groups are factored into two private helpers.
 */
@Test
public void testBulkFileCheck() {
  MetadataConstraints mc = new TestMetadataConstraints();
  Mutation m;

  // inactive txid
  m = new Mutation(new Text("0;foo"));
  m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("12345".getBytes()));
  m.put(DataFileColumnFamily.NAME, new Text("/someFile"), new DataFileValue(1, 1).encodeAsValue());
  assertBulkViolation(mc, m);

  // txid that throws exception
  m = new Mutation(new Text("0;foo"));
  m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("9".getBytes()));
  m.put(DataFileColumnFamily.NAME, new Text("/someFile"), new DataFileValue(1, 1).encodeAsValue());
  assertBulkViolation(mc, m);

  // active txid w/ file
  m = new Mutation(new Text("0;foo"));
  m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("5".getBytes()));
  m.put(DataFileColumnFamily.NAME, new Text("/someFile"), new DataFileValue(1, 1).encodeAsValue());
  assertNoViolations(mc, m);

  // active txid w/o file
  m = new Mutation(new Text("0;foo"));
  m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("5".getBytes()));
  assertBulkViolation(mc, m);

  // two active txids w/ files
  m = new Mutation(new Text("0;foo"));
  m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("5".getBytes()));
  m.put(DataFileColumnFamily.NAME, new Text("/someFile"), new DataFileValue(1, 1).encodeAsValue());
  m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile2"), new Value("7".getBytes()));
  m.put(DataFileColumnFamily.NAME, new Text("/someFile2"), new DataFileValue(1, 1).encodeAsValue());
  assertBulkViolation(mc, m);

  // two files w/ one active txid
  m = new Mutation(new Text("0;foo"));
  m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("5".getBytes()));
  m.put(DataFileColumnFamily.NAME, new Text("/someFile"), new DataFileValue(1, 1).encodeAsValue());
  m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile2"), new Value("5".getBytes()));
  m.put(DataFileColumnFamily.NAME, new Text("/someFile2"), new DataFileValue(1, 1).encodeAsValue());
  assertNoViolations(mc, m);

  // two loaded w/ one active txid and one file
  m = new Mutation(new Text("0;foo"));
  m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("5".getBytes()));
  m.put(DataFileColumnFamily.NAME, new Text("/someFile"), new DataFileValue(1, 1).encodeAsValue());
  m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile2"), new Value("5".getBytes()));
  assertBulkViolation(mc, m);

  // active txid, mutation that looks like split
  m = new Mutation(new Text("0;foo"));
  m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("5".getBytes()));
  TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value("/t1".getBytes()));
  assertNoViolations(mc, m);

  // inactive txid, mutation that looks like split
  m = new Mutation(new Text("0;foo"));
  m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("12345".getBytes()));
  TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value("/t1".getBytes()));
  assertNoViolations(mc, m);

  // active txid, mutation that looks like a load
  m = new Mutation(new Text("0;foo"));
  m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("5".getBytes()));
  m.put(TabletsSection.CurrentLocationColumnFamily.NAME, new Text("789"), new Value("127.0.0.1:9997".getBytes()));
  assertNoViolations(mc, m);

  // inactive txid, mutation that looks like a load
  m = new Mutation(new Text("0;foo"));
  m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("12345".getBytes()));
  m.put(TabletsSection.CurrentLocationColumnFamily.NAME, new Text("789"), new Value("127.0.0.1:9997".getBytes()));
  assertNoViolations(mc, m);

  // deleting a load flag
  m = new Mutation(new Text("0;foo"));
  m.putDelete(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"));
  assertNoViolations(mc, m);
}

// Asserts the mutation produces exactly one violation: the bulk-load check, code 8.
private void assertBulkViolation(MetadataConstraints mc, Mutation m) {
  List<Short> violations = mc.check(null, m);
  assertNotNull(violations);
  assertEquals(1, violations.size());
  assertEquals(Short.valueOf((short) 8), violations.get(0));
}

// Asserts the mutation passes the constraint (a null violation list means no violations).
private void assertNoViolations(MetadataConstraints mc, Mutation m) {
  assertNull(mc.check(null, m));
}
Use of org.apache.accumulo.core.metadata.schema.DataFileValue in project accumulo by apache:
class MetadataBulkLoadFilterTest, method testBasic.
/**
 * During a non-full major compaction, the bulk-load filter must keep directory columns, data
 * files, bulk markers for active transactions (5, 7, 9 per the test fixture), and markers with
 * unparseable values, while dropping bulk markers whose transactions are inactive.
 */
@Test
public void testBasic() throws IOException {
  TreeMap<Key, Value> input = new TreeMap<>();
  TreeMap<Key, Value> expected = new TreeMap<>();

  // entries the filter must keep
  put(input, "2;m", TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN, "/t1");
  put(input, "2;m", DataFileColumnFamily.NAME, "/t1/file1", new DataFileValue(1, 1).encodeAsString());
  put(input, "2;m", TabletsSection.BulkFileColumnFamily.NAME, "/t1/file1", "5");
  put(input, "2;m", TabletsSection.BulkFileColumnFamily.NAME, "/t1/file3", "7");
  put(input, "2;m", TabletsSection.BulkFileColumnFamily.NAME, "/t1/file4", "9");
  put(input, "2<", TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN, "/t2");
  put(input, "2<", DataFileColumnFamily.NAME, "/t2/file2", new DataFileValue(1, 1).encodeAsString());
  put(input, "2<", TabletsSection.BulkFileColumnFamily.NAME, "/t2/file6", "5");
  put(input, "2<", TabletsSection.BulkFileColumnFamily.NAME, "/t2/file7", "7");
  put(input, "2<", TabletsSection.BulkFileColumnFamily.NAME, "/t2/file8", "9");
  put(input, "2<", TabletsSection.BulkFileColumnFamily.NAME, "/t2/fileC", null);
  expected.putAll(input);

  // entries referencing inactive transactions; the filter must drop these
  put(input, "2;m", TabletsSection.BulkFileColumnFamily.NAME, "/t1/file5", "8");
  put(input, "2<", TabletsSection.BulkFileColumnFamily.NAME, "/t2/file9", "8");
  put(input, "2<", TabletsSection.BulkFileColumnFamily.NAME, "/t2/fileA", "2");

  TestMetadataBulkLoadFilter filter = new TestMetadataBulkLoadFilter();
  filter.init(new SortedMapIterator(input), new HashMap<>(), new BaseIteratorEnvironment() {
    @Override
    public boolean isFullMajorCompaction() {
      return false;
    }

    @Override
    public IteratorScope getIteratorScope() {
      return IteratorScope.majc;
    }
  });
  filter.seek(new Range(), new ArrayList<>(), false);

  // drain the filtered iterator and compare against the expected survivors
  TreeMap<Key, Value> actual = new TreeMap<>();
  for (; filter.hasTop(); filter.next()) {
    actual.put(filter.getTopKey(), filter.getTopValue());
  }
  Assert.assertEquals(expected, actual);
}
Use of org.apache.accumulo.core.metadata.schema.DataFileValue in project accumulo by apache:
class CloneIT, method testMerge.
// test that a merge (tablet deletion) occurring mid-clone is detected by checkClone
@Test
public void testMerge() throws Exception {
  Connector conn = getConnector();
  String tableName = getUniqueNames(1)[0];
  conn.tableOperations().create(tableName);

  // create two adjacent tablets for table id "0"
  BatchWriter bw1 = conn.createBatchWriter(tableName, new BatchWriterConfig());
  bw1.addMutation(createTablet("0", "m", null, "/d1", "/d1/file1"));
  bw1.addMutation(createTablet("0", null, "m", "/d2", "/d2/file2"));
  bw1.flush();

  // begin cloning table "0" into table "1"
  BatchWriter bw2 = conn.createBatchWriter(tableName, new BatchWriterConfig());
  MetadataTableUtil.initializeClone(tableName, Table.ID.of("0"), Table.ID.of("1"), conn, bw2);

  // simulate a merge happening under the clone: delete one tablet and fold its file into the
  // surviving tablet, which now spans the whole range
  bw1.addMutation(deleteTablet("0", "m", null, "/d1", "/d1/file1"));
  Mutation mut = createTablet("0", null, null, "/d2", "/d2/file2");
  mut.put(DataFileColumnFamily.NAME.toString(), "/d1/file1", new DataFileValue(10, 200).encodeAsString());
  bw1.addMutation(mut);
  bw1.flush();

  try {
    MetadataTableUtil.checkClone(tableName, Table.ID.of("0"), Table.ID.of("1"), conn, bw2);
    throw new AssertionError("checkClone should have thrown TabletDeletedException");
  } catch (TabletIterator.TabletDeletedException tde) {
    // expected: a source tablet disappeared while the clone was in progress
  }
}
Aggregations