Use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.
The class CleanTmpIT, method test.
@Test
public void test() throws Exception {
  Connector c = getConnector();
  // make a table
  String tableName = getUniqueNames(1)[0];
  c.tableOperations().create(tableName);
  // write to it
  BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
  Mutation m = new Mutation("row");
  m.put("cf", "cq", "value");
  bw.addMutation(m);
  bw.flush();
  // Compact memory to make a file
  c.tableOperations().compact(tableName, null, null, true, true);
  // Make sure that we'll have a WAL
  m = new Mutation("row2");
  m.put("cf", "cq", "value");
  bw.addMutation(m);
  bw.close();
  // create a fake _tmp file in its directory
  String id = c.tableOperations().tableIdMap().get(tableName);
  Path file;
  try (Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
    s.setRange(Range.prefix(id));
    s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
    Entry<Key, Value> entry = Iterables.getOnlyElement(s);
    file = new Path(entry.getKey().getColumnQualifier().toString());
  }
  FileSystem fs = getCluster().getFileSystem();
  assertTrue("Could not find file: " + file, fs.exists(file));
  Path tabletDir = file.getParent();
  assertNotNull("Tablet dir should not be null", tabletDir);
  Path tmp = new Path(tabletDir, "junk.rf_tmp");
  // Make the file
  fs.create(tmp).close();
  log.info("Created tmp file {}", tmp.toString());
  getCluster().stop();
  getCluster().start();
  try (Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY)) {
    assertEquals(2, Iterators.size(scanner.iterator()));
    // If we performed log recovery, we should have cleaned up any stray files
    assertFalse("File still exists: " + tmp, fs.exists(tmp));
  }
}
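The test above accepts the BatchWriterConfig defaults. When buffering behavior matters, the config can be tuned before the writer is created; a minimal sketch using the standard BatchWriterConfig setters (the values shown are illustrative, not taken from the test):

BatchWriterConfig config = new BatchWriterConfig()
    .setMaxMemory(10 * 1024 * 1024)       // buffer up to 10 MB of mutations client-side
    .setMaxLatency(30, TimeUnit.SECONDS)  // auto-flush buffered mutations after 30 seconds
    .setMaxWriteThreads(4);               // threads used to send mutations to tablet servers
BatchWriter writer = c.createBatchWriter(tableName, config);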
Use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.
The class CleanUpIT, method run.
@Test
public void run() throws Exception {
  String tableName = getUniqueNames(1)[0];
  getConnector().tableOperations().create(tableName);
  BatchWriter bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
  Mutation m1 = new Mutation("r1");
  m1.put("cf1", "cq1", 1, "5");
  bw.addMutation(m1);
  bw.flush();
  try (Scanner scanner = getConnector().createScanner(tableName, new Authorizations())) {
    int count = 0;
    for (Entry<Key, Value> entry : scanner) {
      count++;
      if (!entry.getValue().toString().equals("5")) {
        Assert.fail("Unexpected value " + entry.getValue());
      }
    }
    Assert.assertEquals("Unexpected count", 1, count);
    int threadCount = countThreads();
    if (threadCount < 2) {
      printThreadNames();
      Assert.fail("Not seeing expected threads. Saw " + threadCount);
    }
    CleanUp.shutdownNow();
    Mutation m2 = new Mutation("r2");
    m2.put("cf1", "cq1", 1, "6");
    try {
      bw.addMutation(m2);
      bw.flush();
      Assert.fail("batch writer did not fail");
    } catch (Exception e) {
      // expected: the client has been shut down
    }
    try {
      // expect this to fail also; calling close still cleans up batch writer threads
      bw.close();
      Assert.fail("batch writer close did not fail");
    } catch (Exception e) {
      // expected
    }
    try {
      count = 0;
      Iterator<Entry<Key, Value>> iter = scanner.iterator();
      while (iter.hasNext()) {
        iter.next();
        count++;
      }
      Assert.fail("scanner did not fail");
    } catch (Exception e) {
      // expected: scans cannot run after shutdown
    }
    threadCount = countThreads();
    if (threadCount > 0) {
      printThreadNames();
      Assert.fail("Threads did not go away. Saw " + threadCount);
    }
  }
}
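The countThreads and printThreadNames helpers are not part of this excerpt. A plausible sketch that enumerates live JVM threads, assuming the test is counting the client's ZooKeeper threads (the name filter below is an assumption, not confirmed by the excerpt):

// Assumed helpers, not shown in the excerpt above.
private int countThreads() {
  int count = 0;
  for (Thread t : Thread.getAllStackTraces().keySet()) {
    String name = t.getName().toLowerCase();
    // assumption: the client's ZooKeeper threads are the ones being counted
    if (name.contains("sendthread") || name.contains("eventthread"))
      count++;
  }
  return count;
}

private void printThreadNames() {
  for (Thread t : Thread.getAllStackTraces().keySet())
    System.out.println(t.getName());
}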
Use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.
The class CloneTestIT, method writeData.
private BatchWriter writeData(String table1, Connector c) throws TableNotFoundException, MutationsRejectedException {
  // write four entries across two rows; the caller is responsible for closing the returned writer
  BatchWriter bw = c.createBatchWriter(table1, new BatchWriterConfig());
  Mutation m1 = new Mutation("001");
  m1.put("data", "x", "9");
  m1.put("data", "y", "7");
  Mutation m2 = new Mutation("008");
  m2.put("data", "x", "3");
  m2.put("data", "y", "4");
  bw.addMutation(m1);
  bw.addMutation(m2);
  bw.flush();
  return bw;
}
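The checkData helper called by the tests below is not shown in this excerpt. A minimal sketch consistent with what writeData writes, assuming the helper compares the scanned entries to an expected map (the real assertions may differ):

// Hypothetical sketch of checkData. The "009" mutation is applied to the
// source table after the clone, so it should not appear in the clone.
private void checkData(String table, Connector c) throws TableNotFoundException {
  Map<String, String> expected = new HashMap<>();
  expected.put("001 data:x", "9");
  expected.put("001 data:y", "7");
  expected.put("008 data:x", "3");
  expected.put("008 data:y", "4");
  Map<String, String> actual = new HashMap<>();
  try (Scanner scanner = c.createScanner(table, Authorizations.EMPTY)) {
    for (Entry<Key, Value> entry : scanner) {
      Key k = entry.getKey();
      actual.put(k.getRow() + " " + k.getColumnFamily() + ":" + k.getColumnQualifier(), entry.getValue().toString());
    }
  }
  Assert.assertEquals(expected, actual);
}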
Use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.
The class CloneTestIT, method testProps.
@Test
public void testProps() throws Exception {
  String[] tableNames = getUniqueNames(2);
  String table1 = tableNames[0];
  String table2 = tableNames[1];
  Connector c = getConnector();
  c.tableOperations().create(table1);
  // set three properties on the source table
  c.tableOperations().setProperty(table1, Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "1M");
  c.tableOperations().setProperty(table1, Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE_INDEX.getKey(), "2M");
  c.tableOperations().setProperty(table1, Property.TABLE_FILE_MAX.getKey(), "23");
  BatchWriter bw = writeData(table1, c);
  // clone with flush=true, overriding the block size and excluding the file max
  Map<String, String> props = new HashMap<>();
  props.put(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "500K");
  Set<String> exclude = new HashSet<>();
  exclude.add(Property.TABLE_FILE_MAX.getKey());
  c.tableOperations().clone(table1, table2, true, props, exclude);
  // data written to the source after the clone should not appear in the clone
  Mutation m3 = new Mutation("009");
  m3.put("data", "x", "1");
  m3.put("data", "y", "2");
  bw.addMutation(m3);
  bw.close();
  checkData(table2, c);
  checkMetadata(table2, c);
  HashMap<String, String> tableProps = new HashMap<>();
  for (Entry<String, String> prop : c.tableOperations().getProperties(table2)) {
    tableProps.put(prop.getKey(), prop.getValue());
  }
  // overridden, excluded (reverts to default), and inherited properties respectively
  Assert.assertEquals("500K", tableProps.get(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey()));
  Assert.assertEquals(Property.TABLE_FILE_MAX.getDefaultValue(), tableProps.get(Property.TABLE_FILE_MAX.getKey()));
  Assert.assertEquals("2M", tableProps.get(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE_INDEX.getKey()));
  c.tableOperations().delete(table1);
  c.tableOperations().delete(table2);
}
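checkMetadata is likewise not shown. One way to validate the clone's metadata, reusing the metadata-scanning pattern from CleanTmpIT above (a hedged sketch; the real helper may assert more):

// Hypothetical sketch of checkMetadata: verify the clone's tablets reference
// at least one data file (a clone references the source's files rather than copying them).
private void checkMetadata(String table, Connector c) throws Exception {
  String id = c.tableOperations().tableIdMap().get(table);
  try (Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
    s.setRange(Range.prefix(id));
    s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
    Assert.assertTrue("expected file entries for " + table, Iterators.size(s.iterator()) > 0);
  }
}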
Use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.
The class CloneTestIT, method testDeleteClone.
@Test
public void testDeleteClone() throws Exception {
  String[] tableNames = getUniqueNames(3);
  String table1 = tableNames[0];
  String table2 = tableNames[1];
  String table3 = tableNames[2];
  Connector c = getConnector();
  AccumuloCluster cluster = getCluster();
  Assume.assumeTrue(cluster instanceof MiniAccumuloClusterImpl);
  MiniAccumuloClusterImpl mac = (MiniAccumuloClusterImpl) cluster;
  String rootPath = mac.getConfig().getDir().getAbsolutePath();
  // verify that deleting a new table removes its files
  c.tableOperations().create(table3);
  writeData(table3, c).close();
  c.tableOperations().flush(table3, null, null, true);
  // check for files
  FileSystem fs = getCluster().getFileSystem();
  String id = c.tableOperations().tableIdMap().get(table3);
  FileStatus[] status = fs.listStatus(new Path(rootPath + "/accumulo/tables/" + id));
  assertTrue(status.length > 0);
  // verify disk usage
  List<DiskUsage> diskUsage = c.tableOperations().getDiskUsage(Collections.singleton(table3));
  assertEquals(1, diskUsage.size());
  assertTrue(diskUsage.get(0).getUsage() > 100);
  // delete the table
  c.tableOperations().delete(table3);
  // verify it's gone from the file system
  Path tablePath = new Path(rootPath + "/accumulo/tables/" + id);
  if (fs.exists(tablePath)) {
    status = fs.listStatus(tablePath);
    assertTrue(status == null || status.length == 0);
  }
  // now clone a table and delete the source
  c.tableOperations().create(table1);
  BatchWriter bw = writeData(table1, c);
  Map<String, String> props = new HashMap<>();
  props.put(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "500K");
  Set<String> exclude = new HashSet<>();
  exclude.add(Property.TABLE_FILE_MAX.getKey());
  c.tableOperations().clone(table1, table2, true, props, exclude);
  Mutation m3 = new Mutation("009");
  m3.put("data", "x", "1");
  m3.put("data", "y", "2");
  bw.addMutation(m3);
  bw.close();
  // delete source table; should not affect the clone
  c.tableOperations().delete(table1);
  checkData(table2, c);
  c.tableOperations().compact(table2, null, null, true, true);
  checkData(table2, c);
  c.tableOperations().delete(table2);
}
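Until a compaction rewrites them, a clone references the same files as its source, and getDiskUsage should report tables that share files together in one DiskUsage entry. A hedged fragment illustrating that check right after a clone (before the source is compacted or deleted):

// Hypothetical fragment, not part of the test above: inspect the shared
// disk usage of source and clone.
List<DiskUsage> usage = c.tableOperations().getDiskUsage(new HashSet<>(Arrays.asList(table1, table2)));
for (DiskUsage du : usage) {
  log.info("tables {} use {} bytes", du.getTables(), du.getUsage());
}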