Use of org.apache.accumulo.core.client.Connector in the Apache Accumulo project: the CleanTmpIT class, test method.
// Verifies that a stray "*_tmp" file planted in a tablet directory is cleaned
// up when the cluster restarts and the tablet goes through log recovery.
@Test
public void test() throws Exception {
  Connector c = getConnector();
  // make a table
  String tableName = getUniqueNames(1)[0];
  c.tableOperations().create(tableName);
  // Write one row and compact it into a file, then write a second row that
  // stays in memory so a WAL exists at shutdown. try-with-resources ensures
  // the writer is closed even if a write or the compaction throws.
  try (BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig())) {
    Mutation m = new Mutation("row");
    m.put("cf", "cq", "value");
    bw.addMutation(m);
    bw.flush();
    // Compact memory to make a file
    c.tableOperations().compact(tableName, null, null, true, true);
    // Make sure that we'll have a WAL
    m = new Mutation("row2");
    m.put("cf", "cq", "value");
    bw.addMutation(m);
  }
  // Locate the tablet's single data file via the metadata table, so we know
  // which directory to plant the fake _tmp file in.
  String id = c.tableOperations().tableIdMap().get(tableName);
  Path file;
  try (Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
    s.setRange(Range.prefix(id));
    s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
    Entry<Key, Value> entry = Iterables.getOnlyElement(s);
    file = new Path(entry.getKey().getColumnQualifier().toString());
  }
  FileSystem fs = getCluster().getFileSystem();
  assertTrue("Could not find file: " + file, fs.exists(file));
  Path tabletDir = file.getParent();
  assertNotNull("Tablet dir should not be null", tabletDir);
  // create a fake _tmp file in the tablet's directory
  Path tmp = new Path(tabletDir, "junk.rf_tmp");
  fs.create(tmp).close();
  log.info("Created tmp file {}", tmp.toString());
  // Restart the cluster; the unflushed second row forces log recovery.
  getCluster().stop();
  getCluster().start();
  try (Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY)) {
    assertEquals(2, Iterators.size(scanner.iterator()));
    // If we performed log recovery, we should have cleaned up any stray files
    assertFalse("File still exists: " + tmp, fs.exists(tmp));
  }
}
Use of org.apache.accumulo.core.client.Connector in the Apache Accumulo project: the CloneTestIT class, testProps method.
// Verifies that cloning a table applies property overrides, honors the
// exclusion set (excluded properties revert to defaults), and inherits the
// remaining source-table properties.
@Test
public void testProps() throws Exception {
  String[] tableNames = getUniqueNames(2);
  String table1 = tableNames[0];
  String table2 = tableNames[1];
  Connector c = getConnector();
  c.tableOperations().create(table1);
  // Source-table properties: one to be overridden, one to be excluded, one to inherit.
  c.tableOperations().setProperty(table1, Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "1M");
  c.tableOperations().setProperty(table1, Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE_INDEX.getKey(), "2M");
  c.tableOperations().setProperty(table1, Property.TABLE_FILE_MAX.getKey(), "23");
  // try-with-resources ensures the writer is closed even if the clone throws.
  try (BatchWriter bw = writeData(table1, c)) {
    Map<String, String> props = new HashMap<>();
    props.put(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "500K");
    Set<String> exclude = new HashSet<>();
    exclude.add(Property.TABLE_FILE_MAX.getKey());
    c.tableOperations().clone(table1, table2, true, props, exclude);
    // Write to the source AFTER the clone; the clone must not see this row.
    Mutation m3 = new Mutation("009");
    m3.put("data", "x", "1");
    m3.put("data", "y", "2");
    bw.addMutation(m3);
  }
  checkData(table2, c);
  checkMetadata(table2, c);
  HashMap<String, String> tableProps = new HashMap<>();
  for (Entry<String, String> prop : c.tableOperations().getProperties(table2)) {
    tableProps.put(prop.getKey(), prop.getValue());
  }
  // Overridden, excluded (back to default), and inherited, respectively.
  Assert.assertEquals("500K", tableProps.get(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey()));
  Assert.assertEquals(Property.TABLE_FILE_MAX.getDefaultValue(), tableProps.get(Property.TABLE_FILE_MAX.getKey()));
  Assert.assertEquals("2M", tableProps.get(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE_INDEX.getKey()));
  c.tableOperations().delete(table1);
  c.tableOperations().delete(table2);
}
Use of org.apache.accumulo.core.client.Connector in the Apache Accumulo project: the CloneTestIT class, testDeleteClone method.
// Verifies that deleting a table removes its files from the filesystem, and
// that deleting a clone's source table does not affect the clone's data.
@Test
public void testDeleteClone() throws Exception {
  String[] tableNames = getUniqueNames(3);
  String table1 = tableNames[0];
  String table2 = tableNames[1];
  String table3 = tableNames[2];
  Connector c = getConnector();
  AccumuloCluster cluster = getCluster();
  // Requires direct filesystem-path access, so only runs against MiniAccumuloCluster.
  Assume.assumeTrue(cluster instanceof MiniAccumuloClusterImpl);
  MiniAccumuloClusterImpl mac = (MiniAccumuloClusterImpl) cluster;
  String rootPath = mac.getConfig().getDir().getAbsolutePath();
  // verify that deleting a new table removes the files
  c.tableOperations().create(table3);
  writeData(table3, c).close();
  c.tableOperations().flush(table3, null, null, true);
  // check for files
  FileSystem fs = getCluster().getFileSystem();
  String id = c.tableOperations().tableIdMap().get(table3);
  FileStatus[] status = fs.listStatus(new Path(rootPath + "/accumulo/tables/" + id));
  assertTrue(status.length > 0);
  // verify disk usage
  List<DiskUsage> diskUsage = c.tableOperations().getDiskUsage(Collections.singleton(table3));
  assertEquals(1, diskUsage.size());
  assertTrue(diskUsage.get(0).getUsage() > 100);
  // delete the table
  c.tableOperations().delete(table3);
  // verify it's gone from the file system
  Path tablePath = new Path(rootPath + "/accumulo/tables/" + id);
  if (fs.exists(tablePath)) {
    status = fs.listStatus(tablePath);
    assertTrue(status == null || status.length == 0);
  }
  c.tableOperations().create(table1);
  // try-with-resources ensures the writer is closed even if the clone throws.
  try (BatchWriter bw = writeData(table1, c)) {
    Map<String, String> props = new HashMap<>();
    props.put(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "500K");
    Set<String> exclude = new HashSet<>();
    exclude.add(Property.TABLE_FILE_MAX.getKey());
    c.tableOperations().clone(table1, table2, true, props, exclude);
    // Write to the source AFTER the clone; the clone must not see this row.
    Mutation m3 = new Mutation("009");
    m3.put("data", "x", "1");
    m3.put("data", "y", "2");
    bw.addMutation(m3);
  }
  // delete source table, should not affect clone
  c.tableOperations().delete(table1);
  checkData(table2, c);
  // Compact the clone to force it to rewrite files it shared with the
  // (now deleted) source, then verify the data again.
  c.tableOperations().compact(table2, null, null, true, true);
  checkData(table2, c);
  c.tableOperations().delete(table2);
}
Use of org.apache.accumulo.core.client.Connector in the Apache Accumulo project: the CombinerIT class, aggregationTest method.
// Verifies that a SummingCombiner attached to a table aggregates values
// written to the same key: ten writes of 0..9 should sum to 45 (checkSum).
@Test
public void aggregationTest() throws Exception {
  Connector c = getConnector();
  String tableName = getUniqueNames(1)[0];
  c.tableOperations().create(tableName);
  IteratorSetting setting = new IteratorSetting(10, SummingCombiner.class);
  SummingCombiner.setEncodingType(setting, Type.STRING);
  SummingCombiner.setColumns(setting, Collections.singletonList(new IteratorSetting.Column("cf")));
  c.tableOperations().attachIterator(tableName, setting);
  BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
  for (int i = 0; i < 10; i++) {
    Mutation m = new Mutation("row1");
    // Use the String overload (as the other tests in this file do) instead of
    // getBytes(), which depends on the platform-default charset.
    m.put("cf", "col1", "" + i);
    bw.addMutation(m);
  }
  bw.close();
  checkSum(tableName, c);
}
Use of org.apache.accumulo.core.client.Connector in the Apache Accumulo project: the ConcurrencyIT class, run method.
// @formatter:off
// Timeline of the operations exercised by this test:
//
// Scan 0 |------------------------------|
// Scan 1 |----------|
// Minc 1 |-----|
// Scan 2 |----------|
// Scan 3 |---------------|
// Minc 2 |-----|
// Majc 1 |-----|
// @formatter:on
@Test
public void run() throws Exception {
  // Delegate to the shared runTest helper with a fresh connector and table name.
  runTest(getConnector(), getUniqueNames(1)[0]);
}
Aggregations