Use of org.apache.accumulo.core.client.Connector in project accumulo by Apache.
The class UserCompactionStrategyIT, method testIterators.
@Test
public void testIterators() throws Exception {
  // test compaction strategy + iterators
  Connector c = getConnector();
  String tableName = getUniqueNames(1)[0];
  c.tableOperations().create(tableName);
  writeFlush(c, tableName, "a");
  writeFlush(c, tableName, "b");
  // the full compaction creates a file whose name starts with A, containing rows 'a' and 'b'
  c.tableOperations().compact(tableName, new CompactionConfig().setWait(true));
  writeFlush(c, tableName, "c");
  writeFlush(c, tableName, "d");
  Assert.assertEquals(3, FunctionalTestUtils.countRFiles(c, tableName));
  // select only flushed files (names starting with F) as compaction input, excluding the A file
  CompactionStrategyConfig csConfig = new CompactionStrategyConfig(TestCompactionStrategy.class.getName());
  csConfig.setOptions(ImmutableMap.of("inputPrefix", "F"));
  IteratorSetting iterConf = new IteratorSetting(21, "myregex", RegExFilter.class);
  RegExFilter.setRegexs(iterConf, "a|c", null, null, null, false);
  c.tableOperations().compact(tableName, new CompactionConfig().setWait(true).setCompactionStrategy(csConfig).setIterators(Arrays.asList(iterConf)));
  // the compaction (and its filter) should only be applied to the F files; if it were also
  // applied to the A file, row 'b' would be dropped by the filter
  Assert.assertEquals(ImmutableSet.of("a", "b", "c"), getRows(c, tableName));
  Assert.assertEquals(2, FunctionalTestUtils.countRFiles(c, tableName));
  c.tableOperations().compact(tableName, new CompactionConfig().setWait(true));
  // ensure that the iterator is not applied to this subsequent compaction
  Assert.assertEquals(ImmutableSet.of("a", "b", "c"), getRows(c, tableName));
  Assert.assertEquals(1, FunctionalTestUtils.countRFiles(c, tableName));
}
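The writeFlush and getRows helpers are defined elsewhere in the test class. A minimal sketch of what they might look like, assuming the BatchWriterConfig-based writer API (the signatures and the empty column layout are assumptions, not the project's actual helpers):

// Sketch of the assumed helpers; the real test class defines its own versions.
private void writeFlush(Connector conn, String tableName, String row) throws Exception {
  BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
  Mutation m = new Mutation(row);
  m.put("", "", ""); // empty family, qualifier, and value; only the row matters here
  bw.addMutation(m);
  bw.close();
  // flush so each row lands in its own rfile (name starting with F)
  conn.tableOperations().flush(tableName, null, null, true);
}

private Set<String> getRows(Connector conn, String tableName) throws Exception {
  Set<String> rows = new HashSet<>();
  Scanner scanner = conn.createScanner(tableName, Authorizations.EMPTY);
  for (Map.Entry<Key,Value> entry : scanner) {
    rows.add(entry.getKey().getRow().toString());
  }
  scanner.close();
  return rows;
}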
Use of org.apache.accumulo.core.client.Connector in project accumulo by Apache.
The class UserCompactionStrategyIT, method testFileSize.
@Test
public void testFileSize() throws Exception {
  Connector c = getConnector();
  String tableName = getUniqueNames(1)[0];
  c.tableOperations().create(tableName);
  // write random data because it's very unlikely to compress
  writeRandomValue(c, tableName, 1 << 16);
  writeRandomValue(c, tableName, 1 << 16);
  writeRandomValue(c, tableName, 1 << 9);
  writeRandomValue(c, tableName, 1 << 7);
  writeRandomValue(c, tableName, 1 << 6);
  Assert.assertEquals(5, FunctionalTestUtils.countRFiles(c, tableName));
  // compact only files smaller than 32 KiB (1 << 15); the three small files merge into one
  CompactionStrategyConfig csConfig = new CompactionStrategyConfig(SizeCompactionStrategy.class.getName());
  csConfig.setOptions(ImmutableMap.of("size", "" + (1 << 15)));
  c.tableOperations().compact(tableName, new CompactionConfig().setWait(true).setCompactionStrategy(csConfig));
  Assert.assertEquals(3, FunctionalTestUtils.countRFiles(c, tableName));
  // raise the threshold to 128 KiB (1 << 17) so every remaining file qualifies
  csConfig = new CompactionStrategyConfig(SizeCompactionStrategy.class.getName());
  csConfig.setOptions(ImmutableMap.of("size", "" + (1 << 17)));
  c.tableOperations().compact(tableName, new CompactionConfig().setWait(true).setCompactionStrategy(csConfig));
  Assert.assertEquals(1, FunctionalTestUtils.countRFiles(c, tableName));
}
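writeRandomValue is another helper from the test class; the sizes are powers of two (1 << 16 is 64 KiB, 1 << 9 is 512 bytes, and so on), and random payloads keep the rfile sizes close to the written sizes. A minimal sketch, assuming the helper writes one mutation and flushes (the row naming and column layout are assumptions):

// Sketch of the assumed writeRandomValue helper.
private void writeRandomValue(Connector c, String tableName, int size) throws Exception {
  byte[] data = new byte[size];
  new Random().nextBytes(data); // random bytes defeat compression
  BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
  Mutation m = new Mutation("row" + System.nanoTime()); // hypothetical row naming
  m.put("cf", "cq", new Value(data));
  bw.addMutation(m);
  bw.close();
  // flush so the value lands in its own rfile
  c.tableOperations().flush(tableName, null, null, true);
}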
Use of org.apache.accumulo.core.client.Connector in project accumulo by Apache.
The class UsersIT, method testCreateExistingUser.
@Test
public void testCreateExistingUser() throws Exception {
  ClusterUser user0 = getUser(0);
  Connector conn = getConnector();
  Set<String> currentUsers = conn.securityOperations().listLocalUsers();
  // Ensure that the user exists
  if (!currentUsers.contains(user0.getPrincipal())) {
    PasswordToken token = null;
    if (!getCluster().getClientConfig().hasSasl()) {
      token = new PasswordToken(user0.getPassword());
    }
    conn.securityOperations().createLocalUser(user0.getPrincipal(), token);
  }
  try {
    conn.securityOperations().createLocalUser(user0.getPrincipal(), new PasswordToken("better_fail"));
    fail("Creating a user that already exists should throw an exception");
  } catch (AccumuloSecurityException e) {
    assertTrue("Expected USER_EXISTS error", SecurityErrorCode.USER_EXISTS == e.getSecurityErrorCode());
    String msg = e.getMessage();
    assertTrue("Error message didn't contain principal: '" + msg + "'", msg.contains(user0.getPrincipal()));
  }
}
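By contrast, dropping the user first makes a subsequent create succeed. A minimal sketch of that inverse case, assuming a password-based (non-SASL) cluster; this follow-up is illustrative and not part of the original test:

// Hypothetical follow-up: after dropping the user, creation succeeds again.
conn.securityOperations().dropLocalUser(user0.getPrincipal());
conn.securityOperations().createLocalUser(user0.getPrincipal(), new PasswordToken(user0.getPassword()));
assertTrue(conn.securityOperations().listLocalUsers().contains(user0.getPrincipal()));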
Use of org.apache.accumulo.core.client.Connector in project accumulo by Apache.
The class VerifySerialRecoveryIT, method testSerializedRecovery.
@Test(timeout = 4 * 60 * 1000)
public void testSerializedRecovery() throws Exception {
  // make a table with many splits
  String tableName = getUniqueNames(1)[0];
  Connector c = getConnector();
  c.tableOperations().create(tableName);
  SortedSet<Text> splits = new TreeSet<>();
  for (int i = 0; i < 200; i++) {
    splits.add(new Text(randomHex(8)));
  }
  c.tableOperations().addSplits(tableName, splits);
  // load data to give the recovery something to do
  BatchWriter bw = c.createBatchWriter(tableName, null);
  for (int i = 0; i < 50000; i++) {
    Mutation m = new Mutation(randomHex(8));
    m.put("", "", "");
    bw.addMutation(m);
  }
  bw.close();
  // kill the tserver
  for (ProcessReference ref : getCluster().getProcesses().get(ServerType.TABLET_SERVER)) {
    getCluster().killProcess(ServerType.TABLET_SERVER, ref);
  }
  final Process ts = cluster.exec(TabletServer.class);
  // wait for recovery
  Iterators.size(c.createScanner(tableName, Authorizations.EMPTY).iterator());
  assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
  ts.waitFor();
  String result = FunctionalTestUtils.readAll(cluster, TabletServer.class, ts);
  for (String line : result.split("\n")) {
    System.out.println(line);
  }
  // walk through the output, verifying that only a single normal recovery was running at any one time
  boolean started = false;
  int recoveries = 0;
  for (String line : result.split("\n")) {
    // ignore the metadata (!0) and root (+r) tables
    if (line.contains("!0") || line.contains("+r"))
      continue;
    if (line.contains("Starting Write-Ahead Log")) {
      assertFalse(started);
      started = true;
      recoveries++;
    }
    if (line.contains("Write-Ahead Log recovery complete")) {
      assertTrue(started);
      started = false;
    }
  }
  assertFalse(started);
  assertTrue(recoveries > 0);
}
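randomHex is a small helper used both for the split points and the row ids. A minimal sketch, assuming it returns len random hexadecimal characters (the exact implementation is an assumption):

// Sketch of the assumed randomHex helper.
private static String randomHex(int len) {
  Random random = new Random();
  StringBuilder sb = new StringBuilder(len);
  for (int i = 0; i < len; i++) {
    sb.append(Integer.toHexString(random.nextInt(16))); // one hex digit per iteration
  }
  return sb.toString();
}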
Use of org.apache.accumulo.core.client.Connector in project accumulo by Apache.
The class AddSplitIT, method addSplitTest.
@Test
public void addSplitTest() throws Exception {
  String tableName = getUniqueNames(1)[0];
  Connector c = getConnector();
  c.tableOperations().create(tableName);
  insertData(tableName, 1L);
  TreeSet<Text> splits = new TreeSet<>();
  splits.add(new Text(String.format("%09d", 333)));
  splits.add(new Text(String.format("%09d", 666)));
  c.tableOperations().addSplits(tableName, splits);
  sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
  Collection<Text> actualSplits = c.tableOperations().listSplits(tableName);
  if (!splits.equals(new TreeSet<>(actualSplits))) {
    throw new Exception(splits + " != " + actualSplits);
  }
  verifyData(tableName, 1L);
  insertData(tableName, 2L);
  // the existing splits were not cleared on purpose; addSplits should ignore the
  // existing split points and still create the three additional ones
  splits.add(new Text(String.format("%09d", 200)));
  splits.add(new Text(String.format("%09d", 500)));
  splits.add(new Text(String.format("%09d", 800)));
  c.tableOperations().addSplits(tableName, splits);
  sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
  actualSplits = c.tableOperations().listSplits(tableName);
  if (!splits.equals(new TreeSet<>(actualSplits))) {
    throw new Exception(splits + " != " + actualSplits);
  }
  verifyData(tableName, 2L);
}
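insertData and verifyData are helpers from the test class; the rows are evidently zero-padded nine-digit numbers (which is why the %09d split points fall between them), and the long argument serves as the timestamp so each round of data is distinguishable. A minimal sketch under those assumptions (the row count and column layout are guesses):

// Sketch of the assumed insertData/verifyData helpers.
private void insertData(String tableName, long ts) throws Exception {
  BatchWriter bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
  for (int i = 0; i < 1000; i++) { // row count is an assumption
    Mutation m = new Mutation(String.format("%09d", i));
    m.put("cf", "cq", ts, new Value(Integer.toString(i).getBytes(StandardCharsets.UTF_8)));
    bw.addMutation(m);
  }
  bw.close();
}

private void verifyData(String tableName, long ts) throws Exception {
  Scanner scanner = getConnector().createScanner(tableName, Authorizations.EMPTY);
  int i = 0;
  // rows come back sorted, so zero-padded keys arrive in insertion order
  for (Map.Entry<Key,Value> entry : scanner) {
    if (entry.getKey().getTimestamp() != ts) {
      throw new Exception("unexpected timestamp " + entry.getKey().getTimestamp());
    }
    if (!entry.getKey().getRow().toString().equals(String.format("%09d", i++))) {
      throw new Exception("unexpected row " + entry.getKey().getRow());
    }
  }
  scanner.close();
}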