use of org.apache.accumulo.core.client.BatchWriterConfig in project accumulo by apache.
the class CleanTmpIT method test.
@Test
public void test() throws Exception {
    Connector conn = getConnector();
    // Create a fresh, uniquely named table for this test run.
    String table = getUniqueNames(1)[0];
    conn.tableOperations().create(table);
    // Write a single entry so the table has in-memory data.
    BatchWriter writer = conn.createBatchWriter(table, new BatchWriterConfig());
    Mutation mutation = new Mutation("row");
    mutation.put("cf", "cq", "value");
    writer.addMutation(mutation);
    writer.flush();
    // Compact in-memory data down to an RFile on disk.
    conn.tableOperations().compact(table, null, null, true, true);
    // Write a second entry so a WAL exists when the cluster stops.
    mutation = new Mutation("row2");
    mutation.put("cf", "cq", "value");
    writer.addMutation(mutation);
    writer.close();
    // Find the tablet's single data file by scanning the metadata table.
    String tableId = conn.tableOperations().tableIdMap().get(table);
    Path dataFile;
    try (Scanner metaScanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        metaScanner.setRange(Range.prefix(tableId));
        metaScanner.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
        Entry<Key,Value> onlyFile = Iterables.getOnlyElement(metaScanner);
        dataFile = new Path(onlyFile.getKey().getColumnQualifier().toString());
    }
    FileSystem fs = getCluster().getFileSystem();
    assertTrue("Could not find file: " + dataFile, fs.exists(dataFile));
    Path tabletDir = dataFile.getParent();
    assertNotNull("Tablet dir should not be null", tabletDir);
    // Plant a fake *_tmp file in the tablet directory; recovery should remove it.
    Path strayTmp = new Path(tabletDir, "junk.rf_tmp");
    fs.create(strayTmp).close();
    log.info("Created tmp file {}", strayTmp.toString());
    // Restart the cluster to force log recovery on this tablet.
    getCluster().stop();
    getCluster().start();
    try (Scanner scanner = conn.createScanner(table, Authorizations.EMPTY)) {
        assertEquals(2, Iterators.size(scanner.iterator()));
        // If we performed log recovery, we should have cleaned up any stray files
        assertFalse("File still exists: " + strayTmp, fs.exists(strayTmp));
    }
}
use of org.apache.accumulo.core.client.BatchWriterConfig in project accumulo by apache.
the class CleanUpIT method run.
@Test
public void run() throws Exception {
    String tableName = getUniqueNames(1)[0];
    getConnector().tableOperations().create(tableName);
    BatchWriter bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
    // Write one entry and verify it reads back before shutting the client down.
    Mutation m1 = new Mutation("r1");
    m1.put("cf1", "cq1", 1, "5");
    bw.addMutation(m1);
    bw.flush();
    try (Scanner scanner = getConnector().createScanner(tableName, new Authorizations())) {
        int count = 0;
        for (Entry<Key,Value> entry : scanner) {
            count++;
            if (!entry.getValue().toString().equals("5")) {
                Assert.fail("Unexpected value " + entry.getValue());
            }
        }
        Assert.assertEquals("Unexpected count", 1, count);
        // Client background threads should be running prior to shutdown.
        int threadCount = countThreads();
        if (threadCount < 2) {
            printThreadNames();
            Assert.fail("Not seeing expected threads. Saw " + threadCount);
        }
        CleanUp.shutdownNow();
        // After shutdownNow(), every client operation must fail.
        Mutation m2 = new Mutation("r2");
        m2.put("cf1", "cq1", 1, "6");
        try {
            // Bug fix: previously added m1 here, leaving m2 unused; the intent is
            // to attempt writing the NEW mutation after shutdown.
            bw.addMutation(m2);
            bw.flush();
            Assert.fail("batch writer did not fail");
        } catch (Exception expected) {
            // expected: client has been shut down
        }
        try {
            // expect this to fail also, want to clean up batch writer threads
            bw.close();
            Assert.fail("batch writer close did not fail");
        } catch (Exception expected) {
            // expected: client has been shut down
        }
        try {
            count = 0;
            Iterator<Entry<Key,Value>> iter = scanner.iterator();
            while (iter.hasNext()) {
                iter.next();
                count++;
            }
            Assert.fail("scanner did not fail");
        } catch (Exception expected) {
            // expected: client has been shut down
        }
        // All client threads must have terminated after shutdown.
        threadCount = countThreads();
        if (threadCount > 0) {
            printThreadNames();
            Assert.fail("Threads did not go away. Saw " + threadCount);
        }
    }
}
use of org.apache.accumulo.core.client.BatchWriterConfig in project accumulo by apache.
the class CloneTestIT method writeData.
/**
 * Seeds {@code table1} with two rows ("001" and "008") of test data under the
 * "data" column family. The writer is flushed but left open; the caller is
 * responsible for closing it.
 */
private BatchWriter writeData(String table1, Connector c) throws TableNotFoundException, MutationsRejectedException {
    BatchWriter writer = c.createBatchWriter(table1, new BatchWriterConfig());
    Mutation firstRow = new Mutation("001");
    firstRow.put("data", "x", "9");
    firstRow.put("data", "y", "7");
    Mutation secondRow = new Mutation("008");
    secondRow.put("data", "x", "3");
    secondRow.put("data", "y", "4");
    writer.addMutation(firstRow);
    writer.addMutation(secondRow);
    writer.flush();
    return writer;
}
use of org.apache.accumulo.core.client.BatchWriterConfig in project accumulo by apache.
the class CombinerIT method aggregationTest.
@Test
public void aggregationTest() throws Exception {
    Connector c = getConnector();
    String tableName = getUniqueNames(1)[0];
    c.tableOperations().create(tableName);
    // Attach a SummingCombiner over family "cf"; writing values 0..9 to the same
    // key should aggregate on read (checkSum verifies the expected total).
    IteratorSetting setting = new IteratorSetting(10, SummingCombiner.class);
    SummingCombiner.setEncodingType(setting, Type.STRING);
    SummingCombiner.setColumns(setting, Collections.singletonList(new IteratorSetting.Column("cf")));
    c.tableOperations().attachIterator(tableName, setting);
    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
    for (int i = 0; i < 10; i++) {
        Mutation m = new Mutation("row1");
        // Use the CharSequence overload rather than String.getBytes(): getBytes()
        // without an explicit charset uses the platform default, which is not
        // guaranteed to be UTF-8; Mutation.put(CharSequence...) always encodes UTF-8.
        m.put("cf", "col1", Integer.toString(i));
        bw.addMutation(m);
    }
    bw.close();
    checkSum(tableName, c);
}
use of org.apache.accumulo.core.client.BatchWriterConfig in project accumulo by apache.
the class ConcurrencyIT method runTest.
/**
 * Exercises concurrent scans against a table while flushes and compactions are
 * slowed down by a SlowIterator, verifying each scan sees a consistent 50 rows.
 *
 * <p>The original throws clause listed six exception types plus {@code Exception},
 * which subsumes them all; it is simplified to {@code throws Exception} (a
 * compatible narrowing for callers).
 */
static void runTest(Connector c, String tableName) throws Exception {
    c.tableOperations().create(tableName);
    // Slow minor/major compactions so scans overlap them.
    IteratorSetting is = new IteratorSetting(10, SlowIterator.class);
    SlowIterator.setSleepTime(is, 50);
    c.tableOperations().attachIterator(tableName, is, EnumSet.of(IteratorScope.minc, IteratorScope.majc));
    c.tableOperations().setProperty(tableName, Property.TABLE_MAJC_RATIO.getKey(), "1.0");
    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
    writeFiftyRows(bw);
    bw.flush();
    // Start scans that race the upcoming flush/compaction activity.
    ScanTask st0 = new ScanTask(c, tableName, 300);
    st0.start();
    ScanTask st1 = new ScanTask(c, tableName, 100);
    st1.start();
    sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
    c.tableOperations().flush(tableName, null, null, true);
    // Overwrite the same 50 rows, so the expected scan count remains 50.
    writeFiftyRows(bw);
    bw.flush();
    ScanTask st2 = new ScanTask(c, tableName, 100);
    st2.start();
    st1.join();
    st2.join();
    if (st1.count != 50)
        throw new Exception("Thread 1 did not see 50, saw " + st1.count);
    if (st2.count != 50)
        throw new Exception("Thread 2 did not see 50, saw " + st2.count);
    ScanTask st3 = new ScanTask(c, tableName, 150);
    st3.start();
    sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
    // Non-blocking flush this time; st3 should still observe exactly 50 rows.
    c.tableOperations().flush(tableName, null, null, false);
    st3.join();
    if (st3.count != 50)
        throw new Exception("Thread 3 did not see 50, saw " + st3.count);
    st0.join();
    if (st0.count != 50)
        throw new Exception("Thread 0 did not see 50, saw " + st0.count);
    bw.close();
}

/** Writes the same 50 rows (zero-padded %06d keys) with a fixed value; re-invocation overwrites. */
private static void writeFiftyRows(BatchWriter bw) throws MutationsRejectedException {
    for (int i = 0; i < 50; i++) {
        Mutation m = new Mutation(new Text(String.format("%06d", i)));
        m.put(new Text("cf1"), new Text("cq1"), new Value("foo".getBytes(UTF_8)));
        bw.addMutation(m);
    }
}
Aggregations