Example usage of org.apache.accumulo.core.client.BatchWriterConfig in the Apache "accumulo" project: the write() method of class IMMLGBenchmark.
/**
 * Writes 2^15 mutations of random data to the given table and returns the elapsed
 * wall-clock time in milliseconds. Each mutation has a random zero-padded hex row id
 * and one random 50-byte value per column family in {@code cfset}.
 *
 * @param conn connector used to create the batch writer
 * @param cfset column families to populate in every row
 * @param table destination table name
 * @return elapsed time in milliseconds, including the writer flush/close
 * @throws TableNotFoundException if {@code table} does not exist
 * @throws MutationsRejectedException if any mutation is rejected on flush/close
 */
private static long write(Connector conn, ArrayList<byte[]> cfset, String table) throws TableNotFoundException, MutationsRejectedException {
  Random rand = new Random();
  byte[] val = new byte[50];
  // try-with-resources guarantees the writer is closed (flushing buffered mutations or
  // surfacing the failure) even if addMutation throws mid-loop; the previous version
  // leaked the writer on that path.
  try (BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig())) {
    long t1 = System.currentTimeMillis();
    for (int i = 0; i < 1 << 15; i++) {
      // NOTE(review): if abs(...) here is Math.abs, Long.MIN_VALUE maps to a negative
      // value — confirm the local abs() helper handles that case.
      byte[] row = FastFormat.toZeroPaddedString(abs(rand.nextLong()), 16, 16, new byte[0]);
      Mutation m = new Mutation(row);
      for (byte[] cf : cfset) {
        byte[] cq = FastFormat.toZeroPaddedString(rand.nextInt(1 << 16), 4, 16, new byte[0]);
        rand.nextBytes(val);
        m.put(cf, cq, val);
      }
      bw.addMutation(m);
    }
    long t2 = System.currentTimeMillis();
    return t2 - t1;
  }
}
Example usage of org.apache.accumulo.core.client.BatchWriterConfig in the Apache "accumulo" project: the test() method of class MasterRepairsDualAssignmentIT.
// Integration test: verifies the master repairs a tablet that is (incorrectly) recorded as
// assigned to two servers at once, first in the metadata table and then in the root table.
@Test
public void test() throws Exception {
// make some tablets, spread 'em around
Connector c = getConnector();
ClientContext context = new ClientContext(c.getInstance(), new Credentials("root", new PasswordToken(ROOT_PASSWORD)), getClientConfig());
String table = this.getUniqueNames(1)[0];
// root needs write access to the metadata/root tables to forge the bogus assignment below
c.securityOperations().grantTablePermission("root", MetadataTable.NAME, TablePermission.WRITE);
c.securityOperations().grantTablePermission("root", RootTable.NAME, TablePermission.WRITE);
c.tableOperations().create(table);
SortedSet<Text> partitions = new TreeSet<>();
for (String part : "a b c d e f g h i j k l m n o p q r s t u v w x y z".split(" ")) {
partitions.add(new Text(part));
}
// 26 splits so the tablets spread across both tablet servers
c.tableOperations().addSplits(table, partitions);
// scan the metadata table and get the two table location states
Set<TServerInstance> states = new HashSet<>();
Set<TabletLocationState> oldLocations = new HashSet<>();
MetaDataStateStore store = new MetaDataStateStore(context, null);
// poll until tablets are hosted on two distinct tablet servers, recording every
// current location so we can later find one that belonged to the killed server
while (states.size() < 2) {
UtilWaitThread.sleep(250);
oldLocations.clear();
for (TabletLocationState tls : store) {
if (tls.current != null) {
states.add(tls.current);
oldLocations.add(tls);
}
}
}
assertEquals(2, states.size());
// Kill a tablet server... we don't care which one... wait for everything to be reassigned
cluster.killProcess(ServerType.TABLET_SERVER, cluster.getProcesses().get(ServerType.TABLET_SERVER).iterator().next());
Set<TServerInstance> replStates = new HashSet<>();
// Find out which tablet server remains
while (true) {
UtilWaitThread.sleep(1000);
states.clear();
replStates.clear();
boolean allAssigned = true;
for (TabletLocationState tls : store) {
if (tls != null && tls.current != null) {
states.add(tls.current);
} else if (tls != null && tls.extent.equals(new KeyExtent(ReplicationTable.ID, null, null))) {
// NOTE(review): this branch only runs when tls.current is null, so a null is added
// to replStates; the assertEquals(1, replStates.size()) below still passes because
// HashSet admits a single null — confirm this is intentional.
replStates.add(tls.current);
} else {
allAssigned = false;
}
}
System.out.println(states + " size " + states.size() + " allAssigned " + allAssigned);
// exit once everything is assigned and the tablets are no longer spread over two
// servers (i.e. only the surviving server hosts them)
if (states.size() != 2 && allAssigned)
break;
}
assertEquals(1, replStates.size());
assertEquals(1, states.size());
// pick an assigned tablet and assign it to the old tablet
TabletLocationState moved = null;
for (TabletLocationState old : oldLocations) {
if (!states.contains(old.current)) {
moved = old;
}
}
assertNotEquals(null, moved);
// throw a mutation in as if we were the dying tablet
BatchWriter bw = c.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
Mutation assignment = new Mutation(moved.extent.getMetadataEntry());
moved.current.putLocation(assignment);
bw.addMutation(assignment);
bw.close();
// wait for the master to fix the problem
waitForCleanStore(store);
// now jam up the metadata table
bw = c.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
assignment = new Mutation(new KeyExtent(MetadataTable.ID, null, null).getMetadataEntry());
moved.current.putLocation(assignment);
bw.addMutation(assignment);
bw.close();
// the metadata tablet's bogus location lives in the root table, so wait on that store
waitForCleanStore(new RootTabletStateStore(context, null));
}
Example usage of org.apache.accumulo.core.client.BatchWriterConfig in the Apache "accumulo-examples" project: the setupInstance() method of class CountIT.
/**
 * Creates a fresh table and seeds it with a small directory tree for the count tests:
 * three directories (/local, /local/user1, /local/user2) and three files, one of which
 * (/local/file) is written twice with different timestamps.
 *
 * @throws Exception if table creation or the batch write fails
 */
@Before
public void setupInstance() throws Exception {
  tableName = getUniqueNames(1)[0];
  conn = getConnector();
  conn.tableOperations().create(tableName);
  ColumnVisibility cv = new ColumnVisibility();
  // try-with-resources closes (and flushes) the writer even if an addMutation call
  // throws; the previous version leaked the writer on that path.
  try (BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig())) {
    // / has 1 dir
    // /local has 2 dirs 1 file
    // /local/user1 has 2 files
    bw.addMutation(Ingest.buildMutation(cv, "/local", true, false, true, 272, 12345, null));
    bw.addMutation(Ingest.buildMutation(cv, "/local/user1", true, false, true, 272, 12345, null));
    bw.addMutation(Ingest.buildMutation(cv, "/local/user2", true, false, true, 272, 12345, null));
    bw.addMutation(Ingest.buildMutation(cv, "/local/file", false, false, false, 1024, 12345, null));
    bw.addMutation(Ingest.buildMutation(cv, "/local/file", false, false, false, 1024, 23456, null));
    bw.addMutation(Ingest.buildMutation(cv, "/local/user1/file1", false, false, false, 2024, 12345, null));
    bw.addMutation(Ingest.buildMutation(cv, "/local/user1/file2", false, false, false, 1028, 23456, null));
  }
}
Example usage of org.apache.accumulo.core.client.BatchWriterConfig in the Apache "accumulo-examples" project: the testInfoWithoutChunks() method of class ChunkInputFormatIT.
/**
 * Writes the {@code baddata} entries (keys with their original visibility and timestamp)
 * into a fresh table, then verifies the CIFTester.TestBadData job exits cleanly and
 * records exactly one assertion error for the table.
 *
 * @throws Exception if table creation, the batch write, or the tester job fails
 */
@Test
public void testInfoWithoutChunks() throws Exception {
  conn.tableOperations().create(tableName);
  // try-with-resources closes (and flushes) the writer even if addMutation throws;
  // the previous version leaked the writer on that path.
  try (BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig())) {
    for (Entry<Key, Value> e : baddata) {
      Key k = e.getKey();
      Mutation m = new Mutation(k.getRow());
      // preserve the source key's visibility and timestamp exactly
      m.put(k.getColumnFamily(), k.getColumnQualifier(), new ColumnVisibility(k.getColumnVisibility()), k.getTimestamp(), e.getValue());
      bw.addMutation(m);
    }
  }
  assertEquals(0, CIFTester.main(tableName, CIFTester.TestBadData.class.getName()));
  assertEquals(1, assertionErrors.get(tableName).size());
}
Example usage of org.apache.accumulo.core.client.BatchWriterConfig in the Apache "accumulo-examples" project: the testWithAccumulo() method of class ChunkInputStreamIT.
/**
 * End-to-end check of ChunkInputStream against chunk data read back from Accumulo:
 * ingests the {@code data} entries, then reads them through a scanner as a sequence of
 * chunked files, verifying each file's bytes, EOF behavior, and aggregated visibilities.
 *
 * @throws AccumuloException on a general client failure
 * @throws AccumuloSecurityException on an authentication/authorization failure
 * @throws TableExistsException if the test table already exists
 * @throws TableNotFoundException if the table vanishes between create and use
 * @throws IOException on a ChunkInputStream read failure
 */
@Test
public void testWithAccumulo() throws AccumuloException, AccumuloSecurityException, TableExistsException, TableNotFoundException, IOException {
  conn.tableOperations().create(tableName);
  // try-with-resources closes (and flushes) the writer even if addMutation throws;
  // the previous version leaked the writer on that path.
  try (BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig())) {
    for (Entry<Key, Value> e : data) {
      Key k = e.getKey();
      Mutation m = new Mutation(k.getRow());
      m.put(k.getColumnFamily(), k.getColumnQualifier(), new ColumnVisibility(k.getColumnVisibility()), e.getValue());
      bw.addMutation(m);
    }
  }
  Scanner scan = conn.createScanner(tableName, AUTHS);
  ChunkInputStream cis = new ChunkInputStream();
  byte[] b = new byte[20];
  int read;
  PeekingIterator<Entry<Key, Value>> pi = new PeekingIterator<>(scan.iterator());
  // assertEquals arguments are now in JUnit's (expected, actual) order so failure
  // messages report the values correctly; the previous version had them reversed.
  // First file: "asdfjkl;" (8 bytes) then EOF.
  cis.setSource(pi);
  assertEquals(8, read = cis.read(b));
  assertEquals("asdfjkl;", new String(b, 0, read));
  assertEquals(-1, read = cis.read(b));
  // Second file: "qwertyuiop" (10 bytes), visibilities aggregated across its chunks.
  cis.setSource(pi);
  assertEquals(10, read = cis.read(b));
  assertEquals("qwertyuiop", new String(b, 0, read));
  assertEquals(-1, read = cis.read(b));
  assertEquals("[A&B, B&C, D]", cis.getVisibilities().toString());
  cis.close();
  // Third file: "asdfjkl;" doubled (16 bytes), single visibility.
  cis.setSource(pi);
  assertEquals(16, read = cis.read(b));
  assertEquals("asdfjkl;asdfjkl;", new String(b, 0, read));
  assertEquals(-1, read = cis.read(b));
  assertEquals("[A&B]", cis.getVisibilities().toString());
  cis.close();
  // Fourth file: empty — immediate EOF.
  cis.setSource(pi);
  assertEquals(-1, read = cis.read(b));
  cis.close();
  // Fifth file: "asdfjkl;" again, then EOF and no further entries.
  cis.setSource(pi);
  assertEquals(8, read = cis.read(b));
  assertEquals("asdfjkl;", new String(b, 0, read));
  assertEquals(-1, read = cis.read(b));
  cis.close();
  assertFalse(pi.hasNext());
}
Aggregations