
Example 61 with BatchWriter

Use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.

From class KerberosIT, method testDelegationToken.

@Test
public void testDelegationToken() throws Exception {
    final String tableName = getUniqueNames(1)[0];
    // Login as the "root" user
    UserGroupInformation root = UserGroupInformation.loginUserFromKeytabAndReturnUGI(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
    log.info("Logged in as {}", rootUser.getPrincipal());
    final int numRows = 100, numColumns = 10;
    // As the "root" user, open up the connection and get a delegation token
    final AuthenticationToken delegationToken = root.doAs(new PrivilegedExceptionAction<AuthenticationToken>() {

        @Override
        public AuthenticationToken run() throws Exception {
            Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
            log.info("Created connector as {}", rootUser.getPrincipal());
            assertEquals(rootUser.getPrincipal(), conn.whoami());
            conn.tableOperations().create(tableName);
            BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
            for (int r = 0; r < numRows; r++) {
                Mutation m = new Mutation(Integer.toString(r));
                for (int c = 0; c < numColumns; c++) {
                    String col = Integer.toString(c);
                    m.put(col, col, col);
                }
                bw.addMutation(m);
            }
            bw.close();
            return conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
        }
    });
    // The keytab login above has no way to log out, so create a fake user that won't have Kerberos credentials
    UserGroupInformation userWithoutPrivs = UserGroupInformation.createUserForTesting("fake_user", new String[0]);
    int recordsSeen = userWithoutPrivs.doAs(new PrivilegedExceptionAction<Integer>() {

        @Override
        public Integer run() throws Exception {
            Connector conn = mac.getConnector(rootUser.getPrincipal(), delegationToken);
            try (BatchScanner bs = conn.createBatchScanner(tableName, Authorizations.EMPTY, 2)) {
                bs.setRanges(Collections.singleton(new Range()));
                int recordsSeen = Iterables.size(bs);
                return recordsSeen;
            }
        }
    });
    assertEquals(numRows * numColumns, recordsSeen);
}
Also used : Connector(org.apache.accumulo.core.client.Connector) AuthenticationToken(org.apache.accumulo.core.client.security.tokens.AuthenticationToken) DelegationTokenConfig(org.apache.accumulo.core.client.admin.DelegationTokenConfig) KerberosToken(org.apache.accumulo.core.client.security.tokens.KerberosToken) BatchScanner(org.apache.accumulo.core.client.BatchScanner) Range(org.apache.accumulo.core.data.Range) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) TableExistsException(org.apache.accumulo.core.client.TableExistsException) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) UndeclaredThrowableException(java.lang.reflect.UndeclaredThrowableException) AccumuloException(org.apache.accumulo.core.client.AccumuloException) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) Test(org.junit.Test)
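Every example here constructs the writer with a default BatchWriterConfig. As a hedged aside, the config also exposes client-side buffering and threading knobs; a minimal sketch, where the specific values are illustrative only, TimeUnit is java.util.concurrent.TimeUnit, and conn and tableName are assumed to exist as in the test above:

BatchWriterConfig config = new BatchWriterConfig();
// buffer up to roughly 10 MB of mutations on the client before sending (illustrative value)
config.setMaxMemory(10 * 1024 * 1024);
// send buffered mutations at least every two minutes, even if the buffer is not full
config.setMaxLatency(2, TimeUnit.MINUTES);
// number of background threads used to write to tablet servers
config.setMaxWriteThreads(4);
BatchWriter bw = conn.createBatchWriter(tableName, config);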

Example 62 with BatchWriter

Use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.

From class KerberosRenewalIT, method createReadWriteDrop.

/**
 * Creates a table, adds a record to it, compacts the table, reads the record back, and then drops the table. This is a simple way to make sure that the
 * system user exists (since the master does an RPC to the tserver, which will create the system user if it doesn't already exist).
 */
private void createReadWriteDrop(Connector conn) throws TableNotFoundException, AccumuloSecurityException, AccumuloException, TableExistsException {
    final String table = testName.getMethodName() + "_table";
    conn.tableOperations().create(table);
    BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
    Mutation m = new Mutation("a");
    m.put("b", "c", "d");
    bw.addMutation(m);
    bw.close();
    conn.tableOperations().compact(table, new CompactionConfig().setFlush(true).setWait(true));
    try (Scanner s = conn.createScanner(table, Authorizations.EMPTY)) {
        Entry<Key, Value> entry = Iterables.getOnlyElement(s);
        assertEquals("Did not find the expected key", 0, new Key("a", "b", "c").compareTo(entry.getKey(), PartialKey.ROW_COLFAM_COLQUAL));
        assertEquals("d", entry.getValue().toString());
        conn.tableOperations().delete(table);
    }
}
Also used : Scanner(org.apache.accumulo.core.client.Scanner) CompactionConfig(org.apache.accumulo.core.client.admin.CompactionConfig) Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Key(org.apache.accumulo.core.data.Key) PartialKey(org.apache.accumulo.core.data.PartialKey)
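Both of the tests above let close() propagate MutationsRejectedException to the caller, which is what fails the test if writes are lost. A minimal sketch of handling it explicitly instead, assuming an existing Connector conn, a table name table, and an SLF4J logger log:

BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
try {
    Mutation m = new Mutation("a");
    m.put("b", "c", "d");
    bw.addMutation(m);
} finally {
    try {
        // close() flushes any mutations still buffered on the client
        bw.close();
    } catch (MutationsRejectedException e) {
        // getConstraintViolationSummaries() lists any constraint failures behind the rejection
        log.error("mutations were rejected", e);
    }
}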

Example 63 with BatchWriter

Use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.

From class LargeRowIT, method basicTest.

private void basicTest(Connector c, String table, int expectedSplits) throws Exception {
    BatchWriter bw = c.createBatchWriter(table, new BatchWriterConfig());
    Random r = new Random();
    byte[] rowData = new byte[ROW_SIZE];
    r.setSeed(SEED);
    for (int i = 0; i < NUM_ROWS; i++) {
        r.nextBytes(rowData);
        TestIngest.toPrintableChars(rowData);
        Mutation mut = new Mutation(new Text(rowData));
        mut.put(new Text(""), new Text(""), new Value(Integer.toString(i).getBytes(UTF_8)));
        bw.addMutation(mut);
    }
    bw.close();
    FunctionalTestUtils.checkSplits(c, table, expectedSplits, expectedSplits);
    verify(c, table);
    FunctionalTestUtils.checkSplits(c, table, expectedSplits, expectedSplits);
    c.tableOperations().flush(table, null, null, false);
    // verify while table flush is running
    verify(c, table);
    // give split time to complete
    c.tableOperations().flush(table, null, null, true);
    FunctionalTestUtils.checkSplits(c, table, expectedSplits, expectedSplits);
    verify(c, table);
    FunctionalTestUtils.checkSplits(c, table, expectedSplits, expectedSplits);
}
Also used : Random(java.util.Random) Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) Text(org.apache.hadoop.io.Text) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation)
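LargeRowIT stresses tablet splitting with rows that are large relative to the table's split threshold (ROW_SIZE, NUM_ROWS, and SEED are constants defined elsewhere in the test class). As a hedged aside, the split threshold is an ordinary per-table property; a minimal sketch of lowering it, assuming a Connector conn and a table name table, with the 100K value purely illustrative rather than what this test configures:

// lower the per-table split threshold so tablets split sooner (illustrative value)
conn.tableOperations().setProperty(table, Property.TABLE_SPLIT_THRESHOLD.getKey(), "100K");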

Example 64 with BatchWriter

Use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.

From class MapReduceIT, method runTest.

static void runTest(Connector c, MiniAccumuloClusterImpl cluster) throws AccumuloException, AccumuloSecurityException, TableExistsException, TableNotFoundException, MutationsRejectedException, IOException, InterruptedException, NoSuchAlgorithmException {
    c.tableOperations().create(tablename);
    BatchWriter bw = c.createBatchWriter(tablename, new BatchWriterConfig());
    for (int i = 0; i < 10; i++) {
        Mutation m = new Mutation("" + i);
        m.put(input_cf, input_cq, "row" + i);
        bw.addMutation(m);
    }
    bw.close();
    // run the RowHash MapReduce job: -i instance, -z zookeepers, -u/-p credentials, -t table, --column the column to hash
    Process hash = cluster.exec(RowHash.class, Collections.singletonList(hadoopTmpDirArg), "-i", c.getInstance().getInstanceName(), "-z", c.getInstance().getZooKeepers(), "-u", "root", "-p", ROOT_PASSWORD, "-t", tablename, "--column", input_cfcq);
    assertEquals(0, hash.waitFor());
    try (Scanner s = c.createScanner(tablename, Authorizations.EMPTY)) {
        s.fetchColumn(new Text(input_cf), new Text(output_cq));
        int i = 0;
        for (Entry<Key, Value> entry : s) {
            // each output value should be the Base64-encoded MD5 digest of the original "row<i>" value
            MessageDigest md = MessageDigest.getInstance("MD5");
            byte[] check = Base64.getEncoder().encode(md.digest(("row" + i).getBytes()));
            assertEquals(entry.getValue().toString(), new String(check));
            i++;
        }
    }
}
Also used : Scanner(org.apache.accumulo.core.client.Scanner) Text(org.apache.hadoop.io.Text) Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) MessageDigest(java.security.MessageDigest) Key(org.apache.accumulo.core.data.Key)

Example 65 with BatchWriter

Use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.

From class MasterAssignmentIT, method test.

@Test
public void test() throws Exception {
    Connector c = getConnector();
    String tableName = super.getUniqueNames(1)[0];
    c.tableOperations().create(tableName);
    String tableId = c.tableOperations().tableIdMap().get(tableName);
    // wait for the table to be online
    TabletLocationState newTablet;
    do {
        UtilWaitThread.sleep(250);
        newTablet = getTabletLocationState(c, tableId);
    } while (newTablet.current == null);
    assertNull(newTablet.last);
    assertNull(newTablet.future);
    // put something in it
    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
    Mutation m = new Mutation("a");
    m.put("b", "c", "d");
    bw.addMutation(m);
    bw.close();
    // give it a last location
    c.tableOperations().flush(tableName, null, null, true);
    TabletLocationState flushed = getTabletLocationState(c, tableId);
    assertEquals(newTablet.current, flushed.current);
    assertEquals(flushed.current, flushed.last);
    assertNull(flushed.future);
    // take the tablet offline
    c.tableOperations().offline(tableName, true);
    TabletLocationState offline = getTabletLocationState(c, tableId);
    assertNull(offline.future);
    assertNull(offline.current);
    assertEquals(flushed.current, offline.last);
    // put it back online
    c.tableOperations().online(tableName, true);
    TabletLocationState online = getTabletLocationState(c, tableId);
    assertNull(online.future);
    assertNotNull(online.current);
    assertEquals(online.current, online.last);
}
Also used : Connector(org.apache.accumulo.core.client.Connector) TabletLocationState(org.apache.accumulo.server.master.state.TabletLocationState) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Test(org.junit.Test)

Aggregations

BatchWriter (org.apache.accumulo.core.client.BatchWriter): 402 usages
Mutation (org.apache.accumulo.core.data.Mutation): 360 usages
Test (org.junit.Test): 264 usages
Value (org.apache.accumulo.core.data.Value): 250 usages
BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig): 246 usages
Text (org.apache.hadoop.io.Text): 194 usages
Key (org.apache.accumulo.core.data.Key): 179 usages
Scanner (org.apache.accumulo.core.client.Scanner): 174 usages
Connector (org.apache.accumulo.core.client.Connector): 169 usages
IteratorSetting (org.apache.accumulo.core.client.IteratorSetting): 81 usages
Authorizations (org.apache.accumulo.core.security.Authorizations): 68 usages
Range (org.apache.accumulo.core.data.Range): 61 usages
Entry (java.util.Map.Entry): 51 usages
Map (java.util.Map): 50 usages
BatchScanner (org.apache.accumulo.core.client.BatchScanner): 46 usages
MutationsRejectedException (org.apache.accumulo.core.client.MutationsRejectedException): 44 usages
TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException): 40 usages
HashMap (java.util.HashMap): 38 usages
ArrayList (java.util.ArrayList): 36 usages
Status (org.apache.accumulo.server.replication.proto.Replication.Status): 32 usages
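
As the counts above suggest, BatchWriter is almost always paired with Mutation and BatchWriterConfig on the write side and a Scanner (plus Key, Value, Authorizations) for verification. A minimal sketch of that common write-then-read pattern, assuming an existing Connector conn and an existing table named "example":

// write a single cell
BatchWriter bw = conn.createBatchWriter("example", new BatchWriterConfig());
Mutation m = new Mutation("row1");
m.put("family", "qualifier", "value");
bw.addMutation(m);
// close() flushes the client-side buffer; it throws MutationsRejectedException if any write failed
bw.close();

// read it back
try (Scanner scanner = conn.createScanner("example", Authorizations.EMPTY)) {
    for (Entry<Key, Value> entry : scanner) {
        System.out.println(entry.getKey() + " -> " + entry.getValue());
    }
}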