Example 16 with ProcessReference

use of org.apache.accumulo.minicluster.impl.ProcessReference in project accumulo by apache.

The class KerberosReplicationIT, method dataReplicatedToCorrectTable:
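
In this test, a Kerberos-secured primary mini cluster replicates a table to a peer cluster: a replication user is created on the peer and registered via AccumuloReplicaSystem, data is written to the primary table, the tablet server is killed and restarted to force the write-ahead log to close, and the replicated rows are then drained and verified on the peer table.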

@Test
public void dataReplicatedToCorrectTable() throws Exception {
    // Login as the root user
    final UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(rootUser.getPrincipal(), rootUser.getKeytab().toURI().toString());
    ugi.doAs(new PrivilegedExceptionAction<Void>() {

        @Override
        public Void run() throws Exception {
            log.info("testing {}", ugi);
            final KerberosToken token = new KerberosToken();
            final Connector primaryConn = primary.getConnector(rootUser.getPrincipal(), token);
            final Connector peerConn = peer.getConnector(rootUser.getPrincipal(), token);
            ClusterUser replicationUser = kdc.getClientPrincipal(0);
            // Create user for replication to the peer
            peerConn.securityOperations().createLocalUser(replicationUser.getPrincipal(), null);
            primaryConn.instanceOperations().setProperty(Property.REPLICATION_PEER_USER.getKey() + PEER_NAME, replicationUser.getPrincipal());
            primaryConn.instanceOperations().setProperty(Property.REPLICATION_PEER_KEYTAB.getKey() + PEER_NAME, replicationUser.getKeytab().getAbsolutePath());
            // ...peer = AccumuloReplicaSystem,instanceName,zookeepers
            primaryConn.instanceOperations().setProperty(Property.REPLICATION_PEERS.getKey() + PEER_NAME, ReplicaSystemFactory.getPeerConfigurationValue(AccumuloReplicaSystem.class, AccumuloReplicaSystem.buildConfiguration(peerConn.getInstance().getInstanceName(), peerConn.getInstance().getZooKeepers())));
            String primaryTable1 = "primary", peerTable1 = "peer";
            // Create tables
            primaryConn.tableOperations().create(primaryTable1);
            String masterTableId1 = primaryConn.tableOperations().tableIdMap().get(primaryTable1);
            Assert.assertNotNull(masterTableId1);
            peerConn.tableOperations().create(peerTable1);
            String peerTableId1 = peerConn.tableOperations().tableIdMap().get(peerTable1);
            Assert.assertNotNull(peerTableId1);
            // Grant write permission
            peerConn.securityOperations().grantTablePermission(replicationUser.getPrincipal(), peerTable1, TablePermission.WRITE);
            // Replicate this table to the peerClusterName in a table with the peerTableId table id
            primaryConn.tableOperations().setProperty(primaryTable1, Property.TABLE_REPLICATION.getKey(), "true");
            primaryConn.tableOperations().setProperty(primaryTable1, Property.TABLE_REPLICATION_TARGET.getKey() + PEER_NAME, peerTableId1);
            // Write some data to table1
            BatchWriter bw = primaryConn.createBatchWriter(primaryTable1, new BatchWriterConfig());
            long masterTable1Records = 0L;
            for (int rows = 0; rows < 2500; rows++) {
                Mutation m = new Mutation(primaryTable1 + rows);
                for (int cols = 0; cols < 100; cols++) {
                    String value = Integer.toString(cols);
                    m.put(value, "", value);
                    masterTable1Records++;
                }
                bw.addMutation(m);
            }
            bw.close();
            log.info("Wrote all data to primary cluster");
            Set<String> filesFor1 = primaryConn.replicationOperations().referencedFiles(primaryTable1);
            // Restart the tserver to force a close on the WAL
            for (ProcessReference proc : primary.getProcesses().get(ServerType.TABLET_SERVER)) {
                primary.killProcess(ServerType.TABLET_SERVER, proc);
            }
            primary.exec(TabletServer.class);
            log.info("Restarted the tserver");
            // Read the data -- the tserver is back up and running and tablets are assigned
            Iterators.size(primaryConn.createScanner(primaryTable1, Authorizations.EMPTY).iterator());
            // Wait for both tables to be replicated
            log.info("Waiting for {} for {}", filesFor1, primaryTable1);
            primaryConn.replicationOperations().drain(primaryTable1, filesFor1);
            long countTable = 0L;
            for (Entry<Key, Value> entry : peerConn.createScanner(peerTable1, Authorizations.EMPTY)) {
                countTable++;
                Assert.assertTrue("Found unexpected key-value" + entry.getKey().toStringNoTruncate() + " " + entry.getValue(), entry.getKey().getRow().toString().startsWith(primaryTable1));
            }
            log.info("Found {} records in {}", countTable, peerTable1);
            Assert.assertEquals(masterTable1Records, countTable);
            return null;
        }
    });
}
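
The ProcessReference usage this example indexes is the tablet-server restart in the middle of the test. Below is a minimal sketch of that pattern in isolation, assuming a running MiniAccumuloClusterImpl; the class name RestartTabletServerExample, the helper restartTabletServer, and the cluster parameter are illustrative, not taken from the test.

import org.apache.accumulo.minicluster.ServerType;
import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
import org.apache.accumulo.minicluster.impl.ProcessReference;
import org.apache.accumulo.tserver.TabletServer;

public class RestartTabletServerExample {

    // Illustrative helper: kill every tablet server process the mini cluster is
    // tracking, then start a fresh one so tablets can be reassigned.
    static void restartTabletServer(MiniAccumuloClusterImpl cluster) throws Exception {
        // getProcesses() maps each ServerType to the ProcessReferences of its running JVMs.
        for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
            cluster.killProcess(ServerType.TABLET_SERVER, proc);
        }
        // Launch a new TabletServer process. Killing the old one forces its WAL to
        // close, which is what allows replication to pick up the finished file.
        cluster.exec(TabletServer.class);
    }
}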
Also used: Connector (org.apache.accumulo.core.client.Connector), ProcessReference (org.apache.accumulo.minicluster.impl.ProcessReference), Set (java.util.Set), KerberosToken (org.apache.accumulo.core.client.security.tokens.KerberosToken), Entry (java.util.Map.Entry), TabletServer (org.apache.accumulo.tserver.TabletServer), BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig), ClusterUser (org.apache.accumulo.cluster.ClusterUser), BatchWriter (org.apache.accumulo.core.client.BatchWriter), Mutation (org.apache.accumulo.core.data.Mutation), UserGroupInformation (org.apache.hadoop.security.UserGroupInformation), Test (org.junit.Test)

Aggregations

ProcessReference (org.apache.accumulo.minicluster.impl.ProcessReference) 16
Test (org.junit.Test) 16
Connector (org.apache.accumulo.core.client.Connector) 15
BatchWriter (org.apache.accumulo.core.client.BatchWriter) 13
Mutation (org.apache.accumulo.core.data.Mutation) 13
BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig) 11
MiniAccumuloClusterImpl (org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl) 11
Scanner (org.apache.accumulo.core.client.Scanner) 10
PasswordToken (org.apache.accumulo.core.client.security.tokens.PasswordToken) 10
Key (org.apache.accumulo.core.data.Key) 10
Value (org.apache.accumulo.core.data.Value) 10
MiniAccumuloConfigImpl (org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl) 10
AccumuloReplicaSystem (org.apache.accumulo.tserver.replication.AccumuloReplicaSystem) 9
PartialKey (org.apache.accumulo.core.data.PartialKey) 8
Entry (java.util.Map.Entry) 7
File (java.io.File) 2
TreeSet (java.util.TreeSet) 2
TimeoutException (java.util.concurrent.TimeoutException) 2
Status (org.apache.accumulo.server.replication.proto.Replication.Status) 2
Text (org.apache.hadoop.io.Text) 2