
Example 41 with ZooKeeperInstance

use of org.apache.accumulo.core.client.ZooKeeperInstance in project accumulo by apache.

the class SuspendedTabletsIT method suspensionTestBody.

/**
 * Main test body for suspension tests.
 *
 * @param serverStopper
 *          callback which shuts down some tablet servers.
 */
private void suspensionTestBody(TServerKiller serverStopper) throws Exception {
    Credentials creds = new Credentials("root", new PasswordToken(ROOT_PASSWORD));
    Instance instance = new ZooKeeperInstance(getCluster().getClientConfig());
    ClientContext ctx = new ClientContext(instance, creds, getCluster().getClientConfig());
    String tableName = getUniqueNames(1)[0];
    Connector conn = ctx.getConnector();
    // Create a table with a bunch of splits
    log.info("Creating table " + tableName);
    conn.tableOperations().create(tableName);
    SortedSet<Text> splitPoints = new TreeSet<>();
    for (int i = 1; i < TABLETS; ++i) {
        splitPoints.add(new Text("" + i));
    }
    conn.tableOperations().addSplits(tableName, splitPoints);
    // Wait for all of the tablets to be hosted ...
    log.info("Waiting on hosting and balance");
    TabletLocations ds;
    for (ds = TabletLocations.retrieve(ctx, tableName); ds.hostedCount != TABLETS; ds = TabletLocations.retrieve(ctx, tableName)) {
        Thread.sleep(1000);
    }
    // ... and balanced.
    conn.instanceOperations().waitForBalance();
    do {
        // Give at least another 5 seconds for migrations to finish up
        Thread.sleep(5000);
        ds = TabletLocations.retrieve(ctx, tableName);
    } while (ds.hostedCount != TABLETS);
    // Pray all of our tservers have at least 1 tablet.
    Assert.assertEquals(TSERVERS, ds.hosted.keySet().size());
    // Kill two tablet servers hosting our tablets. This should put tablets into suspended state, and thus halt balancing.
    TabletLocations beforeDeathState = ds;
    log.info("Eliminating tablet servers");
    serverStopper.eliminateTabletServers(ctx, beforeDeathState, 2);
    // Eventually some tablets will be suspended.
    log.info("Waiting on suspended tablets");
    ds = TabletLocations.retrieve(ctx, tableName);
    // Until we can scan the metadata table, the master probably can't either, so won't have been able to suspend the tablets.
    // So we note the time that we were first able to successfully scan the metadata table.
    long killTime = System.nanoTime();
    while (ds.suspended.keySet().size() != 2) {
        Thread.sleep(1000);
        ds = TabletLocations.retrieve(ctx, tableName);
    }
    SetMultimap<HostAndPort, KeyExtent> deadTabletsByServer = ds.suspended;
    // "belong" to the dead tablet servers, and should be in exactly the same place as before any tserver death.
    for (HostAndPort server : deadTabletsByServer.keySet()) {
        Assert.assertEquals(deadTabletsByServer.get(server), beforeDeathState.hosted.get(server));
    }
    Assert.assertEquals(TABLETS, ds.hostedCount + ds.suspendedCount);
    // Restart the first tablet server, making sure it ends up on the same port
    HostAndPort restartedServer = deadTabletsByServer.keySet().iterator().next();
    log.info("Restarting " + restartedServer);
    getCluster().getClusterControl().start(ServerType.TABLET_SERVER, null, ImmutableMap.of(Property.TSERV_CLIENTPORT.getKey(), "" + restartedServer.getPort(), Property.TSERV_PORTSEARCH.getKey(), "false"), 1);
    // Eventually, the suspended tablets should be reassigned to the newly alive tserver.
    log.info("Awaiting tablet unsuspension for tablets belonging to " + restartedServer);
    for (ds = TabletLocations.retrieve(ctx, tableName); ds.suspended.containsKey(restartedServer) || ds.assignedCount != 0; ds = TabletLocations.retrieve(ctx, tableName)) {
        Thread.sleep(1000);
    }
    Assert.assertEquals(deadTabletsByServer.get(restartedServer), ds.hosted.get(restartedServer));
    // Finally, after much longer, remaining suspended tablets should be reassigned.
    log.info("Awaiting tablet reassignment for remaining tablets");
    for (ds = TabletLocations.retrieve(ctx, tableName); ds.hostedCount != TABLETS; ds = TabletLocations.retrieve(ctx, tableName)) {
        Thread.sleep(1000);
    }
    long recoverTime = System.nanoTime();
    Assert.assertTrue(recoverTime - killTime >= NANOSECONDS.convert(SUSPEND_DURATION, MILLISECONDS));
}
Also used : Connector(org.apache.accumulo.core.client.Connector) Instance(org.apache.accumulo.core.client.Instance) TServerInstance(org.apache.accumulo.server.master.state.TServerInstance) ZooKeeperInstance(org.apache.accumulo.core.client.ZooKeeperInstance) ClientContext(org.apache.accumulo.core.client.impl.ClientContext) Text(org.apache.hadoop.io.Text) KeyExtent(org.apache.accumulo.core.data.impl.KeyExtent) ZooKeeperInstance(org.apache.accumulo.core.client.ZooKeeperInstance) HostAndPort(org.apache.accumulo.core.util.HostAndPort) PasswordToken(org.apache.accumulo.core.client.security.tokens.PasswordToken) TreeSet(java.util.TreeSet) Credentials(org.apache.accumulo.core.client.impl.Credentials)
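
The connection boilerplate at the top of this test is the reusable part: build a ZooKeeperInstance from a client configuration, wrap it in a ClientContext together with credentials, and ask the context for a Connector. Below is a minimal sketch of just that pattern; the instance name, ZooKeeper hosts, and password are placeholders rather than values from the test, which takes them from its mini cluster.

import org.apache.accumulo.core.client.ClientConfiguration;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Instance;
import org.apache.accumulo.core.client.ZooKeeperInstance;
import org.apache.accumulo.core.client.impl.ClientContext;
import org.apache.accumulo.core.client.impl.Credentials;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;

public class ZooKeeperInstanceSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder connection details; the test obtains these from getCluster().getClientConfig().
        ClientConfiguration clientConf = ClientConfiguration.create()
                .withInstance("myInstance")
                .withZkHosts("zkhost1:2181,zkhost2:2181");
        Instance instance = new ZooKeeperInstance(clientConf);
        Credentials creds = new Credentials("root", new PasswordToken("secret"));
        ClientContext ctx = new ClientContext(instance, creds, clientConf);
        // The context hands back a Connector bound to the instance and credentials above.
        Connector conn = ctx.getConnector();
        System.out.println("Connected as " + conn.whoami());
    }
}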

Example 42 with ZooKeeperInstance

use of org.apache.accumulo.core.client.ZooKeeperInstance in project accumulo by apache.

the class KerberosProxyIT method proxiedUserAccessWithoutAccumuloProxy.

@Test
public void proxiedUserAccessWithoutAccumuloProxy() throws Exception {
    final String tableName = getUniqueNames(1)[0];
    ClusterUser rootUser = kdc.getRootUser();
    final UserGroupInformation rootUgi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
    final UserGroupInformation realUgi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(proxyPrincipal, proxyKeytab.getAbsolutePath());
    final String userWithoutCredentials1 = kdc.qualifyUser(PROXIED_USER1);
    final String userWithoutCredentials2 = kdc.qualifyUser(PROXIED_USER2);
    final String userWithoutCredentials3 = kdc.qualifyUser(PROXIED_USER3);
    final UserGroupInformation proxyUser1 = UserGroupInformation.createProxyUser(userWithoutCredentials1, realUgi);
    final UserGroupInformation proxyUser2 = UserGroupInformation.createProxyUser(userWithoutCredentials2, realUgi);
    final UserGroupInformation proxyUser3 = UserGroupInformation.createProxyUser(userWithoutCredentials3, realUgi);
    // Create a table and user, grant permission to our user to read that table.
    rootUgi.doAs(new PrivilegedExceptionAction<Void>() {

        @Override
        public Void run() throws Exception {
            ZooKeeperInstance inst = new ZooKeeperInstance(mac.getClientConfig());
            Connector conn = inst.getConnector(rootUgi.getUserName(), new KerberosToken());
            conn.tableOperations().create(tableName);
            conn.securityOperations().createLocalUser(userWithoutCredentials1, new PasswordToken("ignored"));
            conn.securityOperations().grantTablePermission(userWithoutCredentials1, tableName, TablePermission.READ);
            conn.securityOperations().createLocalUser(userWithoutCredentials3, new PasswordToken("ignored"));
            conn.securityOperations().grantTablePermission(userWithoutCredentials3, tableName, TablePermission.READ);
            return null;
        }
    });
    realUgi.doAs(new PrivilegedExceptionAction<Void>() {

        @Override
        public Void run() throws Exception {
            ZooKeeperInstance inst = new ZooKeeperInstance(mac.getClientConfig());
            Connector conn = inst.getConnector(proxyPrincipal, new KerberosToken());
            try (Scanner s = conn.createScanner(tableName, Authorizations.EMPTY)) {
                s.iterator().hasNext();
                Assert.fail("Expected to see an exception");
            } catch (RuntimeException e) {
                int numSecurityExceptionsSeen = Iterables.size(Iterables.filter(Throwables.getCausalChain(e), org.apache.accumulo.core.client.AccumuloSecurityException.class));
                assertTrue("Expected to see at least one AccumuloSecurityException, but saw: " + Throwables.getStackTraceAsString(e), numSecurityExceptionsSeen > 0);
            }
            return null;
        }
    });
    // Allowed to be proxied and has read permission
    proxyUser1.doAs(new PrivilegedExceptionAction<Void>() {

        @Override
        public Void run() throws Exception {
            ZooKeeperInstance inst = new ZooKeeperInstance(mac.getClientConfig());
            Connector conn = inst.getConnector(userWithoutCredentials1, new KerberosToken(userWithoutCredentials1));
            Scanner s = conn.createScanner(tableName, Authorizations.EMPTY);
            assertFalse(s.iterator().hasNext());
            return null;
        }
    });
    // Allowed to be proxied but does not have read permission
    proxyUser2.doAs(new PrivilegedExceptionAction<Void>() {

        @Override
        public Void run() throws Exception {
            ZooKeeperInstance inst = new ZooKeeperInstance(mac.getClientConfig());
            Connector conn = inst.getConnector(userWithoutCredentials2, new KerberosToken(userWithoutCredentials3));
            try (Scanner s = conn.createScanner(tableName, Authorizations.EMPTY)) {
                s.iterator().hasNext();
                Assert.fail("Expected to see an exception");
            } catch (RuntimeException e) {
                int numSecurityExceptionsSeen = Iterables.size(Iterables.filter(Throwables.getCausalChain(e), org.apache.accumulo.core.client.AccumuloSecurityException.class));
                assertTrue("Expected to see at least one AccumuloSecurityException, but saw: " + Throwables.getStackTraceAsString(e), numSecurityExceptionsSeen > 0);
            }
            return null;
        }
    });
    // Has read permission but is not allowed to be proxied
    proxyUser3.doAs(new PrivilegedExceptionAction<Void>() {

        @Override
        public Void run() throws Exception {
            ZooKeeperInstance inst = new ZooKeeperInstance(mac.getClientConfig());
            try {
                inst.getConnector(userWithoutCredentials3, new KerberosToken(userWithoutCredentials3));
                Assert.fail("Should not be able to create a Connector as this user cannot be proxied");
            } catch (org.apache.accumulo.core.client.AccumuloSecurityException e) {
            // Expected, this user cannot be proxied
            }
            return null;
        }
    });
}
Also used : Connector(org.apache.accumulo.core.client.Connector) Scanner(org.apache.accumulo.core.client.Scanner) KerberosToken(org.apache.accumulo.core.client.security.tokens.KerberosToken) TTransportException(org.apache.thrift.transport.TTransportException) AccumuloSecurityException(org.apache.accumulo.proxy.thrift.AccumuloSecurityException) ConnectException(java.net.ConnectException) ExpectedException(org.junit.rules.ExpectedException) IOException(java.io.IOException) ZooKeeperInstance(org.apache.accumulo.core.client.ZooKeeperInstance) PasswordToken(org.apache.accumulo.core.client.security.tokens.PasswordToken) ClusterUser(org.apache.accumulo.cluster.ClusterUser) AccumuloSecurityException(org.apache.accumulo.proxy.thrift.AccumuloSecurityException) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) Test(org.junit.Test)
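
The pattern this test exercises repeatedly is Hadoop proxy-user impersonation: log a real principal in from a keytab, create a proxy UserGroupInformation for an end user, and open the Accumulo connection inside doAs with a KerberosToken. A minimal sketch of that flow follows; the principals, keytab path, and connection details are placeholders, and a working run needs a Kerberized Accumulo instance configured to allow the impersonation.

import java.security.PrivilegedExceptionAction;

import org.apache.accumulo.core.client.ClientConfiguration;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.ZooKeeperInstance;
import org.apache.accumulo.core.client.security.tokens.KerberosToken;
import org.apache.hadoop.security.UserGroupInformation;

public class ProxyUserSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder principals and keytab; the test gets these from its KDC harness.
        UserGroupInformation realUgi = UserGroupInformation
                .loginUserFromKeytabAndReturnUGI("proxy@EXAMPLE.COM", "/path/to/proxy.keytab");
        UserGroupInformation proxyUser = UserGroupInformation
                .createProxyUser("alice@EXAMPLE.COM", realUgi);
        proxyUser.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                // Placeholder client configuration; the test uses mac.getClientConfig().
                ClientConfiguration clientConf = ClientConfiguration.create()
                        .withInstance("myInstance").withZkHosts("zkhost1:2181");
                ZooKeeperInstance inst = new ZooKeeperInstance(clientConf);
                // Inside doAs, the proxied principal authenticates with a KerberosToken,
                // exactly as the proxyUser1 block does in the test above.
                Connector conn = inst.getConnector("alice@EXAMPLE.COM",
                        new KerberosToken("alice@EXAMPLE.COM"));
                System.out.println("Authenticated as " + conn.whoami());
                return null;
            }
        });
    }
}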

Example 43 with ZooKeeperInstance

use of org.apache.accumulo.core.client.ZooKeeperInstance in project accumulo-examples by apache.

the class ARS method main.

public static void main(String[] args) throws Exception {
    final ConsoleReader reader = new ConsoleReader();
    ARS ars = null;
    while (true) {
        String line = reader.readLine(">");
        if (line == null)
            break;
        final String[] tokens = line.split("\\s+");
        if (tokens[0].equals("reserve") && tokens.length >= 4 && ars != null) {
            // start up multiple threads all trying to reserve the same resource, no more than one should succeed
            final ARS fars = ars;
            ArrayList<Thread> threads = new ArrayList<>();
            for (int i = 3; i < tokens.length; i++) {
                final int whoIndex = i;
                Runnable reservationTask = new Runnable() {

                    @Override
                    public void run() {
                        try {
                            reader.println("  " + String.format("%20s", tokens[whoIndex]) + " : " + fars.reserve(tokens[1], tokens[2], tokens[whoIndex]));
                        } catch (Exception e) {
                            log.warn("Could not write to the ConsoleReader.", e);
                        }
                    }
                };
                threads.add(new Thread(reservationTask));
            }
            for (Thread thread : threads) thread.start();
            for (Thread thread : threads) thread.join();
        } else if (tokens[0].equals("cancel") && tokens.length == 4 && ars != null) {
            ars.cancel(tokens[1], tokens[2], tokens[3]);
        } else if (tokens[0].equals("list") && tokens.length == 3 && ars != null) {
            List<String> reservations = ars.list(tokens[1], tokens[2]);
            if (reservations.size() > 0) {
                reader.println("  Reservation holder : " + reservations.get(0));
                if (reservations.size() > 1)
                    reader.println("  Wait list : " + reservations.subList(1, reservations.size()));
            }
        } else if (tokens[0].equals("quit") && tokens.length == 1) {
            break;
        } else if (tokens[0].equals("connect") && tokens.length == 6 && ars == null) {
            ZooKeeperInstance zki = new ZooKeeperInstance(ClientConfiguration.create().withInstance(tokens[1]).withZkHosts(tokens[2]));
            Connector conn = zki.getConnector(tokens[3], new PasswordToken(tokens[4]));
            if (conn.tableOperations().exists(tokens[5])) {
                ars = new ARS(conn, tokens[5]);
                reader.println("  connected");
            } else
                reader.println("  No Such Table");
        } else {
            System.out.println("  Commands : ");
            if (ars == null) {
                reader.println("    connect <instance> <zookeepers> <user> <pass> <table>");
            } else {
                reader.println("    reserve <what> <when> <who> {who}");
                reader.println("    cancel <what> <when> <who>");
                reader.println("    list <what> <when>");
            }
        }
    }
}
Also used : Connector(org.apache.accumulo.core.client.Connector) ConsoleReader(jline.console.ConsoleReader) ArrayList(java.util.ArrayList) ZooKeeperInstance(org.apache.accumulo.core.client.ZooKeeperInstance) PasswordToken(org.apache.accumulo.core.client.security.tokens.PasswordToken)
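
For orientation, a session with this console might look like the transcript below. The instance, ZooKeeper host, credentials, table, and reservation arguments are illustrative only, and the per-user result lines printed by reserve are assumed to reflect whatever ARS.reserve returns; the source above only fixes the "  <who> : <result>" output format.

> connect myInstance zkhost1:2181 root secret ars
  connected
> reserve room01 20180301 alice bob
                 alice : RESERVED
                   bob : WAIT_LISTED
> list room01 20180301
  Reservation holder : alice
  Wait list : [bob]
> cancel room01 20180301 alice
> quit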

Example 44 with ZooKeeperInstance

use of org.apache.accumulo.core.client.ZooKeeperInstance in project accumulo by apache.

the class ClientContext method getConnector.

/**
 * Retrieve a connector
 */
public Connector getConnector() throws AccumuloException, AccumuloSecurityException {
    // avoid making more connectors than necessary
    if (conn == null) {
        if (getInstance() instanceof ZooKeeperInstance) {
            // reuse existing context
            conn = new ConnectorImpl(this);
        } else {
            Credentials c = getCredentials();
            conn = getInstance().getConnector(c.getPrincipal(), c.getToken());
        }
    }
    return conn;
}
Also used : TCredentials(org.apache.accumulo.core.security.thrift.TCredentials) ZooKeeperInstance(org.apache.accumulo.core.client.ZooKeeperInstance)
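
The point of the null check is that ClientContext caches the Connector it builds, so repeated calls are cheap and return the same object. A minimal sketch of that behavior, using placeholder connection details, might look like this:

import org.apache.accumulo.core.client.ClientConfiguration;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.ZooKeeperInstance;
import org.apache.accumulo.core.client.impl.ClientContext;
import org.apache.accumulo.core.client.impl.Credentials;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;

public class GetConnectorSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder connection details.
        ClientConfiguration clientConf = ClientConfiguration.create()
                .withInstance("myInstance").withZkHosts("zkhost1:2181");
        ClientContext ctx = new ClientContext(new ZooKeeperInstance(clientConf),
                new Credentials("root", new PasswordToken("secret")), clientConf);
        Connector first = ctx.getConnector();
        Connector second = ctx.getConnector();
        // Both calls return the cached Connector created on first use.
        System.out.println("Same Connector instance: " + (first == second));
    }
}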

Example 45 with ZooKeeperInstance

use of org.apache.accumulo.core.client.ZooKeeperInstance in project hive by apache.

the class TestHiveAccumuloTableInputFormat method testConfigureAccumuloInputFormatWithEmptyColumns.

@Test
public void testConfigureAccumuloInputFormatWithEmptyColumns() throws Exception {
    AccumuloConnectionParameters accumuloParams = new AccumuloConnectionParameters(conf);
    ColumnMapper columnMapper = new ColumnMapper(conf.get(AccumuloSerDeParameters.COLUMN_MAPPINGS), conf.get(AccumuloSerDeParameters.DEFAULT_STORAGE_TYPE), columnNames, columnTypes);
    HashSet<Pair<Text, Text>> cfCqPairs = Sets.newHashSet();
    List<IteratorSetting> iterators = new ArrayList<IteratorSetting>();
    Set<Range> ranges = Collections.singleton(new Range());
    String instanceName = "realInstance";
    String zookeepers = "host1:2181,host2:2181,host3:2181";
    IteratorSetting cfg = new IteratorSetting(50, PrimitiveComparisonFilter.class);
    cfg.addOption(PrimitiveComparisonFilter.P_COMPARE_CLASS, StringCompare.class.getName());
    cfg.addOption(PrimitiveComparisonFilter.COMPARE_OPT_CLASS, Equal.class.getName());
    cfg.addOption(PrimitiveComparisonFilter.CONST_VAL, "dave");
    cfg.addOption(PrimitiveComparisonFilter.COLUMN, "person:name");
    iterators.add(cfg);
    cfg = new IteratorSetting(50, PrimitiveComparisonFilter.class);
    cfg.addOption(PrimitiveComparisonFilter.P_COMPARE_CLASS, IntCompare.class.getName());
    cfg.addOption(PrimitiveComparisonFilter.COMPARE_OPT_CLASS, Equal.class.getName());
    cfg.addOption(PrimitiveComparisonFilter.CONST_VAL, "50");
    cfg.addOption(PrimitiveComparisonFilter.COLUMN, "person:age");
    iterators.add(cfg);
    ZooKeeperInstance zkInstance = Mockito.mock(ZooKeeperInstance.class);
    HiveAccumuloTableInputFormat mockInputFormat = Mockito.mock(HiveAccumuloTableInputFormat.class);
    HiveAccumuloHelper helper = Mockito.mock(HiveAccumuloHelper.class);
    // Stub out the ZKI mock
    Mockito.when(zkInstance.getInstanceName()).thenReturn(instanceName);
    Mockito.when(zkInstance.getZooKeepers()).thenReturn(zookeepers);
    Mockito.when(mockInputFormat.getPairCollection(columnMapper.getColumnMappings())).thenReturn(cfCqPairs);
    // Stub out a mocked Helper instance
    Mockito.when(mockInputFormat.getHelper()).thenReturn(helper);
    // Call out to the real configure method
    Mockito.doCallRealMethod().when(mockInputFormat).configure(conf, zkInstance, con, accumuloParams, columnMapper, iterators, ranges);
    // Also compute the correct cf:cq pairs so we can assert the right argument was passed
    Mockito.doCallRealMethod().when(mockInputFormat).getPairCollection(columnMapper.getColumnMappings());
    mockInputFormat.configure(conf, zkInstance, con, accumuloParams, columnMapper, iterators, ranges);
    // Verify that the correct methods are invoked on AccumuloInputFormat
    Mockito.verify(helper).setInputFormatZooKeeperInstance(conf, instanceName, zookeepers, false);
    Mockito.verify(helper).setInputFormatConnectorInfo(conf, USER, new PasswordToken(PASS));
    Mockito.verify(mockInputFormat).setInputTableName(conf, TEST_TABLE);
    Mockito.verify(mockInputFormat).setScanAuthorizations(conf, con.securityOperations().getUserAuthorizations(USER));
    Mockito.verify(mockInputFormat).addIterators(conf, iterators);
    Mockito.verify(mockInputFormat).setRanges(conf, ranges);
// fetchColumns is not called because we had no columns to fetch
}
Also used : StringCompare(org.apache.hadoop.hive.accumulo.predicate.compare.StringCompare) ArrayList(java.util.ArrayList) Range(org.apache.accumulo.core.data.Range) ZooKeeperInstance(org.apache.accumulo.core.client.ZooKeeperInstance) HiveAccumuloHelper(org.apache.hadoop.hive.accumulo.HiveAccumuloHelper) PasswordToken(org.apache.accumulo.core.client.security.tokens.PasswordToken) IteratorSetting(org.apache.accumulo.core.client.IteratorSetting) PrimitiveComparisonFilter(org.apache.hadoop.hive.accumulo.predicate.PrimitiveComparisonFilter) GreaterThanOrEqual(org.apache.hadoop.hive.accumulo.predicate.compare.GreaterThanOrEqual) Equal(org.apache.hadoop.hive.accumulo.predicate.compare.Equal) IntCompare(org.apache.hadoop.hive.accumulo.predicate.compare.IntCompare) AccumuloConnectionParameters(org.apache.hadoop.hive.accumulo.AccumuloConnectionParameters) ColumnMapper(org.apache.hadoop.hive.accumulo.columns.ColumnMapper) Pair(org.apache.accumulo.core.util.Pair) Test(org.junit.Test)
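
Stripped of the Hive plumbing, the ZooKeeperInstance mocking in this test reduces to stubbing the two accessors the configuration code reads. A minimal sketch is below; the describe helper is hypothetical and stands in for whatever consumes the mocked instance.

import org.apache.accumulo.core.client.Instance;
import org.apache.accumulo.core.client.ZooKeeperInstance;
import org.mockito.Mockito;

public class ZkInstanceMockSketch {
    // Hypothetical consumer that only needs the instance name and ZooKeeper hosts.
    static String describe(Instance instance) {
        return instance.getInstanceName() + " @ " + instance.getZooKeepers();
    }

    public static void main(String[] args) {
        // Mocking ZooKeeperInstance avoids contacting a real ZooKeeper quorum.
        ZooKeeperInstance zkInstance = Mockito.mock(ZooKeeperInstance.class);
        Mockito.when(zkInstance.getInstanceName()).thenReturn("realInstance");
        Mockito.when(zkInstance.getZooKeepers()).thenReturn("host1:2181,host2:2181,host3:2181");
        // The consumer sees only the stubbed values.
        System.out.println(describe(zkInstance));
    }
}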

Aggregations

ZooKeeperInstance (org.apache.accumulo.core.client.ZooKeeperInstance) 52
PasswordToken (org.apache.accumulo.core.client.security.tokens.PasswordToken) 35
Instance (org.apache.accumulo.core.client.Instance) 24
Connector (org.apache.accumulo.core.client.Connector) 17
AccumuloException (org.apache.accumulo.core.client.AccumuloException) 15
MockInstance (org.apache.accumulo.core.client.mock.MockInstance) 15
AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException) 13
IOException (java.io.IOException) 8
Test (org.junit.Test) 8
ClientConfiguration (org.apache.accumulo.core.client.ClientConfiguration) 7
AccumuloRdfConfiguration (org.apache.rya.accumulo.AccumuloRdfConfiguration) 7
Range (org.apache.accumulo.core.data.Range) 6
ArrayList (java.util.ArrayList) 5
MiniAccumuloCluster (org.apache.accumulo.minicluster.MiniAccumuloCluster) 5
Text (org.apache.hadoop.io.Text) 5
File (java.io.File) 4
IteratorSetting (org.apache.accumulo.core.client.IteratorSetting) 4
TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException) 4
Pair (org.apache.accumulo.core.util.Pair) 4
AccumuloConnectionParameters (org.apache.hadoop.hive.accumulo.AccumuloConnectionParameters) 4