
Example 1 with HostAndPort

Use of org.apache.accumulo.core.util.HostAndPort in project accumulo by apache.

From the class MultiTserverReplicationIT, the method tserverReplicationServicePortsAreAdvertised:

@Test
public void tserverReplicationServicePortsAreAdvertised() throws Exception {
    // Wait for the cluster to be up
    Connector conn = getConnector();
    Instance inst = conn.getInstance();
    // Wait for a tserver to come up to fulfill this request
    conn.tableOperations().create("foo");
    try (Scanner s = conn.createScanner("foo", Authorizations.EMPTY)) {
        Assert.assertEquals(0, Iterables.size(s));
        ZooReader zreader = new ZooReader(inst.getZooKeepers(), inst.getZooKeepersSessionTimeOut());
        Set<String> tserverHost = new HashSet<>();
        tserverHost.addAll(zreader.getChildren(ZooUtil.getRoot(inst) + Constants.ZTSERVERS));
        Set<HostAndPort> replicationServices = new HashSet<>();
        for (String tserver : tserverHost) {
            try {
                byte[] portData = zreader.getData(ZooUtil.getRoot(inst) + ReplicationConstants.ZOO_TSERVERS + "/" + tserver, null);
                HostAndPort replAddress = HostAndPort.fromString(new String(portData, UTF_8));
                replicationServices.add(replAddress);
            } catch (Exception e) {
                log.error("Could not find port for {}", tserver, e);
                Assert.fail("Did not find replication port advertisement for " + tserver);
            }
        }
        // Each tserver should also have an equal number of replication services running internally
        Assert.assertEquals("Expected an equal number of replication servicers and tservers", tserverHost.size(), replicationServices.size());
    }
}
Also used: Connector (org.apache.accumulo.core.client.Connector), Scanner (org.apache.accumulo.core.client.Scanner), HostAndPort (org.apache.accumulo.core.util.HostAndPort), ZooReader (org.apache.accumulo.fate.zookeeper.ZooReader), Instance (org.apache.accumulo.core.client.Instance), HashSet (java.util.HashSet), Test (org.junit.Test)
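
The core of this test is a round trip through HostAndPort: each tserver advertises its replication service as a plain "host:port" string in ZooKeeper, and HostAndPort.fromString recovers a structured address from it. Below is a minimal, self-contained sketch of just that parsing step; the advertisement string is a hypothetical value, not one read from a live cluster.

import org.apache.accumulo.core.util.HostAndPort;

public class ReplicationAddressParseSketch {
    public static void main(String[] args) {
        // The bytes read from ZooKeeper decode to a plain "host:port" string.
        String advertised = "tserver1.example.com:10002"; // hypothetical value
        HostAndPort addr = HostAndPort.fromString(advertised);
        System.out.println(addr.getHost()); // prints: tserver1.example.com
        System.out.println(addr.getPort()); // prints: 10002
    }
}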

Example 2 with HostAndPort

Use of org.apache.accumulo.core.util.HostAndPort in project accumulo by apache.

From the class Proxy, the method execute:

@Override
public void execute(final String[] args) throws Exception {
    Opts opts = new Opts();
    opts.parseArgs(Proxy.class.getName(), args);
    Properties props = new Properties();
    if (opts.prop != null) {
        props = opts.prop;
    } else {
        try (InputStream is = this.getClass().getClassLoader().getResourceAsStream("proxy.properties")) {
            if (is != null) {
                props.load(is);
            } else {
                System.err.println("proxy.properties needs to be specified as argument (using -p) or on the classpath (by putting the file in conf/)");
                System.exit(-1);
            }
        }
    }
    boolean useMini = Boolean.parseBoolean(props.getProperty(USE_MINI_ACCUMULO_KEY, USE_MINI_ACCUMULO_DEFAULT));
    boolean useMock = Boolean.parseBoolean(props.getProperty(USE_MOCK_INSTANCE_KEY, USE_MOCK_INSTANCE_DEFAULT));
    String instance = props.getProperty(ACCUMULO_INSTANCE_NAME_KEY);
    String zookeepers = props.getProperty(ZOOKEEPERS_KEY);
    if (!useMini && !useMock && instance == null) {
        System.err.println("Properties file must contain one of : useMiniAccumulo=true, useMockInstance=true, or instance=<instance name>");
        System.exit(1);
    }
    if (instance != null && zookeepers == null) {
        System.err.println("When instance is set in properties file, zookeepers must also be set.");
        System.exit(1);
    }
    if (!props.containsKey("port")) {
        System.err.println("No port property");
        System.exit(1);
    }
    if (useMini) {
        log.info("Creating mini cluster");
        final File folder = Files.createTempDirectory(System.currentTimeMillis() + "").toFile();
        final MiniAccumuloCluster accumulo = new MiniAccumuloCluster(folder, "secret");
        accumulo.start();
        props.setProperty("instance", accumulo.getConfig().getInstanceName());
        props.setProperty("zookeepers", accumulo.getZooKeepers());
        Runtime.getRuntime().addShutdownHook(new Thread() {

            // Shutdown hooks are Threads started by the JVM, so the work belongs in run()
            @Override
            public void run() {
                try {
                    accumulo.stop();
                } catch (Exception e) {
                    // Preserve the cause rather than throwing a bare RuntimeException
                    throw new RuntimeException(e);
                } finally {
                    if (!folder.delete())
                        log.warn("Unexpected error removing {}", folder);
                }
            }
        });
    }
    Class<? extends TProtocolFactory> protoFactoryClass = Class.forName(props.getProperty("protocolFactory", TCompactProtocol.Factory.class.getName())).asSubclass(TProtocolFactory.class);
    TProtocolFactory protoFactory = protoFactoryClass.newInstance();
    int port = Integer.parseInt(props.getProperty("port"));
    String hostname = props.getProperty(THRIFT_SERVER_HOSTNAME, THRIFT_SERVER_HOSTNAME_DEFAULT);
    HostAndPort address = HostAndPort.fromParts(hostname, port);
    ServerAddress server = createProxyServer(address, protoFactory, props);
    // Wait for the server to come up
    while (!server.server.isServing()) {
        Thread.sleep(100);
    }
    log.info("Proxy server started on {}", server.getAddress());
    while (server.server.isServing()) {
        Thread.sleep(1000);
    }
}
Also used: TProtocolFactory (org.apache.thrift.protocol.TProtocolFactory), FileInputStream (java.io.FileInputStream), InputStream (java.io.InputStream), MiniAccumuloCluster (org.apache.accumulo.minicluster.MiniAccumuloCluster), ServerAddress (org.apache.accumulo.server.rpc.ServerAddress), MetricsFactory (org.apache.accumulo.server.metrics.MetricsFactory), LoggerFactory (org.slf4j.LoggerFactory), Properties (java.util.Properties), IOException (java.io.IOException), HostAndPort (org.apache.accumulo.core.util.HostAndPort), AccumuloProxy (org.apache.accumulo.proxy.thrift.AccumuloProxy), File (java.io.File)
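
The HostAndPort usage here is the inverse of Example 1: rather than parsing an advertised string, the proxy assembles its listen address from separate hostname and port properties with HostAndPort.fromParts. A hedged sketch of just that step follows; the property key "thriftServerHostname" and the values are hypothetical stand-ins for the constants used above.

import java.util.Properties;
import org.apache.accumulo.core.util.HostAndPort;

public class ProxyAddressSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty("port", "42424"); // hypothetical port value
        // Fall back to all interfaces when no hostname is configured.
        String hostname = props.getProperty("thriftServerHostname", "0.0.0.0");
        int port = Integer.parseInt(props.getProperty("port"));
        HostAndPort address = HostAndPort.fromParts(hostname, port);
        System.out.println("Proxy would listen on " + address);
    }
}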

Example 3 with HostAndPort

Use of org.apache.accumulo.core.util.HostAndPort in project accumulo by apache.

From the class SimpleGarbageCollector, the method startStatsService:

private HostAndPort startStatsService() throws UnknownHostException {
    Iface rpcProxy = RpcWrapper.service(this);
    final Processor<Iface> processor;
    if (ThriftServerType.SASL == getThriftServerType()) {
        Iface tcProxy = TCredentialsUpdatingWrapper.service(rpcProxy, getClass(), getConfiguration());
        processor = new Processor<>(tcProxy);
    } else {
        processor = new Processor<>(rpcProxy);
    }
    int[] port = getConfiguration().getPort(Property.GC_PORT);
    HostAndPort[] addresses = TServerUtils.getHostAndPorts(this.opts.getAddress(), port);
    long maxMessageSize = getConfiguration().getAsBytes(Property.GENERAL_MAX_MESSAGE_SIZE);
    try {
        ServerAddress server = TServerUtils.startTServer(getConfiguration(), getThriftServerType(), processor, this.getClass().getSimpleName(), "GC Monitor Service", 2, getConfiguration().getCount(Property.GENERAL_SIMPLETIMER_THREADPOOL_SIZE), 1000, maxMessageSize, getServerSslParams(), getSaslParams(), 0, addresses);
        log.debug("Starting garbage collector listening on " + server.address);
        return server.address;
    } catch (Exception ex) {
        // ACCUMULO-3651 Level changed to error and FATAL added to message for slf4j compatibility
        log.error("FATAL:", ex);
        throw new RuntimeException(ex);
    }
}
Also used: HostAndPort (org.apache.accumulo.core.util.HostAndPort), Iface (org.apache.accumulo.core.gc.thrift.GCMonitorService.Iface), ServerAddress (org.apache.accumulo.server.rpc.ServerAddress), TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException), InvalidProtocolBufferException (com.google.protobuf.InvalidProtocolBufferException), MutationsRejectedException (org.apache.accumulo.core.client.MutationsRejectedException), FileNotFoundException (java.io.FileNotFoundException), ReplicationTableOfflineException (org.apache.accumulo.core.replication.ReplicationTableOfflineException), AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException), KeeperException (org.apache.zookeeper.KeeperException), IOException (java.io.IOException), UnknownHostException (java.net.UnknownHostException), AccumuloException (org.apache.accumulo.core.client.AccumuloException)
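
TServerUtils.getHostAndPorts pairs the configured listen address with each candidate port from the GC_PORT property, yielding the HostAndPort[] handed to startTServer. The following is an illustrative sketch of that pairing under that assumption, not the actual TServerUtils implementation; the hostname and ports are hypothetical.

import org.apache.accumulo.core.util.HostAndPort;

public class HostAndPortsSketch {
    // Combine one hostname with several candidate ports into an address array.
    static HostAndPort[] hostAndPorts(String host, int... ports) {
        HostAndPort[] addresses = new HostAndPort[ports.length];
        for (int i = 0; i < ports.length; i++) {
            addresses[i] = HostAndPort.fromParts(host, ports[i]);
        }
        return addresses;
    }

    public static void main(String[] args) {
        // Hypothetical GC hostname and candidate ports.
        for (HostAndPort a : hostAndPorts("gc.example.com", 9998, 9999)) {
            System.out.println(a);
        }
    }
}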

Example 4 with HostAndPort

Use of org.apache.accumulo.core.util.HostAndPort in project accumulo by apache.

From the class GarbageCollectorCommunicatesWithTServersIT, the method testUnreferencedWalInTserverIsClosed:

@Test(timeout = 2 * 60 * 1000)
public void testUnreferencedWalInTserverIsClosed() throws Exception {
    final String[] names = getUniqueNames(2);
    // `table` will be replicated, `otherTable` is only used to roll the WAL on the tserver
    final String table = names[0], otherTable = names[1];
    final Connector conn = getConnector();
    // Bring the replication table online first and foremost
    ReplicationTable.setOnline(conn);
    log.info("Creating {}", table);
    conn.tableOperations().create(table);
    conn.tableOperations().setProperty(table, Property.TABLE_REPLICATION.getKey(), "true");
    log.info("Writing a few mutations to the table");
    BatchWriter bw = conn.createBatchWriter(table, null);
    byte[] empty = new byte[0];
    for (int i = 0; i < 5; i++) {
        Mutation m = new Mutation(Integer.toString(i));
        m.put(empty, empty, empty);
        bw.addMutation(m);
    }
    log.info("Flushing mutations to the server");
    bw.close();
    log.info("Checking that metadata only has one WAL recorded for this table");
    Set<String> wals = getWalsForTable(table);
    Assert.assertEquals("Expected to only find two WAL for the table", 2, wals.size());
    log.info("Compacting the table which will remove all WALs from the tablets");
    // Flush our test table to remove the WAL references in it
    conn.tableOperations().flush(table, null, null, true);
    // Flush the metadata table too because it will have a reference to the WAL
    conn.tableOperations().flush(MetadataTable.NAME, null, null, true);
    log.info("Fetching replication statuses from metadata table");
    Map<String, Status> fileToStatus = getMetadataStatusForTable(table);
    Assert.assertEquals("Expected to only find one replication status message", 1, fileToStatus.size());
    String walName = fileToStatus.keySet().iterator().next();
    Assert.assertTrue("Expected log file name from tablet to equal replication entry", wals.contains(walName));
    Status status = fileToStatus.get(walName);
    Assert.assertEquals("Expected Status for file to not be closed", false, status.getClosed());
    Set<String> filesForTable = getFilesForTable(table);
    Assert.assertEquals("Expected to only find one rfile for table", 1, filesForTable.size());
    log.info("Files for table before MajC: {}", filesForTable);
    // Issue a MajC to roll a new file in HDFS
    conn.tableOperations().compact(table, null, null, false, true);
    Set<String> filesForTableAfterCompaction = getFilesForTable(table);
    log.info("Files for table after MajC: {}", filesForTableAfterCompaction);
    Assert.assertEquals("Expected to only find one rfile for table", 1, filesForTableAfterCompaction.size());
    Assert.assertNotEquals("Expected the files before and after compaction to differ", filesForTableAfterCompaction, filesForTable);
    // Use the rfile which was just replaced by the MajC to determine when the GC has run
    Path fileToBeDeleted = new Path(filesForTable.iterator().next());
    FileSystem fs = getCluster().getFileSystem();
    boolean fileExists = fs.exists(fileToBeDeleted);
    while (fileExists) {
        log.info("File which should get deleted still exists: {}", fileToBeDeleted);
        Thread.sleep(2000);
        fileExists = fs.exists(fileToBeDeleted);
    }
    // At this point in time, we *know* that the GarbageCollector has run, which means that the Status
    // for our WAL should not have been altered.
    Map<String, Status> fileToStatusAfterMinc = getMetadataStatusForTable(table);
    Assert.assertEquals("Expected to still find only one replication status message: " + fileToStatusAfterMinc, 1, fileToStatusAfterMinc.size());
    /*
     * To verify that the WAL is still getting closed, we have to force the tserver to close the existing WAL and open a new one instead. The easiest way to do
     * this is to write a load of data that will exceed the 1.33% full threshold that the logger keeps track of
     */
    conn.tableOperations().create(otherTable);
    bw = conn.createBatchWriter(otherTable, null);
    // 500k
    byte[] bigValue = new byte[1024 * 500];
    Arrays.fill(bigValue, (byte) 1);
    // 500k * 50
    for (int i = 0; i < 50; i++) {
        Mutation m = new Mutation(Integer.toString(i));
        m.put(empty, empty, bigValue);
        bw.addMutation(m);
        if (i % 10 == 0) {
            bw.flush();
        }
    }
    bw.close();
    conn.tableOperations().flush(otherTable, null, null, true);
    // Get the tservers which the master deems active
    final ClientContext context = new ClientContext(conn.getInstance(), new Credentials("root", new PasswordToken(ConfigurableMacBase.ROOT_PASSWORD)), getClientConfig());
    List<String> tservers = MasterClient.execute(context, new ClientExecReturn<List<String>, MasterClientService.Client>() {

        @Override
        public List<String> execute(MasterClientService.Client client) throws Exception {
            return client.getActiveTservers(Tracer.traceInfo(), context.rpcCreds());
        }
    });
    Assert.assertEquals("Expected only one active tservers", 1, tservers.size());
    HostAndPort tserver = HostAndPort.fromString(tservers.get(0));
    // Get the active WALs from that server
    log.info("Fetching active WALs from {}", tserver);
    Client client = ThriftUtil.getTServerClient(tserver, context);
    List<String> activeWalsForTserver = client.getActiveLogs(Tracer.traceInfo(), context.rpcCreds());
    log.info("Active wals: {}", activeWalsForTserver);
    Assert.assertEquals("Expected to find only one active WAL", 1, activeWalsForTserver.size());
    String activeWal = new Path(activeWalsForTserver.get(0)).toString();
    Assert.assertNotEquals("Current active WAL on tserver should not be the original WAL we saw", walName, activeWal);
    log.info("Ensuring that replication status does get closed after WAL is no longer in use by Tserver");
    do {
        Map<String, Status> replicationStatuses = getMetadataStatusForTable(table);
        log.info("Got replication status messages {}", replicationStatuses);
        Assert.assertEquals("Did not expect to find additional status records", 1, replicationStatuses.size());
        status = replicationStatuses.values().iterator().next();
        log.info("Current status: {}", ProtobufUtil.toString(status));
        if (status.getClosed()) {
            return;
        }
        log.info("Status is not yet closed, waiting for garbage collector to close it");
        Thread.sleep(2000);
    } while (true);
}
Also used: Connector (org.apache.accumulo.core.client.Connector), HostAndPort (org.apache.accumulo.core.util.HostAndPort), PasswordToken (org.apache.accumulo.core.client.security.tokens.PasswordToken), FileSystem (org.apache.hadoop.fs.FileSystem), RawLocalFileSystem (org.apache.hadoop.fs.RawLocalFileSystem), List (java.util.List), MasterClient (org.apache.accumulo.core.client.impl.MasterClient), Client (org.apache.accumulo.core.tabletserver.thrift.TabletClientService.Client), Status (org.apache.accumulo.server.replication.proto.Replication.Status), Path (org.apache.hadoop.fs.Path), ClientContext (org.apache.accumulo.core.client.impl.ClientContext), MasterClientService (org.apache.accumulo.core.master.thrift.MasterClientService), BatchWriter (org.apache.accumulo.core.client.BatchWriter), Mutation (org.apache.accumulo.core.data.Mutation), Credentials (org.apache.accumulo.core.client.impl.Credentials), Test (org.junit.Test)
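
Beyond the HostAndPort.fromString call on the master's "host:port" report, the structural pattern this test leans on twice is an unbounded poll (first for the rfile to disappear, then for the replication status to close), relying on the @Test timeout to bail out. Here is a hedged, generic sketch of that wait-until pattern with an explicit deadline added; the names are illustrative, not from the test.

import java.util.function.BooleanSupplier;

public class WaitUntilSketch {
    // Poll until the condition holds, failing fast once the deadline passes.
    static void waitUntil(BooleanSupplier condition, long timeoutMillis) throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMillis;
        while (!condition.getAsBoolean()) {
            if (System.currentTimeMillis() > deadline) {
                throw new IllegalStateException("condition not met within " + timeoutMillis + " ms");
            }
            Thread.sleep(2000); // same poll interval the test uses
        }
    }
}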

Example 5 with HostAndPort

Use of org.apache.accumulo.core.util.HostAndPort in project accumulo by apache.

From the class WatchTheWatchCountIT, the method test:

@Test
public void test() throws Exception {
    Connector c = getConnector();
    String[] tableNames = getUniqueNames(3);
    for (String tableName : tableNames) {
        c.tableOperations().create(tableName);
    }
    c.tableOperations().list();
    String zooKeepers = c.getInstance().getZooKeepers();
    final long MIN = 475L;
    final long MAX = 700L;
    long total = 0;
    final HostAndPort hostAndPort = HostAndPort.fromString(zooKeepers);
    for (int i = 0; i < 5; i++) {
        try (Socket socket = new Socket(hostAndPort.getHost(), hostAndPort.getPort())) {
            socket.getOutputStream().write("wchs\n".getBytes(), 0, 5);
            byte[] buffer = new byte[1024];
            int n = socket.getInputStream().read(buffer);
            String response = new String(buffer, 0, n);
            total = Long.parseLong(response.split(":")[1].trim());
            log.info("Total: {}", total);
            if (total > MIN && total < MAX) {
                break;
            }
            log.debug("Expected number of watchers to be contained in ({}, {}), but actually was {}. Sleeping and retrying", MIN, MAX, total);
            Thread.sleep(5000);
        }
    }
    assertTrue("Expected number of watchers to be contained in (" + MIN + ", " + MAX + "), but actually was " + total, total > MIN && total < MAX);
}
Also used: Connector (org.apache.accumulo.core.client.Connector), HostAndPort (org.apache.accumulo.core.util.HostAndPort), Socket (java.net.Socket), Test (org.junit.Test)
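
Note that HostAndPort.fromString is handed the raw ZooKeeper connect string here, which only works because the test cluster runs a single ZooKeeper; a comma-separated quorum string would have to be split first. A self-contained sketch of the same probe follows, sending ZooKeeper's four-letter "wchs" (watch summary) command over a raw socket; the address is hypothetical and a live ZooKeeper must be listening for it to print anything.

import java.net.Socket;
import java.nio.charset.StandardCharsets;
import org.apache.accumulo.core.util.HostAndPort;

public class WatchCountProbe {
    public static void main(String[] args) throws Exception {
        HostAndPort zk = HostAndPort.fromString("zk1.example.com:2181"); // hypothetical
        try (Socket socket = new Socket(zk.getHost(), zk.getPort())) {
            socket.getOutputStream().write("wchs\n".getBytes(StandardCharsets.UTF_8));
            byte[] buffer = new byte[1024];
            int n = socket.getInputStream().read(buffer);
            // ZooKeeper replies with watch statistics, ending in a "Total watches:N" line
            System.out.println(new String(buffer, 0, n, StandardCharsets.UTF_8));
        }
    }
}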

Aggregations

HostAndPort (org.apache.accumulo.core.util.HostAndPort): 38 usages
AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException): 12 usages
ArrayList (java.util.ArrayList): 11 usages
TTransportException (org.apache.thrift.transport.TTransportException): 10 usages
AccumuloException (org.apache.accumulo.core.client.AccumuloException): 8 usages
ThriftSecurityException (org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException): 8 usages
KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent): 8 usages
TException (org.apache.thrift.TException): 8 usages
UnknownHostException (java.net.UnknownHostException): 7 usages
IOException (java.io.IOException): 6 usages
Instance (org.apache.accumulo.core.client.Instance): 6 usages
TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException): 6 usages
TabletClientService (org.apache.accumulo.core.tabletserver.thrift.TabletClientService): 6 usages
TServerInstance (org.apache.accumulo.server.master.state.TServerInstance): 6 usages
KeeperException (org.apache.zookeeper.KeeperException): 6 usages
Test (org.junit.Test): 6 usages
Connector (org.apache.accumulo.core.client.Connector): 5 usages
Client (org.apache.accumulo.core.tabletserver.thrift.TabletClientService.Client): 5 usages
MasterClient (org.apache.accumulo.core.client.impl.MasterClient): 4 usages
Text (org.apache.hadoop.io.Text): 4 usages