Example 11 with AdminClientConfig

use of voldemort.client.protocol.admin.AdminClientConfig in project voldemort by voldemort.

the class VoldemortSwapJob method run.

public void run() throws Exception {
    ExecutorService executor = Executors.newCachedThreadPool();
    // Read the hadoop configuration settings
    JobConf conf = new JobConf();
    Path dataPath = new Path(dataDir);
    String modifiedDataDir = dataPath.makeQualified(FileSystem.get(conf)).toString();
    /*
     * Replace the default protocol and port with the ones derived above.
     */
    try {
        modifiedDataDir = VoldemortUtils.modifyURL(modifiedDataDir, hdfsFetcherProtocol, Integer.valueOf(hdfsFetcherPort), false);
    } catch (NumberFormatException nfe) {
        info("The dataDir will not be modified, since hdfsFetcherPort is not a valid port number");
    } catch (IllegalArgumentException e) {
        info("The dataDir will not be modified, since it does not contain the expected " + "structure of protocol:hostname:port/some_path");
    }
    try {
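        // Sanity check only: the modified dataDir must still parse as a Hadoop Path; the Path instance is discarded.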
        new Path(modifiedDataDir);
    } catch (IllegalArgumentException e) {
        throw new VoldemortException("Could not create a valid data path out of the supplied dataDir: " + dataDir, e);
    }
    // It should not be necessary to set the max conn / node so high, but it should not be a big deal either. New
    // connections will be created as needed, not upfront, so there should be no extra cost associated with the
    // higher setting. There shouldn't be many parallel requests happening in this use case, but we're going to
    // leave it as is for now, just to minimize the potential for unforeseen regressions.
    AdminClientConfig adminConfig = new AdminClientConfig().setMaxConnectionsPerNode(cluster.getNumberOfNodes()).setMaxBackoffDelayMs(maxBackoffDelayMs).setAdminSocketTimeoutSec(60 * 5);
    ClientConfig clientConfig = new ClientConfig().setBootstrapUrls(cluster.getBootStrapUrls()).setConnectionTimeout(httpTimeoutMs, TimeUnit.MILLISECONDS);
    // Create admin client
    AdminClient client = new AdminClient(adminConfig, clientConfig);
    if (pushVersion == -1L) {
        // Need to retrieve max version
        ArrayList<String> stores = new ArrayList<String>();
        stores.add(storeName);
        Map<String, Long> pushVersions = client.readonlyOps.getROMaxVersion(stores, maxNodeFailures);
        if (pushVersions == null || !pushVersions.containsKey(storeName)) {
            throw new RuntimeException("Push version could not be determined for store " + storeName);
        }
        pushVersion = pushVersions.get(storeName);
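        // The new push uses one more than the current max version.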
        pushVersion++;
    }
    // do the fetch, and if it succeeds, the swap
    info("Initiating fetch of " + storeName + " with dataDir: " + dataDir);
    AdminStoreSwapper swapper = new AdminStoreSwapper(executor, client, httpTimeoutMs, rollbackFailedSwap, failedFetchStrategyList, clusterName, buildPrimaryReplicasOnly);
    swapper.fetchAndSwapStoreData(storeName, modifiedDataDir, pushVersion);
    info("Swap complete.");
    executor.shutdownNow();
    executor.awaitTermination(10, TimeUnit.SECONDS);
}
Also used : Path(org.apache.hadoop.fs.Path) AdminClientConfig(voldemort.client.protocol.admin.AdminClientConfig) ArrayList(java.util.ArrayList) VoldemortException(voldemort.VoldemortException) AdminStoreSwapper(voldemort.store.readonly.swapper.AdminStoreSwapper) ExecutorService(java.util.concurrent.ExecutorService) ClientConfig(voldemort.client.ClientConfig) JobConf(org.apache.hadoop.mapred.JobConf) AdminClient(voldemort.client.protocol.admin.AdminClient)
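
The fluent setter chain on AdminClientConfig and ClientConfig above is self-contained enough to lift out. Below is a minimal sketch of the same construction; the helper name, bootstrap URL, and numeric values are illustrative placeholders, not values recommended by Voldemort.

import java.util.concurrent.TimeUnit;
import voldemort.client.ClientConfig;
import voldemort.client.protocol.admin.AdminClient;
import voldemort.client.protocol.admin.AdminClientConfig;

// Hypothetical helper: builds an AdminClient the same way VoldemortSwapJob does.
static AdminClient createAdminClient(String bootstrapUrl) {
    AdminClientConfig adminConfig = new AdminClientConfig()
            // Placeholder values; VoldemortSwapJob sizes these per use case.
            .setMaxConnectionsPerNode(8)
            .setMaxBackoffDelayMs(1000)
            .setAdminSocketTimeoutSec(60 * 5);
    ClientConfig clientConfig = new ClientConfig()
            .setBootstrapUrls(bootstrapUrl)
            .setConnectionTimeout(30000, TimeUnit.MILLISECONDS);
    return new AdminClient(adminConfig, clientConfig);
}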

Example 12 with AdminClientConfig

use of voldemort.client.protocol.admin.AdminClientConfig in project voldemort by voldemort.

the class ZoneShrinkageClientTest method setup.

@Before
public void setup() throws IOException {
    ClusterTestUtils.reset();
    sourceCluster = ClusterTestUtils.getZZZCluster();
    targetCluster = RebalanceUtils.vacateZone(sourceCluster, DROP_ZONE_ID);
    sourceStoreDefs = ClusterTestUtils.getZZZStoreDefsBDB();
    targetStoreDefs = RebalanceUtils.dropZone(sourceStoreDefs, DROP_ZONE_ID);
    File sourceStoreDefsXml = ServerTestUtils.createTempFile("zzz-stores-", ".xml");
    FileUtils.writeStringToFile(sourceStoreDefsXml, new StoreDefinitionsMapper().writeStoreList(sourceStoreDefs));
    servers = new VoldemortServer[sourceCluster.getNumberOfNodes()];
    ServerTestUtils.startVoldemortCluster(servers, null, null, sourceStoreDefsXml.getAbsolutePath(), new Properties(), sourceCluster);
    Properties adminProperties = new Properties();
    adminProperties.setProperty("max_connections", "2");
    adminClient = new AdminClient(servers[0].getMetadataStore().getCluster(), new AdminClientConfig(adminProperties));
}
Also used : AdminClientConfig(voldemort.client.protocol.admin.AdminClientConfig) StoreDefinitionsMapper(voldemort.xml.StoreDefinitionsMapper) Properties(java.util.Properties) File(java.io.File) AdminClient(voldemort.client.protocol.admin.AdminClient) Before(org.junit.Before)
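
This test drives the same AdminClientConfig through its Properties-based constructor instead of the fluent setters. A minimal sketch of that style, assuming an existing Cluster object; the helper name is hypothetical, and "max_connections" is the property key this test uses.

import java.util.Properties;
import voldemort.client.protocol.admin.AdminClient;
import voldemort.client.protocol.admin.AdminClientConfig;
import voldemort.cluster.Cluster;

// Hypothetical helper: mirrors the test setup above.
static AdminClient createAdminClient(Cluster cluster) {
    Properties adminProperties = new Properties();
    // Property key as used in the test above.
    adminProperties.setProperty("max_connections", "2");
    return new AdminClient(cluster, new AdminClientConfig(adminProperties));
}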

Example 13 with AdminClientConfig

use of voldemort.client.protocol.admin.AdminClientConfig in project voldemort by voldemort.

the class StreamingSlopPusherJob method run.

public void run() {
    // load the metadata before each run, in case the cluster is changed
    loadMetadata();
    // don't try to run slop pusher job when rebalancing
    if (metadataStore.getServerStateUnlocked().equals(MetadataStore.VoldemortState.REBALANCING_MASTER_SERVER)) {
        logger.error("Cannot run slop pusher job since Voldemort server is rebalancing");
        return;
    }
    boolean terminatedEarly = false;
    Date startTime = new Date();
    logger.info("Started streaming slop pusher job at " + startTime);
    SlopStorageEngine slopStorageEngine = storeRepo.getSlopStore();
    ClosableIterator<Pair<ByteArray, Versioned<Slop>>> iterator = null;
    if (adminClient == null) {
        adminClient = new AdminClient(cluster, new AdminClientConfig().setMaxConnectionsPerNode(1));
    }
    if (voldemortConfig.getSlopZonesDownToTerminate() > 0) {
        // Populating the zone mapping for early termination
        zoneMapping.clear();
        for (Node n : cluster.getNodes()) {
            if (failureDetector.isAvailable(n)) {
                Set<Integer> nodes = zoneMapping.get(n.getZoneId());
                if (nodes == null) {
                    nodes = Sets.newHashSet();
                    zoneMapping.put(n.getZoneId(), nodes);
                }
                nodes.add(n.getId());
            }
        }
        // Check how many zones are down
        int zonesDown = 0;
        for (Zone zone : cluster.getZones()) {
            if (zoneMapping.get(zone.getId()) == null || zoneMapping.get(zone.getId()).size() == 0)
                zonesDown++;
        }
        // Terminate early
        if (voldemortConfig.getSlopZonesDownToTerminate() <= zoneMapping.size() && zonesDown >= voldemortConfig.getSlopZonesDownToTerminate()) {
            logger.info("Completed streaming slop pusher job at " + startTime + " early because " + zonesDown + " zones are down");
            stopAdminClient();
            return;
        }
    }
    // Clearing the statistics
    AtomicLong attemptedPushes = new AtomicLong(0);
    for (Node node : cluster.getNodes()) {
        attemptedByNode.put(node.getId(), 0L);
        succeededByNode.put(node.getId(), 0L);
    }
    Set<String> storeNames = StoreDefinitionUtils.getStoreNamesSet(metadataStore.getStoreDefList());
    acquireRepairPermit();
    try {
        StorageEngine<ByteArray, Slop, byte[]> slopStore = slopStorageEngine.asSlopStore();
        iterator = slopStore.entries();
        while (iterator.hasNext()) {
            Pair<ByteArray, Versioned<Slop>> keyAndVal;
            try {
                keyAndVal = iterator.next();
                Versioned<Slop> versioned = keyAndVal.getSecond();
                // Track the scan progress
                if (this.streamStats != null) {
                    this.streamStats.reportStreamingSlopScan();
                }
                // Retrieve the node
                int nodeId = versioned.getValue().getNodeId();
                // check for dead slops
                if (isSlopDead(cluster, storeNames, versioned.getValue())) {
                    handleDeadSlop(slopStorageEngine, keyAndVal);
                    // ignore it.
                    continue;
                }
                Node node = cluster.getNodeById(nodeId);
                attemptedPushes.incrementAndGet();
                Long attempted = attemptedByNode.get(nodeId);
                attemptedByNode.put(nodeId, attempted + 1L);
                if (attemptedPushes.get() % 10000 == 0)
                    logger.info("Attempted pushing " + attemptedPushes + " slops");
                if (logger.isTraceEnabled())
                    logger.trace("Pushing slop for " + versioned.getValue().getNodeId() + " and store  " + versioned.getValue().getStoreName() + " of key: " + versioned.getValue().getKey());
                if (failureDetector.isAvailable(node)) {
                    SynchronousQueue<Versioned<Slop>> slopQueue = slopQueues.get(nodeId);
                    if (slopQueue == null) {
                        // No previous slop queue, add one
                        slopQueue = new SynchronousQueue<Versioned<Slop>>();
                        slopQueues.put(nodeId, slopQueue);
                        consumerResults.add(consumerExecutor.submit(new SlopConsumer(nodeId, slopQueue, slopStorageEngine)));
                    }
                    boolean offered = slopQueue.offer(versioned, voldemortConfig.getClientRoutingTimeoutMs(), TimeUnit.MILLISECONDS);
                    if (!offered) {
                        if (logger.isDebugEnabled())
                            logger.debug("No consumer appeared for slop in " + voldemortConfig.getClientConnectionTimeoutMs() + " ms");
                    }
                    readThrottler.maybeThrottle(nBytesRead(keyAndVal));
                } else {
                    logger.trace(node + " declared down, won't push slop");
                }
            } catch (RejectedExecutionException e) {
                throw new VoldemortException("Ran out of threads in executor", e);
            }
        }
    } catch (InterruptedException e) {
        logger.warn("Interrupted exception", e);
        terminatedEarly = true;
    } catch (Exception e) {
        logger.error(e, e);
        terminatedEarly = true;
    } finally {
        try {
            if (iterator != null)
                iterator.close();
        } catch (Exception e) {
            logger.warn("Failed to close iterator cleanly as database might be closed", e);
        }
        // Adding the poison pill
        for (SynchronousQueue<Versioned<Slop>> slopQueue : slopQueues.values()) {
            try {
                slopQueue.put(END);
            } catch (InterruptedException e) {
                logger.warn("Error putting poison pill", e);
            }
        }
        for (Future result : consumerResults) {
            try {
                result.get();
            } catch (Exception e) {
                logger.warn("Exception in consumer", e);
            }
        }
        // Update the counts only if no exception took place
        if (!terminatedEarly) {
            Map<Integer, Long> outstanding = Maps.newHashMapWithExpectedSize(cluster.getNumberOfNodes());
            for (int nodeId : succeededByNode.keySet()) {
                logger.info("Slops to node " + nodeId + " - Succeeded - " + succeededByNode.get(nodeId) + " - Attempted - " + attemptedByNode.get(nodeId));
                outstanding.put(nodeId, attemptedByNode.get(nodeId) - succeededByNode.get(nodeId));
            }
            slopStorageEngine.resetStats(outstanding);
            logger.info("Completed streaming slop pusher job which started at " + startTime);
        } else {
            for (int nodeId : succeededByNode.keySet()) {
                logger.info("Slops to node " + nodeId + " - Succeeded - " + succeededByNode.get(nodeId) + " - Attempted - " + attemptedByNode.get(nodeId));
            }
            logger.info("Completed early streaming slop pusher job which started at " + startTime);
        }
        // Shut down the admin client so as not to waste connections
        consumerResults.clear();
        slopQueues.clear();
        stopAdminClient();
        this.repairPermits.release(this.getClass().getCanonicalName());
    }
}
Also used : Versioned(voldemort.versioning.Versioned) Node(voldemort.cluster.Node) VoldemortException(voldemort.VoldemortException) ByteArray(voldemort.utils.ByteArray) SlopStorageEngine(voldemort.store.slop.SlopStorageEngine) Pair(voldemort.utils.Pair) AdminClientConfig(voldemort.client.protocol.admin.AdminClientConfig) Zone(voldemort.cluster.Zone) Date(java.util.Date) RejectedExecutionException(java.util.concurrent.RejectedExecutionException) UnreachableStoreException(voldemort.store.UnreachableStoreException) AtomicLong(java.util.concurrent.atomic.AtomicLong) Future(java.util.concurrent.Future) Slop(voldemort.store.slop.Slop) AdminClient(voldemort.client.protocol.admin.AdminClient)
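
The shutdown handshake in this job, a sentinel END value (the "poison pill") pushed through each SynchronousQueue, is worth seeing in isolation. The standalone sketch below reproduces the hand-off and shutdown pattern with illustrative names (PoisonPillSketch, "slop-N" strings); nothing in it is Voldemort API.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.TimeUnit;

public class PoisonPillSketch {

    // Sentinel object; the consumer stops when it sees this exact reference.
    private static final String END = "END";

    public static void main(String[] args) throws InterruptedException {
        SynchronousQueue<String> queue = new SynchronousQueue<String>();
        ExecutorService consumerExecutor = Executors.newSingleThreadExecutor();
        consumerExecutor.submit(() -> {
            try {
                String item;
                // Reference comparison on purpose: END is the poison pill.
                while ((item = queue.take()) != END) {
                    System.out.println("consumed " + item);
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        for (int i = 0; i < 3; i++) {
            // Like the slop pusher, give up on an item if no consumer takes it in time.
            if (!queue.offer("slop-" + i, 100, TimeUnit.MILLISECONDS)) {
                System.out.println("no consumer for slop-" + i);
            }
        }
        // The poison pill unblocks the consumer and ends its loop.
        queue.put(END);
        consumerExecutor.shutdown();
        consumerExecutor.awaitTermination(5, TimeUnit.SECONDS);
    }
}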

Example 14 with AdminClientConfig

use of voldemort.client.protocol.admin.AdminClientConfig in project voldemort by voldemort.

the class OfflineStateTest method setUp.

@Before
public void setUp() throws IOException {
    int numServers = 1;
    servers = new VoldemortServer[numServers];
    int[][] partitionMap = { { 0, 1, 2, 3 } };
    // , { 4, 5, 6, 7 } };
    Properties serverProperties = new Properties();
    serverProperties.setProperty("client.max.connections.per.node", "20");
    serverProperties.setProperty("enforce.retention.policy.on.read", Boolean.toString(onlineRetention));
    cluster = ServerTestUtils.startVoldemortCluster(numServers, servers, partitionMap, socketStoreFactory, useNio, null, storesXmlfile, serverProperties);
    storeDefs = new StoreDefinitionsMapper().readStoreList(new File(storesXmlfile));
    Properties adminProperties = new Properties();
    adminProperties.setProperty("max_connections", "20");
    adminClient = new AdminClient(cluster, new AdminClientConfig(adminProperties));
    Node node = cluster.getNodeById(0);
    String bootstrapUrl = "tcp://" + node.getHost() + ":" + node.getSocketPort();
    StoreClientFactory storeClientFactory = new SocketStoreClientFactory(new ClientConfig().setBootstrapUrls(bootstrapUrl));
    storeClient = storeClientFactory.getStoreClient(STORE_NAME);
}
Also used : AdminClientConfig(voldemort.client.protocol.admin.AdminClientConfig) Node(voldemort.cluster.Node) StoreDefinitionsMapper(voldemort.xml.StoreDefinitionsMapper) Properties(java.util.Properties) File(java.io.File) AdminClient(voldemort.client.protocol.admin.AdminClient) Before(org.junit.Before)
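
The last two lines of the setup bootstrap a regular (non-admin) store client against a single node. A minimal sketch of that step; the helper name and the String key/value types are assumptions made for illustration.

import voldemort.client.ClientConfig;
import voldemort.client.SocketStoreClientFactory;
import voldemort.client.StoreClient;
import voldemort.client.StoreClientFactory;

// Hypothetical helper: bootstraps a store client as in the test above.
static StoreClient<String, String> createStoreClient(String bootstrapUrl, String storeName) {
    StoreClientFactory factory = new SocketStoreClientFactory(
            new ClientConfig().setBootstrapUrls(bootstrapUrl));
    return factory.getStoreClient(storeName);
}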

Aggregations

AdminClient (voldemort.client.protocol.admin.AdminClient) 14
AdminClientConfig (voldemort.client.protocol.admin.AdminClientConfig) 14
Properties (java.util.Properties) 8
Before (org.junit.Before) 8
File (java.io.File) 5
Node (voldemort.cluster.Node) 5
StoreDefinitionsMapper (voldemort.xml.StoreDefinitionsMapper) 5
VoldemortException (voldemort.VoldemortException) 4
ClientConfig (voldemort.client.ClientConfig) 4
IOException (java.io.IOException) 2
ExecutorService (java.util.concurrent.ExecutorService) 2
AdminStoreSwapper (voldemort.store.readonly.swapper.AdminStoreSwapper) 2
ByteArray (voldemort.utils.ByteArray) 2
Pair (voldemort.utils.Pair) 2
ArrayList (java.util.ArrayList) 1
Date (java.util.Date) 1
HashMap (java.util.HashMap) 1
Map (java.util.Map) 1
Future (java.util.concurrent.Future) 1
RejectedExecutionException (java.util.concurrent.RejectedExecutionException) 1