Example 1 with EMPTY

Use of org.apache.commons.lang3.StringUtils.EMPTY in project PneumaticCraft by MineMaarten.

From the class CraftingRegistrator, method addPressureChamberRecipes:

private static void addPressureChamberRecipes() {
    IPneumaticRecipeRegistry registry = PneumaticRegistry.getInstance().getRecipeRegistry();
    // diamond
    if (Config.enableCoalToDiamondsRecipe)
        registry.registerPressureChamberRecipe(new ItemStack[] { new ItemStack(Blocks.coal_block, 8, 0) }, 4.0F, new ItemStack[] { new ItemStack(Items.diamond, 1, 0) });
    // compressed iron
    registry.registerPressureChamberRecipe(new Object[] { new ImmutablePair("ingotIron", 1) }, 2F, new ItemStack[] { new ItemStack(Itemss.ingotIronCompressed, 1, 0) });
    registry.registerPressureChamberRecipe(new Object[] { new ImmutablePair("blockIron", 1) }, 2F, new ItemStack[] { new ItemStack(Blockss.compressedIron, 1, 0) });
    // turbine blade
    registry.registerPressureChamberRecipe(new Object[] { new ImmutablePair("dustRedstone", 2), new ImmutablePair("ingotGold", 1) }, 1F, new ItemStack[] { new ItemStack(Itemss.turbineBlade, 1, 0) });
    // plastic
    for (int i = 0; i < 16; i++) {
        registry.registerPressureChamberRecipe(new ItemStack[] { new ItemStack(Itemss.plasticPlant, 1, i) }, 0.5F, new ItemStack[] { new ItemStack(Itemss.plastic, 1, i) });
    }
    // Empty PCB
    registry.registerPressureChamberRecipe(new Object[] { new ItemStack(Itemss.plastic, 1, ItemPlasticPlants.CREEPER_PLANT_DAMAGE), new ImmutablePair(Names.INGOT_IRON_COMPRESSED, 1) }, 1.5F, new ItemStack[] { new ItemStack(Itemss.emptyPCB, 1, Itemss.emptyPCB.getMaxDamage()) });
    // Etching Acid Bucket
    registry.registerPressureChamberRecipe(new ItemStack[] { new ItemStack(Itemss.plastic, 2, ItemPlasticPlants.CREEPER_PLANT_DAMAGE), new ItemStack(Items.rotten_flesh, 2, 0), new ItemStack(Items.gunpowder, 2, 0), new ItemStack(Items.spider_eye, 2, 0), new ItemStack(Items.water_bucket) }, 1.0F, new ItemStack[] { new ItemStack(Fluids.getBucket(Fluids.etchingAcid)) });
    // Transistor
    registry.registerPressureChamberRecipe(new Object[] { new ItemStack(Itemss.plastic, 1, ItemPlasticPlants.SQUID_PLANT_DAMAGE), new ImmutablePair("ingotIronCompressed", 1), new ImmutablePair("dustRedstone", 1) }, 1.0F, new ItemStack[] { new ItemStack(Itemss.transistor) });
    // Capacitor
    registry.registerPressureChamberRecipe(new Object[] { new ItemStack(Itemss.plastic, 1, ItemPlasticPlants.LIGHTNING_PLANT_DAMAGE), new ImmutablePair("ingotIronCompressed", 1), new ImmutablePair("dustRedstone", 1) }, 1.0F, new ItemStack[] { new ItemStack(Itemss.capacitor) });
    // Vacuum dis-enchanting
    registry.registerPressureChamberRecipe(new PressureChamberVacuumEnchantHandler());
}
Also used: ImmutablePair (org.apache.commons.lang3.tuple.ImmutablePair), IPneumaticRecipeRegistry (pneumaticCraft.api.recipe.IPneumaticRecipeRegistry), ItemStack (net.minecraft.item.ItemStack)
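
The Object[] input arrays above mix concrete ItemStacks with ImmutablePair entries, where each pair is an (ore-dictionary key, amount) tuple. Below is a minimal sketch of that convention; the RecipeSink interface is a hypothetical stand-in for IPneumaticRecipeRegistry, since only the pair pattern is of interest here.

import org.apache.commons.lang3.tuple.ImmutablePair;

public class PairInputSketch {

    // Hypothetical stand-in for IPneumaticRecipeRegistry#registerPressureChamberRecipe:
    // inputs may be concrete item stacks or (oreDictKey, amount) pairs.
    interface RecipeSink {
        void register(Object[] inputs, float requiredPressure, Object[] outputs);
    }

    static void addTurbineBladeLikeRecipe(RecipeSink registry) {
        // Two redstone dust plus one gold ingot, matched via ore-dictionary
        // names, mirroring the turbine blade registration above.
        registry.register(
                new Object[] { new ImmutablePair<>("dustRedstone", 2), new ImmutablePair<>("ingotGold", 1) },
                1.0F,
                new Object[] { "turbineBlade" });
    }
}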

Example 2 with EMPTY

Use of org.apache.commons.lang3.StringUtils.EMPTY in project neo4j by neo4j.

From the class ConsistencyCheckerTest, method shouldThrowDescriptiveExceptionOnBrokenGSPP:

@Test
public void shouldThrowDescriptiveExceptionOnBrokenGSPP() throws Exception {
    // GIVEN
    int pageSize = 256;
    PageCursor cursor = new PageAwareByteArrayCursor(pageSize);
    Layout<MutableLong, MutableLong> layout = new SimpleLongLayout();
    TreeNode<MutableLong, MutableLong> treeNode = new TreeNode<>(pageSize, layout);
    long stableGeneration = MIN_GENERATION;
    long crashGeneration = stableGeneration + 1;
    long unstableGeneration = stableGeneration + 2;
    String pointerFieldName = "abc";
    long pointer = 123;
    cursor.next(0);
    treeNode.initializeInternal(cursor, stableGeneration, crashGeneration);
    treeNode.setSuccessor(cursor, pointer, stableGeneration, crashGeneration);
    // WHEN
    try {
        assertNoCrashOrBrokenPointerInGSPP(cursor, stableGeneration, unstableGeneration, pointerFieldName, TreeNode.BYTE_POS_SUCCESSOR, treeNode);
        cursor.checkAndClearCursorException();
        fail("Should have failed");
    } catch (CursorException e) {
        // THEN
        assertThat(e.getMessage(), containsString(pointerFieldName));
        assertThat(e.getMessage(), containsString("state=CRASH"));
        assertThat(e.getMessage(), containsString("state=EMPTY"));
        assertThat(e.getMessage(), containsString(String.valueOf(pointer)));
    }
}
Also used: MutableLong (org.apache.commons.lang3.mutable.MutableLong), CursorException (org.neo4j.io.pagecache.CursorException), CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString), PageCursor (org.neo4j.io.pagecache.PageCursor), Test (org.junit.Test)
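
The test's core pattern, provoking a failure and then asserting on fragments of the exception message, works without any Neo4j internals. A minimal sketch assuming only JUnit 4 and Hamcrest on the classpath; the exception type and message below are placeholders for CursorException and the real GSPP diagnostics.

import static org.hamcrest.CoreMatchers.containsString;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.fail;

import org.junit.Test;

public class DescriptiveExceptionSketchTest {

    @Test
    public void shouldDescribeFailureInMessage() {
        try {
            brokenOperation();
            fail("Should have failed");
        } catch (IllegalStateException e) {
            // Assert on message fragments, as done with CursorException above.
            assertThat(e.getMessage(), containsString("abc"));
            assertThat(e.getMessage(), containsString("state=CRASH"));
            assertThat(e.getMessage(), containsString("123"));
        }
    }

    private static void brokenOperation() {
        // Placeholder for the broken-GSPP read in the real test.
        throw new IllegalStateException("pointer abc state=CRASH value=123");
    }
}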

Example 3 with EMPTY

Use of org.apache.commons.lang3.StringUtils.EMPTY in project midpoint by Evolveum.

From the class ShadowIntegrityCheckResultHandler, method checkShadow:

private void checkShadow(ShadowCheckResult checkResult, PrismObject<ShadowType> shadow, Task workerTask, OperationResult result) throws SchemaException {
    ShadowType shadowType = shadow.asObjectable();
    ObjectReferenceType resourceRef = shadowType.getResourceRef();
    if (LOGGER.isTraceEnabled()) {
        LOGGER.trace("Checking shadow {} (resource {})", ObjectTypeUtil.toShortString(shadowType), resourceRef != null ? resourceRef.getOid() : "(null)");
    }
    statistics.incrementShadows();
    if (resourceRef == null) {
        checkResult.recordError(Statistics.NO_RESOURCE_OID, new SchemaException("No resourceRef"));
        fixNoResourceIfRequested(checkResult, Statistics.NO_RESOURCE_OID);
        applyFixes(checkResult, shadow, workerTask, result);
        return;
    }
    String resourceOid = resourceRef.getOid();
    if (resourceOid == null) {
        checkResult.recordError(Statistics.NO_RESOURCE_OID, new SchemaException("Null resource OID"));
        fixNoResourceIfRequested(checkResult, Statistics.NO_RESOURCE_OID);
        applyFixes(checkResult, shadow, workerTask, result);
        return;
    }
    PrismObject<ResourceType> resource = resources.get(resourceOid);
    if (resource == null) {
        statistics.incrementResources();
        try {
            resource = provisioningService.getObject(ResourceType.class, resourceOid, null, workerTask, result);
        } catch (ObjectNotFoundException e) {
            checkResult.recordError(Statistics.NO_RESOURCE, new ObjectNotFoundException("Resource object does not exist: " + e.getMessage(), e));
            fixNoResourceIfRequested(checkResult, Statistics.NO_RESOURCE);
            applyFixes(checkResult, shadow, workerTask, result);
            return;
        } catch (SchemaException e) {
            checkResult.recordError(Statistics.CANNOT_GET_RESOURCE, new SchemaException("Resource object has schema problems: " + e.getMessage(), e));
            return;
        } catch (CommonException | RuntimeException e) {
            checkResult.recordError(Statistics.CANNOT_GET_RESOURCE, new SystemException("Resource object cannot be fetched for some reason: " + e.getMessage(), e));
            return;
        }
        resources.put(resourceOid, resource);
    }
    checkResult.setResource(resource);
    ShadowKindType kind = shadowType.getKind();
    if (kind == null) {
        // TODO or simply assume account?
        checkResult.recordError(Statistics.NO_KIND_SPECIFIED, new SchemaException("No kind specified"));
        return;
    }
    if (checkExtraData) {
        checkOrFixShadowActivationConsistency(checkResult, shadow, fixExtraData);
    }
    PrismObject<ShadowType> fetchedShadow = null;
    if (checkFetch) {
        fetchedShadow = fetchShadow(checkResult, shadow, resource, workerTask, result);
        if (fetchedShadow != null) {
            shadow.setUserData(KEY_EXISTS_ON_RESOURCE, "true");
        }
    }
    if (checkOwners) {
        List<PrismObject<FocusType>> owners = searchOwners(shadow, result);
        if (owners != null) {
            shadow.setUserData(KEY_OWNERS, owners);
            if (owners.size() > 1) {
                checkResult.recordError(Statistics.MULTIPLE_OWNERS, new SchemaException("Multiple owners: " + owners));
            }
        }
        if (shadowType.getSynchronizationSituation() == SynchronizationSituationType.LINKED && (owners == null || owners.isEmpty())) {
            checkResult.recordError(Statistics.LINKED_WITH_NO_OWNER, new SchemaException("Linked shadow with no owner"));
        }
        if (shadowType.getSynchronizationSituation() != SynchronizationSituationType.LINKED && owners != null && !owners.isEmpty()) {
            checkResult.recordError(Statistics.NOT_LINKED_WITH_OWNER, new SchemaException("Shadow with an owner but not marked as linked (marked as " + shadowType.getSynchronizationSituation() + ")"));
        }
    }
    String intent = shadowType.getIntent();
    if (checkIntents && (intent == null || intent.isEmpty())) {
        checkResult.recordWarning(Statistics.NO_INTENT_SPECIFIED, "None or empty intent");
    }
    if (fixIntents && (intent == null || intent.isEmpty())) {
        doFixIntent(checkResult, fetchedShadow, shadow, resource, workerTask, result);
    }
    Pair<String, ShadowKindType> key = new ImmutablePair<>(resourceOid, kind);
    ObjectTypeContext context = contextMap.get(key);
    if (context == null) {
        context = new ObjectTypeContext();
        context.setResource(resource);
        RefinedResourceSchema resourceSchema;
        try {
            resourceSchema = RefinedResourceSchemaImpl.getRefinedSchema(context.getResource(), LayerType.MODEL, prismContext);
        } catch (SchemaException e) {
            checkResult.recordError(Statistics.CANNOT_GET_REFINED_SCHEMA, new SchemaException("Couldn't derive resource schema: " + e.getMessage(), e));
            return;
        }
        if (resourceSchema == null) {
            checkResult.recordError(Statistics.NO_RESOURCE_REFINED_SCHEMA, new SchemaException("No resource schema"));
            return;
        }
        context.setObjectClassDefinition(resourceSchema.getRefinedDefinition(kind, shadowType));
        if (context.getObjectClassDefinition() == null) {
            // TODO or warning only?
            checkResult.recordError(Statistics.NO_OBJECT_CLASS_REFINED_SCHEMA, new SchemaException("No refined object class definition for kind=" + kind + ", intent=" + intent));
            return;
        }
        contextMap.put(key, context);
    }
    try {
        provisioningService.applyDefinition(shadow, workerTask, result);
    } catch (SchemaException | ObjectNotFoundException | CommunicationException | ConfigurationException | ExpressionEvaluationException e) {
        checkResult.recordError(Statistics.OTHER_FAILURE, new SystemException("Couldn't apply definition to shadow from repo", e));
        return;
    }
    Set<RefinedAttributeDefinition<?>> identifiers = new HashSet<>();
    Collection<? extends RefinedAttributeDefinition<?>> primaryIdentifiers = context.getObjectClassDefinition().getPrimaryIdentifiers();
    identifiers.addAll(primaryIdentifiers);
    identifiers.addAll(context.getObjectClassDefinition().getSecondaryIdentifiers());
    PrismContainer<ShadowAttributesType> attributesContainer = shadow.findContainer(ShadowType.F_ATTRIBUTES);
    if (attributesContainer == null) {
        // might happen on unfinished shadows?
        checkResult.recordError(Statistics.OTHER_FAILURE, new SchemaException("No attributes container"));
        return;
    }
    for (RefinedAttributeDefinition<?> identifier : identifiers) {
        PrismProperty property = attributesContainer.getValue().findProperty(identifier.getName());
        if (property == null || property.size() == 0) {
            checkResult.recordWarning(Statistics.OTHER_FAILURE, "No value for identifier " + identifier.getName());
            continue;
        }
        if (property.size() > 1) {
            // we don't expect multi-valued identifiers
            checkResult.recordError(Statistics.OTHER_FAILURE, new SchemaException("Multi-valued identifier " + identifier.getName() + " with values " + property.getValues()));
            continue;
        }
        // size == 1
        String value = (String) property.getValue().getValue();
        if (value == null) {
            checkResult.recordWarning(Statistics.OTHER_FAILURE, "Null value for identifier " + identifier.getName());
            continue;
        }
        if (checkUniqueness) {
            if (!checkDuplicatesOnPrimaryIdentifiersOnly || primaryIdentifiers.contains(identifier)) {
                addIdentifierValue(checkResult, context, identifier.getName(), value, shadow);
            }
        }
        if (checkNormalization) {
            doCheckNormalization(checkResult, identifier, value, context);
        }
    }
    applyFixes(checkResult, shadow, workerTask, result);
}
Also used: ExpressionEvaluationException (com.evolveum.midpoint.util.exception.ExpressionEvaluationException), PrismObject (com.evolveum.midpoint.prism.PrismObject), SystemException (com.evolveum.midpoint.util.exception.SystemException), ConfigurationException (com.evolveum.midpoint.util.exception.ConfigurationException), RefinedAttributeDefinition (com.evolveum.midpoint.common.refinery.RefinedAttributeDefinition), RefinedResourceSchema (com.evolveum.midpoint.common.refinery.RefinedResourceSchema), HashSet (java.util.HashSet), SchemaException (com.evolveum.midpoint.util.exception.SchemaException), CommunicationException (com.evolveum.midpoint.util.exception.CommunicationException), ShadowType (com.evolveum.midpoint.xml.ns._public.common.common_3.ShadowType), ShadowAttributesType (com.evolveum.midpoint.xml.ns._public.common.common_3.ShadowAttributesType), ResourceType (com.evolveum.midpoint.xml.ns._public.common.common_3.ResourceType), ObjectReferenceType (com.evolveum.midpoint.xml.ns._public.common.common_3.ObjectReferenceType), PrismProperty (com.evolveum.midpoint.prism.PrismProperty), ImmutablePair (org.apache.commons.lang3.tuple.ImmutablePair), ObjectNotFoundException (com.evolveum.midpoint.util.exception.ObjectNotFoundException), CommonException (com.evolveum.midpoint.util.exception.CommonException), ShadowKindType (com.evolveum.midpoint.xml.ns._public.common.common_3.ShadowKindType)
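
One detail worth isolating from the method above is the contextMap lookup: an ImmutablePair of (resource OID, shadow kind) serves as a composite map key, which is safe because the commons-lang3 pair types implement equals and hashCode over both elements. A minimal sketch of that pattern, with plain Strings standing in for the midPoint types:

import java.util.HashMap;
import java.util.Map;

import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;

public class ObjectTypeContextCache {

    // Context cache keyed by (resourceOid, kind), mirroring contextMap above.
    private final Map<Pair<String, String>, Object> contextMap = new HashMap<>();

    public Object getOrCreateContext(String resourceOid, String kind) {
        Pair<String, String> key = new ImmutablePair<>(resourceOid, kind);
        // Two pairs with equal elements hash and compare equal, so repeated
        // lookups for the same (resource, kind) hit the same entry.
        return contextMap.computeIfAbsent(key, k -> new Object());
    }
}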

Example 4 with EMPTY

Use of org.apache.commons.lang3.StringUtils.EMPTY in project apex-core by apache.

From the class StreamingAppMasterService, method execute:

/**
 * Main run function for the application master.
 *
 * @throws YarnException
 * @throws IOException
 */
@SuppressWarnings("SleepWhileInLoop")
private void execute() throws YarnException, IOException {
    LOG.info("Starting ApplicationMaster");
    final Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
    LOG.info("number of tokens: {}", credentials.getAllTokens().size());
    Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
    while (iter.hasNext()) {
        Token<?> token = iter.next();
        LOG.debug("token: {}", token);
    }
    final Configuration conf = getConfig();
    long tokenLifeTime = (long) (dag.getValue(LogicalPlan.TOKEN_REFRESH_ANTICIPATORY_FACTOR) * Math.min(dag.getValue(LogicalPlan.HDFS_TOKEN_LIFE_TIME), dag.getValue(LogicalPlan.RM_TOKEN_LIFE_TIME)));
    long expiryTime = System.currentTimeMillis() + tokenLifeTime;
    LOG.debug(" expiry token time {}", tokenLifeTime);
    String principal = dag.getValue(LogicalPlan.PRINCIPAL);
    String hdfsKeyTabFile = dag.getValue(LogicalPlan.KEY_TAB_FILE);
    // Register self with ResourceManager
    RegisterApplicationMasterResponse response = amRmClient.registerApplicationMaster(appMasterHostname, 0, appMasterTrackingUrl);
    // Dump out information about cluster capability as seen by the resource manager
    int maxMem = response.getMaximumResourceCapability().getMemory();
    int maxVcores = response.getMaximumResourceCapability().getVirtualCores();
    int minMem = conf.getInt("yarn.scheduler.minimum-allocation-mb", 0);
    int minVcores = conf.getInt("yarn.scheduler.minimum-allocation-vcores", 0);
    LOG.info("Max mem {}m, Min mem {}m, Max vcores {} and Min vcores {} capabililty of resources in this cluster ", maxMem, minMem, maxVcores, minVcores);
    long blacklistRemovalTime = dag.getValue(DAGContext.BLACKLISTED_NODE_REMOVAL_TIME_MILLIS);
    int maxConsecutiveContainerFailures = dag.getValue(DAGContext.MAX_CONSECUTIVE_CONTAINER_FAILURES_FOR_BLACKLIST);
    LOG.info("Blacklist removal time in millis = {}, max consecutive node failure count = {}", blacklistRemovalTime, maxConsecutiveContainerFailures);
    // for locality relaxation fall back
    Map<StreamingContainerAgent.ContainerStartRequest, MutablePair<Integer, ContainerRequest>> requestedResources = Maps.newHashMap();
    // Setup heartbeat emitter
    // TODO poll RM every now and then with an empty request to let RM know that we are alive
    // The heartbeat interval after which an AM is timed out by the RM is defined by a config setting:
    // RM_AM_EXPIRY_INTERVAL_MS with default defined by DEFAULT_RM_AM_EXPIRY_INTERVAL_MS
    // The allocate calls to the RM count as heartbeat so, for now, this additional heartbeat emitter
    // is not required.
    int loopCounter = -1;
    long nodeReportUpdateTime = 0;
    // keep track of already requested containers to not request them again while waiting for allocation
    int numRequestedContainers = 0;
    int numReleasedContainers = 0;
    int nextRequestPriority = 0;
    // Use override for resource requestor in case of cloudera distribution, to handle host specific requests
    ResourceRequestHandler resourceRequestor = System.getenv().containsKey("CDH_HADOOP_BIN") ? new BlacklistBasedResourceRequestHandler() : new ResourceRequestHandler();
    List<ContainerStartRequest> pendingContainerStartRequests = new LinkedList<>();
    YarnClient clientRMService = YarnClient.createYarnClient();
    try {
        // YARN-435
        // we need getClusterNodes to populate the initial node list,
        // subsequent updates come through the heartbeat response
        clientRMService.init(conf);
        clientRMService.start();
        ApplicationReport ar = StramClientUtils.getStartedAppInstanceByName(clientRMService, dag.getAttributes().get(DAG.APPLICATION_NAME), UserGroupInformation.getLoginUser().getUserName(), dag.getAttributes().get(DAG.APPLICATION_ID));
        if (ar != null) {
            appDone = true;
            dnmgr.shutdownDiagnosticsMessage = String.format("Application master failed because application %s with duplicate application name \"%s\" by the same user \"%s\" is already started.", ar.getApplicationId().toString(), ar.getName(), ar.getUser());
            LOG.info("Forced shutdown due to {}", dnmgr.shutdownDiagnosticsMessage);
            finishApplication(FinalApplicationStatus.FAILED);
            return;
        }
        resourceRequestor.updateNodeReports(clientRMService.getNodeReports());
        nodeReportUpdateTime = System.currentTimeMillis() + UPDATE_NODE_REPORTS_INTERVAL;
    } catch (Exception e) {
        throw new RuntimeException("Failed to retrieve cluster nodes report.", e);
    } finally {
        clientRMService.stop();
    }
    List<Container> containers = response.getContainersFromPreviousAttempts();
    // Running containers might take a while to register with the new app master and send the heartbeat signal.
    int waitForRecovery = containers.size() > 0 ? dag.getValue(LogicalPlan.HEARTBEAT_TIMEOUT_MILLIS) / 1000 : 0;
    List<ContainerId> releasedContainers = previouslyAllocatedContainers(containers);
    FinalApplicationStatus finalStatus = FinalApplicationStatus.SUCCEEDED;
    final InetSocketAddress rmAddress = conf.getSocketAddr(YarnConfiguration.RM_ADDRESS, YarnConfiguration.DEFAULT_RM_ADDRESS, YarnConfiguration.DEFAULT_RM_PORT);
    while (!appDone) {
        loopCounter++;
        final long currentTimeMillis = System.currentTimeMillis();
        if (UserGroupInformation.isSecurityEnabled() && currentTimeMillis >= expiryTime && hdfsKeyTabFile != null) {
            String applicationId = appAttemptID.getApplicationId().toString();
            expiryTime = StramUserLogin.refreshTokens(tokenLifeTime, FileUtils.getTempDirectoryPath(), applicationId, conf, principal, hdfsKeyTabFile, credentials, rmAddress, true);
        }
        if (currentTimeMillis > nodeReportUpdateTime) {
            resourceRequestor.updateNodeReports(clientRMService.getNodeReports());
            nodeReportUpdateTime = currentTimeMillis + UPDATE_NODE_REPORTS_INTERVAL;
        }
        Runnable r;
        while ((r = this.pendingTasks.poll()) != null) {
            r.run();
        }
        // sleep before each loop when asking the RM for containers,
        // to avoid flooding the RM with spurious requests when it
        // need not have any available containers
        try {
            sleep(1000);
        } catch (InterruptedException e) {
            LOG.info("Sleep interrupted " + e.getMessage());
        }
        // Setup request to be sent to RM to allocate containers
        List<ContainerRequest> containerRequests = new ArrayList<>();
        List<ContainerRequest> removedContainerRequests = new ArrayList<>();
        // request containers for pending deploy requests
        if (!dnmgr.containerStartRequests.isEmpty()) {
            StreamingContainerAgent.ContainerStartRequest csr;
            while ((csr = dnmgr.containerStartRequests.poll()) != null) {
                if (csr.container.getRequiredMemoryMB() > maxMem) {
                    LOG.warn("Container memory {}m above max threshold of cluster. Using max value {}m.", csr.container.getRequiredMemoryMB(), maxMem);
                    csr.container.setRequiredMemoryMB(maxMem);
                }
                if (csr.container.getRequiredMemoryMB() < minMem) {
                    csr.container.setRequiredMemoryMB(minMem);
                }
                if (csr.container.getRequiredVCores() > maxVcores) {
                    LOG.warn("Container vcores {} above max threshold of cluster. Using max value {}.", csr.container.getRequiredVCores(), maxVcores);
                    csr.container.setRequiredVCores(maxVcores);
                }
                if (csr.container.getRequiredVCores() < minVcores) {
                    csr.container.setRequiredVCores(minVcores);
                }
                csr.container.setResourceRequestPriority(nextRequestPriority++);
                ContainerRequest cr = resourceRequestor.createContainerRequest(csr, true);
                if (cr == null) {
                    pendingContainerStartRequests.add(csr);
                } else {
                    resourceRequestor.addContainerRequest(requestedResources, loopCounter, containerRequests, csr, cr);
                }
            }
        }
        // If all other requests are allocated, retry pending requests which need host availability
        if (containerRequests.isEmpty() && !pendingContainerStartRequests.isEmpty()) {
            List<ContainerStartRequest> removalList = new LinkedList<>();
            for (ContainerStartRequest csr : pendingContainerStartRequests) {
                ContainerRequest cr = resourceRequestor.createContainerRequest(csr, true);
                if (cr != null) {
                    resourceRequestor.addContainerRequest(requestedResources, loopCounter, containerRequests, csr, cr);
                    removalList.add(csr);
                }
            }
            pendingContainerStartRequests.removeAll(removalList);
        }
        resourceRequestor.reissueContainerRequests(amRmClient, requestedResources, loopCounter, resourceRequestor, containerRequests, removedContainerRequests);
        /* Remove nodes from blacklist after timeout */
        List<String> blacklistRemovals = new ArrayList<>();
        for (String hostname : failedBlackListedNodes) {
            Long timeDiff = currentTimeMillis - failedContainerNodesMap.get(hostname).blackListAdditionTime;
            if (timeDiff >= blacklistRemovalTime) {
                blacklistRemovals.add(hostname);
                failedContainerNodesMap.remove(hostname);
            }
        }
        if (!blacklistRemovals.isEmpty()) {
            amRmClient.updateBlacklist(null, blacklistRemovals);
            LOG.info("Removing nodes {} from blacklist: time elapsed since last blacklisting due to failure is greater than specified timeout", blacklistRemovals.toString());
            failedBlackListedNodes.removeAll(blacklistRemovals);
        }
        numRequestedContainers += containerRequests.size() - removedContainerRequests.size();
        AllocateResponse amResp = sendContainerAskToRM(containerRequests, removedContainerRequests, releasedContainers);
        if (amResp.getAMCommand() != null) {
            LOG.info(" statement executed:{}", amResp.getAMCommand());
            switch(amResp.getAMCommand()) {
                case AM_RESYNC:
                case AM_SHUTDOWN:
                    throw new YarnRuntimeException("Received the " + amResp.getAMCommand() + " command from RM");
                default:
                    throw new YarnRuntimeException("Received the " + amResp.getAMCommand() + " command from RM");
            }
        }
        releasedContainers.clear();
        // Retrieve list of allocated containers from the response
        List<Container> newAllocatedContainers = amResp.getAllocatedContainers();
        // LOG.info("Got response from RM for container ask, allocatedCnt=" + newAllocatedContainers.size());
        numRequestedContainers -= newAllocatedContainers.size();
        long timestamp = System.currentTimeMillis();
        for (Container allocatedContainer : newAllocatedContainers) {
            LOG.info("Got new container." + ", containerId=" + allocatedContainer.getId() + ", containerNode=" + allocatedContainer.getNodeId() + ", containerNodeURI=" + allocatedContainer.getNodeHttpAddress() + ", containerResourceMemory" + allocatedContainer.getResource().getMemory() + ", priority" + allocatedContainer.getPriority());
            // + ", containerToken" + allocatedContainer.getContainerToken().getIdentifier().toString());
            boolean alreadyAllocated = true;
            StreamingContainerAgent.ContainerStartRequest csr = null;
            for (Map.Entry<StreamingContainerAgent.ContainerStartRequest, MutablePair<Integer, ContainerRequest>> entry : requestedResources.entrySet()) {
                if (entry.getKey().container.getResourceRequestPriority() == allocatedContainer.getPriority().getPriority()) {
                    alreadyAllocated = false;
                    csr = entry.getKey();
                    break;
                }
            }
            if (alreadyAllocated) {
                LOG.info("Releasing {} as resource with priority {} was already assigned", allocatedContainer.getId(), allocatedContainer.getPriority());
                releasedContainers.add(allocatedContainer.getId());
                numReleasedContainers++;
                numRequestedContainers--;
                continue;
            }
            if (csr != null) {
                requestedResources.remove(csr);
            }
            // allocate resource to container
            ContainerResource resource = new ContainerResource(allocatedContainer.getPriority().getPriority(), allocatedContainer.getId().toString(), allocatedContainer.getNodeId().toString(), allocatedContainer.getResource().getMemory(), allocatedContainer.getResource().getVirtualCores(), allocatedContainer.getNodeHttpAddress());
            StreamingContainerAgent sca = dnmgr.assignContainer(resource, null);
            if (sca == null) {
                // allocated container no longer needed, add release request
                LOG.warn("Container {} allocated but nothing to deploy, going to release this container.", allocatedContainer.getId());
                releasedContainers.add(allocatedContainer.getId());
            } else {
                AllocatedContainer allocatedContainerHolder = new AllocatedContainer(allocatedContainer);
                this.allocatedContainers.put(allocatedContainer.getId().toString(), allocatedContainerHolder);
                ByteBuffer tokens = null;
                if (UserGroupInformation.isSecurityEnabled()) {
                    UserGroupInformation ugi = UserGroupInformation.getLoginUser();
                    Token<StramDelegationTokenIdentifier> delegationToken = allocateDelegationToken(ugi.getUserName(), heartbeatListener.getAddress());
                    allocatedContainerHolder.delegationToken = delegationToken;
                    //ByteBuffer tokens = LaunchContainerRunnable.getTokens(delegationTokenManager, heartbeatListener.getAddress());
                    tokens = LaunchContainerRunnable.getTokens(ugi, delegationToken);
                }
                LaunchContainerRunnable launchContainer = new LaunchContainerRunnable(allocatedContainer, nmClient, sca, tokens);
                // Thread launchThread = new Thread(runnableLaunchContainer);
                // launchThreads.add(launchThread);
                // launchThread.start();
                // communication with NMs is now async
                launchContainer.run();
                // record container start event
                StramEvent ev = new StramEvent.StartContainerEvent(allocatedContainer.getId().toString(), allocatedContainer.getNodeId().toString());
                ev.setTimestamp(timestamp);
                dnmgr.recordEventAsync(ev);
            }
        }
        // track node updates for future locality constraint allocations
        // TODO: it seems 2.0.4-alpha doesn't give us any updates
        resourceRequestor.updateNodeReports(amResp.getUpdatedNodes());
        // Check the completed containers
        List<ContainerStatus> completedContainers = amResp.getCompletedContainersStatuses();
        // LOG.debug("Got response from RM for container ask, completedCnt=" + completedContainers.size());
        List<String> blacklistAdditions = new ArrayList<>();
        for (ContainerStatus containerStatus : completedContainers) {
            LOG.info("Completed containerId=" + containerStatus.getContainerId() + ", state=" + containerStatus.getState() + ", exitStatus=" + containerStatus.getExitStatus() + ", diagnostics=" + containerStatus.getDiagnostics());
            // non complete containers should not be here
            assert (containerStatus.getState() == ContainerState.COMPLETE);
            AllocatedContainer allocatedContainer = allocatedContainers.remove(containerStatus.getContainerId().toString());
            if (allocatedContainer != null && allocatedContainer.delegationToken != null) {
                UserGroupInformation ugi = UserGroupInformation.getLoginUser();
                delegationTokenManager.cancelToken(allocatedContainer.delegationToken, ugi.getUserName());
            }
            int exitStatus = containerStatus.getExitStatus();
            if (0 != exitStatus) {
                if (allocatedContainer != null) {
                    numFailedContainers.incrementAndGet();
                    if (exitStatus != 1 && maxConsecutiveContainerFailures != Integer.MAX_VALUE) {
                        // If container failure due to framework
                        String hostname = allocatedContainer.container.getNodeId().getHost();
                        if (!failedBlackListedNodes.contains(hostname)) {
                            // Blacklist the node if not already blacklisted
                            if (failedContainerNodesMap.containsKey(hostname)) {
                                NodeFailureStats stats = failedContainerNodesMap.get(hostname);
                                long timeStamp = System.currentTimeMillis();
                                if (timeStamp - stats.lastFailureTimeStamp >= blacklistRemovalTime) {
                                    // Reset failure count if last failure was before Blacklist removal time
                                    stats.failureCount = 1;
                                    stats.lastFailureTimeStamp = timeStamp;
                                } else {
                                    stats.lastFailureTimeStamp = timeStamp;
                                    stats.failureCount++;
                                    if (stats.failureCount >= maxConsecutiveContainerFailures) {
                                        LOG.info("Node {} failed {} times consecutively within {} minutes, marking the node blacklisted", hostname, stats.failureCount, blacklistRemovalTime / (60 * 1000));
                                        blacklistAdditions.add(hostname);
                                        failedBlackListedNodes.add(hostname);
                                    }
                                }
                            } else {
                                failedContainerNodesMap.put(hostname, new NodeFailureStats(System.currentTimeMillis(), 1));
                            }
                        }
                    }
                }
                //          if (exitStatus == 1) {
                //            // non-recoverable StreamingContainer failure
                //            appDone = true;
                //            finalStatus = FinalApplicationStatus.FAILED;
                //            dnmgr.shutdownDiagnosticsMessage = "Unrecoverable failure " + containerStatus.getContainerId();
                //            LOG.info("Exiting due to: {}", dnmgr.shutdownDiagnosticsMessage);
                //          }
                //          else {
                // Recoverable failure or process killed (externally or via stop request by AM)
                // also occurs when a container was released by the application but never assigned/launched
                LOG.debug("Container {} failed or killed.", containerStatus.getContainerId());
                dnmgr.scheduleContainerRestart(containerStatus.getContainerId().toString());
            //          }
            } else {
                // container completed successfully
                numCompletedContainers.incrementAndGet();
                LOG.info("Container completed successfully." + ", containerId=" + containerStatus.getContainerId());
                // Reset counter for node failure, if exists; the completed
                // container may no longer be tracked by this app master
                if (allocatedContainer != null) {
                    NodeFailureStats stats = failedContainerNodesMap.get(allocatedContainer.container.getNodeId().getHost());
                    if (stats != null) {
                        stats.failureCount = 0;
                    }
                }
            }
            String containerIdStr = containerStatus.getContainerId().toString();
            dnmgr.removeContainerAgent(containerIdStr);
            // record container stop event
            StramEvent ev = new StramEvent.StopContainerEvent(containerIdStr, containerStatus.getExitStatus());
            ev.setReason(containerStatus.getDiagnostics());
            dnmgr.recordEventAsync(ev);
        }
        if (!blacklistAdditions.isEmpty()) {
            amRmClient.updateBlacklist(blacklistAdditions, null);
            long timeStamp = System.currentTimeMillis();
            for (String hostname : blacklistAdditions) {
                NodeFailureStats stats = failedContainerNodesMap.get(hostname);
                stats.blackListAdditionTime = timeStamp;
            }
        }
        if (dnmgr.forcedShutdown) {
            LOG.info("Forced shutdown due to {}", dnmgr.shutdownDiagnosticsMessage);
            finalStatus = FinalApplicationStatus.FAILED;
            appDone = true;
        } else if (allocatedContainers.isEmpty() && numRequestedContainers == 0 && dnmgr.containerStartRequests.isEmpty()) {
            LOG.debug("Exiting as no more containers are allocated or requested");
            finalStatus = FinalApplicationStatus.SUCCEEDED;
            appDone = true;
        }
        LOG.debug("Current application state: loop={}, appDone={}, requested={}, released={}, completed={}, failed={}, currentAllocated={}, dnmgr.containerStartRequests={}", loopCounter, appDone, numRequestedContainers, numReleasedContainers, numCompletedContainers, numFailedContainers, allocatedContainers.size(), dnmgr.containerStartRequests);
        // monitor child containers
        dnmgr.monitorHeartbeat(waitForRecovery > 0);
        waitForRecovery = Math.max(waitForRecovery - 1, 0);
    }
    finishApplication(finalStatus);
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration), FinalApplicationStatus (org.apache.hadoop.yarn.api.records.FinalApplicationStatus), InetSocketAddress (java.net.InetSocketAddress), ArrayList (java.util.ArrayList), Token (org.apache.hadoop.security.token.Token), AllocateResponse (org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse), PTContainer (com.datatorrent.stram.plan.physical.PTContainer), Container (org.apache.hadoop.yarn.api.records.Container), StreamingContainer (com.datatorrent.stram.engine.StreamingContainer), ContainerStatus (org.apache.hadoop.yarn.api.records.ContainerStatus), ContainerStartRequest (com.datatorrent.stram.StreamingContainerAgent.ContainerStartRequest), ContainerId (org.apache.hadoop.yarn.api.records.ContainerId), ContainerRequest (org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest), UserGroupInformation (org.apache.hadoop.security.UserGroupInformation), LinkedList (java.util.LinkedList), ApplicationReport (org.apache.hadoop.yarn.api.records.ApplicationReport), YarnRuntimeException (org.apache.hadoop.yarn.exceptions.YarnRuntimeException), ContainerResource (com.datatorrent.stram.StreamingContainerManager.ContainerResource), RegisterApplicationMasterResponse (org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse), Map (java.util.Map), HashMap (java.util.HashMap), ConcurrentMap (java.util.concurrent.ConcurrentMap), StramDelegationTokenIdentifier (com.datatorrent.stram.security.StramDelegationTokenIdentifier), StramEvent (com.datatorrent.stram.api.StramEvent), MutablePair (org.apache.commons.lang3.tuple.MutablePair), ByteBuffer (java.nio.ByteBuffer), YarnClient (org.apache.hadoop.yarn.client.api.YarnClient), YarnException (org.apache.hadoop.yarn.exceptions.YarnException), IOException (java.io.IOException), Credentials (org.apache.hadoop.security.Credentials)
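
The requestedResources map above pairs each container start request with a MutablePair<Integer, ContainerRequest>, where the left element records the loop iteration in which the request was issued so that stale requests can be reissued. A minimal sketch of that bookkeeping, with Strings standing in for the YARN ContainerRequest type:

import java.util.HashMap;
import java.util.Map;

import org.apache.commons.lang3.tuple.MutablePair;

public class RequestAgeTracker {

    // (issue loop counter, request), keyed by request id; MutablePair lets
    // the age be reset in place when a request is reissued.
    private final Map<String, MutablePair<Integer, String>> requested = new HashMap<>();

    public void issue(String id, String request, int loopCounter) {
        requested.put(id, new MutablePair<>(loopCounter, request));
    }

    public void reissueStale(int loopCounter, int maxAgeLoops) {
        for (MutablePair<Integer, String> pair : requested.values()) {
            if (loopCounter - pair.getLeft() >= maxAgeLoops) {
                // re-ask the resource manager for pair.getRight() here,
                // then reset the age counter in place
                pair.setLeft(loopCounter);
            }
        }
    }
}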

Example 5 with EMPTY

Use of org.apache.commons.lang3.StringUtils.EMPTY in project asterixdb by apache.

From the class ActivityClusterGraphRewriter, method rewriteIntraActivityCluster:

/**
 * Rewrite an activity cluster internally.
 *
 * @param ac
 *            the activity cluster to be rewritten
 * @param invertedActivitySuperActivityMap
 *            map from each activity to the super activity that contains it, updated in place
 */
private void rewriteIntraActivityCluster(ActivityCluster ac, Map<IActivity, SuperActivity> invertedActivitySuperActivityMap) {
    Map<ActivityId, IActivity> activities = ac.getActivityMap();
    Map<ActivityId, List<IConnectorDescriptor>> activityInputMap = ac.getActivityInputMap();
    Map<ActivityId, List<IConnectorDescriptor>> activityOutputMap = ac.getActivityOutputMap();
    Map<ConnectorDescriptorId, Pair<Pair<IActivity, Integer>, Pair<IActivity, Integer>>> connectorActivityMap = ac.getConnectorActivityMap();
    ActivityClusterGraph acg = ac.getActivityClusterGraph();
    Map<ActivityId, IActivity> startActivities = new HashMap<ActivityId, IActivity>();
    Map<ActivityId, SuperActivity> superActivities = new HashMap<ActivityId, SuperActivity>();
    Map<ActivityId, Queue<IActivity>> toBeExpendedMap = new HashMap<ActivityId, Queue<IActivity>>();
    /**
     * Build the initial super activities
     */
    for (Entry<ActivityId, IActivity> entry : activities.entrySet()) {
        ActivityId activityId = entry.getKey();
        IActivity activity = entry.getValue();
        if (activityInputMap.get(activityId) == null) {
            startActivities.put(activityId, activity);
            /**
             * use the start activity's id as the id of the super activity
             */
            createNewSuperActivity(ac, superActivities, toBeExpendedMap, invertedActivitySuperActivityMap, activityId, activity);
        }
    }
    /**
     * Expand one-to-one connected activity clusters in BFS order.
     * After the while-loop, the original activities are partitioned
     * into equivalence classes, one per super activity.
     */
    Map<ActivityId, SuperActivity> clonedSuperActivities = new HashMap<ActivityId, SuperActivity>();
    while (toBeExpendedMap.size() > 0) {
        clonedSuperActivities.clear();
        clonedSuperActivities.putAll(superActivities);
        for (Entry<ActivityId, SuperActivity> entry : clonedSuperActivities.entrySet()) {
            ActivityId superActivityId = entry.getKey();
            SuperActivity superActivity = entry.getValue();
            /**
             * for the case where the super activity has already been swallowed
             */
            if (superActivities.get(superActivityId) == null) {
                continue;
            }
            /**
             * expand the super activity
             */
            Queue<IActivity> toBeExpended = toBeExpendedMap.get(superActivityId);
            if (toBeExpended == null) {
                /**
                 * Nothing to expand
                 */
                continue;
            }
            IActivity expendingActivity = toBeExpended.poll();
            List<IConnectorDescriptor> outputConnectors = activityOutputMap.get(expendingActivity.getActivityId());
            if (outputConnectors != null) {
                for (IConnectorDescriptor outputConn : outputConnectors) {
                    Pair<Pair<IActivity, Integer>, Pair<IActivity, Integer>> endPoints = connectorActivityMap.get(outputConn.getConnectorId());
                    IActivity newActivity = endPoints.getRight().getLeft();
                    SuperActivity existingSuperActivity = invertedActivitySuperActivityMap.get(newActivity);
                    if (outputConn.getClass().getName().contains(ONE_TO_ONE_CONNECTOR)) {
                        /**
                         * expand the super activity cluster along a one-to-one outbound connection
                         */
                        if (existingSuperActivity == null) {
                            superActivity.addActivity(newActivity);
                            toBeExpended.add(newActivity);
                            invertedActivitySuperActivityMap.put(newActivity, superActivity);
                        } else {
                            /**
                             * the two activities are already in the same super activity
                             */
                            if (existingSuperActivity == superActivity) {
                                continue;
                            }
                            /**
                             * swallow an existing super activity
                             */
                            swallowExistingSuperActivity(superActivities, toBeExpendedMap, invertedActivitySuperActivityMap, superActivity, superActivityId, existingSuperActivity);
                        }
                    } else {
                        if (existingSuperActivity == null) {
                            /**
                             * create a new super activity
                             */
                            createNewSuperActivity(ac, superActivities, toBeExpendedMap, invertedActivitySuperActivityMap, newActivity.getActivityId(), newActivity);
                        }
                    }
                }
            }
            /**
             * remove the to-be-expanded queue if it is empty
             */
            if (toBeExpended.size() == 0) {
                toBeExpendedMap.remove(superActivityId);
            }
        }
    }
    Map<ConnectorDescriptorId, IConnectorDescriptor> connMap = ac.getConnectorMap();
    Map<ConnectorDescriptorId, RecordDescriptor> connRecordDesc = ac.getConnectorRecordDescriptorMap();
    Map<SuperActivity, Integer> superActivityProducerPort = new HashMap<SuperActivity, Integer>();
    Map<SuperActivity, Integer> superActivityConsumerPort = new HashMap<SuperActivity, Integer>();
    for (Entry<ActivityId, SuperActivity> entry : superActivities.entrySet()) {
        superActivityProducerPort.put(entry.getValue(), 0);
        superActivityConsumerPort.put(entry.getValue(), 0);
    }
    /**
     * create a new activity cluster to replace the old activity cluster
     */
    ActivityCluster newActivityCluster = new ActivityCluster(acg, ac.getId());
    newActivityCluster.setConnectorPolicyAssignmentPolicy(ac.getConnectorPolicyAssignmentPolicy());
    for (Entry<ActivityId, SuperActivity> entry : superActivities.entrySet()) {
        newActivityCluster.addActivity(entry.getValue());
        acg.getActivityMap().put(entry.getKey(), newActivityCluster);
    }
    /**
     * Set up connectors: either inside a super activity or among super activities
     */
    for (Entry<ConnectorDescriptorId, Pair<Pair<IActivity, Integer>, Pair<IActivity, Integer>>> entry : connectorActivityMap.entrySet()) {
        ConnectorDescriptorId connectorId = entry.getKey();
        Pair<Pair<IActivity, Integer>, Pair<IActivity, Integer>> endPoints = entry.getValue();
        IActivity producerActivity = endPoints.getLeft().getLeft();
        IActivity consumerActivity = endPoints.getRight().getLeft();
        int producerPort = endPoints.getLeft().getRight();
        int consumerPort = endPoints.getRight().getRight();
        RecordDescriptor recordDescriptor = connRecordDesc.get(connectorId);
        IConnectorDescriptor conn = connMap.get(connectorId);
        if (conn.getClass().getName().contains(ONE_TO_ONE_CONNECTOR)) {
            /**
             * connection edge between inner activities
             */
            SuperActivity residingSuperActivity = invertedActivitySuperActivityMap.get(producerActivity);
            residingSuperActivity.connect(conn, producerActivity, producerPort, consumerActivity, consumerPort, recordDescriptor);
        } else {
            /**
             * connection edge between super activities
             */
            SuperActivity producerSuperActivity = invertedActivitySuperActivityMap.get(producerActivity);
            SuperActivity consumerSuperActivity = invertedActivitySuperActivityMap.get(consumerActivity);
            int producerSAPort = superActivityProducerPort.get(producerSuperActivity);
            int consumerSAPort = superActivityConsumerPort.get(consumerSuperActivity);
            newActivityCluster.addConnector(conn);
            newActivityCluster.connect(conn, producerSuperActivity, producerSAPort, consumerSuperActivity, consumerSAPort, recordDescriptor);
            /**
             * bridge the port
             */
            producerSuperActivity.setClusterOutputIndex(producerSAPort, producerActivity.getActivityId(), producerPort);
            consumerSuperActivity.setClusterInputIndex(consumerSAPort, consumerActivity.getActivityId(), consumerPort);
            acg.getConnectorMap().put(connectorId, newActivityCluster);
            /**
             * increase the port numbers for the producer and consumer
             */
            superActivityProducerPort.put(producerSuperActivity, ++producerSAPort);
            superActivityConsumerPort.put(consumerSuperActivity, ++consumerSAPort);
        }
    }
    /**
     * Set up the roots of the new activity cluster
     */
    for (Entry<ActivityId, SuperActivity> entry : superActivities.entrySet()) {
        List<IConnectorDescriptor> connIds = newActivityCluster.getActivityOutputMap().get(entry.getKey());
        if (connIds == null || connIds.size() == 0) {
            newActivityCluster.addRoot(entry.getValue());
        }
    }
    /**
     * set up the blocked2Blocker mapping, which will be updated in the rewriteInterActivityCluster call
     */
    newActivityCluster.getBlocked2BlockerMap().putAll(ac.getBlocked2BlockerMap());
    /**
     * replace the old activity cluster with the new activity cluster
     */
    acg.getActivityClusterMap().put(ac.getId(), newActivityCluster);
}
Also used: HashMap (java.util.HashMap), RecordDescriptor (org.apache.hyracks.api.dataflow.value.RecordDescriptor), ActivityId (org.apache.hyracks.api.dataflow.ActivityId), ConnectorDescriptorId (org.apache.hyracks.api.dataflow.ConnectorDescriptorId), IActivity (org.apache.hyracks.api.dataflow.IActivity), SuperActivity (org.apache.hyracks.api.rewriter.runtime.SuperActivity), List (java.util.List), LinkedList (java.util.LinkedList), Queue (java.util.Queue), Pair (org.apache.commons.lang3.tuple.Pair), IConnectorDescriptor (org.apache.hyracks.api.dataflow.IConnectorDescriptor), ActivityCluster (org.apache.hyracks.api.job.ActivityCluster), ActivityClusterGraph (org.apache.hyracks.api.job.ActivityClusterGraph)
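
The connectorActivityMap above nests commons-lang3 Pairs to describe each edge as ((producer activity, output port), (consumer activity, input port)). A minimal sketch of unpacking such an edge, with Strings standing in for IActivity:

import org.apache.commons.lang3.tuple.Pair;

public class ConnectorEndpointsSketch {

    public static void main(String[] args) {
        // ((producer, outputPort), (consumer, inputPort)), as in connectorActivityMap.
        Pair<Pair<String, Integer>, Pair<String, Integer>> edge =
                Pair.of(Pair.of("producerActivity", 0), Pair.of("consumerActivity", 1));

        String producer = edge.getLeft().getLeft();
        int producerPort = edge.getLeft().getRight();
        String consumer = edge.getRight().getLeft();
        int consumerPort = edge.getRight().getRight();

        System.out.printf("%s:%d -> %s:%d%n", producer, producerPort, consumer, consumerPort);
    }
}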

Aggregations

List (java.util.List): 44
Map (java.util.Map): 42
ArrayList (java.util.ArrayList): 41
StringUtils (org.apache.commons.lang3.StringUtils): 38
Collectors (java.util.stream.Collectors): 37
HashMap (java.util.HashMap): 33
IOException (java.io.IOException): 27
Set (java.util.Set): 25
HashSet (java.util.HashSet): 22
LoggerFactory (org.slf4j.LoggerFactory): 22
Pair (org.apache.commons.lang3.tuple.Pair): 20
Logger (org.slf4j.Logger): 20
Optional (java.util.Optional): 19
Collections (java.util.Collections): 17
ImmutablePair (org.apache.commons.lang3.tuple.ImmutablePair): 17
java.util (java.util): 15
Arrays.asList (java.util.Arrays.asList): 14
Collection (java.util.Collection): 14
Stream (java.util.stream.Stream): 14
Arrays (java.util.Arrays): 12
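
For reference, the symbol these examples were indexed under never appears directly in the snippets themselves. StringUtils.EMPTY is simply the named empty-string constant from commons-lang3; a minimal sketch of typical use, assuming commons-lang3 on the classpath:

import org.apache.commons.lang3.StringUtils;

public class EmptyConstantSketch {

    public static void main(String[] args) {
        // StringUtils.EMPTY is the constant "", a named alternative to an
        // inline empty-string literal.
        System.out.println(StringUtils.EMPTY.isEmpty()); // true
        // Common idiom: fall back to EMPTY instead of null or a bare "".
        String label = StringUtils.defaultIfBlank("   ", StringUtils.EMPTY);
        System.out.println(label.length()); // 0
    }
}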