
Example 1 with Configurable

Use of org.apache.hadoop.conf.Configurable in project hadoop by apache.

Class TestAvailableSpaceVolumeChoosingPolicy, method initPolicy.

private static void initPolicy(VolumeChoosingPolicy<FsVolumeSpi> policy, float preferencePercent) {
    Configuration conf = new Configuration();
    // Set the threshold to consider volumes imbalanced to 1MB
    conf.setLong(DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_THRESHOLD_KEY,
            1024 * 1024); // 1MB
    conf.setFloat(DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY, preferencePercent);
    ((Configurable) policy).setConf(conf);
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) Configurable(org.apache.hadoop.conf.Configurable)
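As a side note, the explicit ((Configurable) policy).setConf(conf) cast above is the manual form of what Hadoop's ReflectionUtils.newInstance does automatically: it calls setConf on any freshly created object that implements Configurable. A minimal, illustrative sketch (the PolicyLoader class and loadConfigured method are not part of the test above):

import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;

public class PolicyLoader {

    // Create an instance reflectively; ReflectionUtils injects conf via setConf
    // when the class implements Configurable, so no explicit cast is needed.
    public static <T> T loadConfigured(Class<? extends T> clazz, Configuration conf) {
        return ReflectionUtils.newInstance(clazz, conf);
    }
}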

Example 2 with Configurable

Use of org.apache.hadoop.conf.Configurable in project hive by apache.

Class Driver, method compile.

// deferClose indicates whether the close/destroy should be deferred when the process has been
// interrupted. It should be set to true if compile is called from another method, such as
// runInternal, which performs the deferred close itself.
private void compile(String command, boolean resetTaskIds, boolean deferClose) throws CommandProcessorResponse {
    PerfLogger perfLogger = SessionState.getPerfLogger(true);
    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.DRIVER_RUN);
    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.COMPILE);
    lDrvState.stateLock.lock();
    try {
        lDrvState.driverState = DriverState.COMPILING;
    } finally {
        lDrvState.stateLock.unlock();
    }
    command = new VariableSubstitution(new HiveVariableSource() {

        @Override
        public Map<String, String> getHiveVariable() {
            return SessionState.get().getHiveVariables();
        }
    }).substitute(conf, command);
    String queryStr = command;
    try {
        // command should be redacted to avoid logging sensitive data
        queryStr = HookUtils.redactLogString(conf, command);
    } catch (Exception e) {
        LOG.warn("WARNING! Query command could not be redacted." + e);
    }
    checkInterrupted("at beginning of compilation.", null, null);
    if (ctx != null && ctx.getExplainAnalyze() != AnalyzeState.RUNNING) {
        // close the existing ctx etc. before compiling a new query, but do not destroy the driver
        closeInProcess(false);
    }
    if (resetTaskIds) {
        TaskFactory.resetId();
    }
    LockedDriverState.setLockedDriverState(lDrvState);
    String queryId = queryState.getQueryId();
    if (ctx != null) {
        setTriggerContext(queryId);
    }
    // save some info for webUI for use after plan is freed
    this.queryDisplay.setQueryStr(queryStr);
    this.queryDisplay.setQueryId(queryId);
    LOG.info("Compiling command(queryId=" + queryId + "): " + queryStr);
    conf.setQueryString(queryStr);
    // FIXME: side effect will leave the last query set at the session level
    SessionState.get().getConf().setQueryString(queryStr);
    SessionState.get().setupQueryCurrentTimestamp();
    // Whether any error occurred during query compilation. Used for query lifetime hook.
    boolean compileError = false;
    boolean parseError = false;
    try {
        // Initialize the transaction manager.  This must be done before analyze is called.
        if (initTxnMgr != null) {
            queryTxnMgr = initTxnMgr;
        } else {
            queryTxnMgr = SessionState.get().initTxnMgr(conf);
        }
        if (queryTxnMgr instanceof Configurable) {
            ((Configurable) queryTxnMgr).setConf(conf);
        }
        queryState.setTxnManager(queryTxnMgr);
        // In case the user hits Ctrl-C twice to kill the Hive CLI JVM, we want to release locks.
        // If compile is being called multiple times, clear the old shutdown hook.
        ShutdownHookManager.removeShutdownHook(shutdownRunner);
        final HiveTxnManager txnMgr = queryTxnMgr;
        shutdownRunner = new Runnable() {

            @Override
            public void run() {
                try {
                    releaseLocksAndCommitOrRollback(false, txnMgr);
                } catch (LockException e) {
                    LOG.warn("Exception when releasing locks in ShutdownHook for Driver: " + e.getMessage());
                }
            }
        };
        ShutdownHookManager.addShutdownHook(shutdownRunner, SHUTDOWN_HOOK_PRIORITY);
        checkInterrupted("before parsing and analysing the query", null, null);
        if (ctx == null) {
            ctx = new Context(conf);
            setTriggerContext(queryId);
        }
        ctx.setRuntimeStatsSource(runtimeStatsSource);
        ctx.setCmd(command);
        ctx.setHDFSCleanup(true);
        perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.PARSE);
        // Trigger query hook before compilation
        hookRunner.runBeforeParseHook(command);
        ASTNode tree;
        try {
            tree = ParseUtils.parse(command, ctx);
        } catch (ParseException e) {
            parseError = true;
            throw e;
        } finally {
            hookRunner.runAfterParseHook(command, parseError);
        }
        perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.PARSE);
        hookRunner.runBeforeCompileHook(command);
        perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.ANALYZE);
        // Flush the metastore cache.  This assures that we don't pick up objects from a previous
        // query running in this same thread.  This has to be done after we get our semantic
        // analyzer (this is when the connection to the metastore is made) but before we analyze,
        // because at that point we need access to the objects.
        Hive.get().getMSC().flushCache();
        BaseSemanticAnalyzer sem;
        // Do semantic analysis and plan generation
        if (hookRunner.hasPreAnalyzeHooks()) {
            HiveSemanticAnalyzerHookContext hookCtx = new HiveSemanticAnalyzerHookContextImpl();
            hookCtx.setConf(conf);
            hookCtx.setUserName(userName);
            hookCtx.setIpAddress(SessionState.get().getUserIpAddress());
            hookCtx.setCommand(command);
            hookCtx.setHiveOperation(queryState.getHiveOperation());
            tree = hookRunner.runPreAnalyzeHooks(hookCtx, tree);
            sem = SemanticAnalyzerFactory.get(queryState, tree);
            openTransaction();
            sem.analyze(tree, ctx);
            hookCtx.update(sem);
            hookRunner.runPostAnalyzeHooks(hookCtx, sem.getAllRootTasks());
        } else {
            sem = SemanticAnalyzerFactory.get(queryState, tree);
            openTransaction();
            sem.analyze(tree, ctx);
        }
        LOG.info("Semantic Analysis Completed");
        // Retrieve information about cache usage for the query.
        if (conf.getBoolVar(HiveConf.ConfVars.HIVE_QUERY_RESULTS_CACHE_ENABLED)) {
            cacheUsage = sem.getCacheUsage();
        }
        // validate the plan
        sem.validate();
        perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.ANALYZE);
        checkInterrupted("after analyzing query.", null, null);
        // get the output schema
        schema = getSchema(sem, conf);
        plan = new QueryPlan(queryStr, sem, perfLogger.getStartTime(PerfLogger.DRIVER_RUN), queryId, queryState.getHiveOperation(), schema);
        conf.set("mapreduce.workflow.id", "hive_" + queryId);
        conf.set("mapreduce.workflow.name", queryStr);
        // initialize FetchTask right here
        if (plan.getFetchTask() != null) {
            plan.getFetchTask().initialize(queryState, plan, null, ctx.getOpContext());
        }
        // do the authorization check
        if (!sem.skipAuthorization() && HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED)) {
            try {
                perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.DO_AUTHORIZATION);
                doAuthorization(queryState.getHiveOperation(), sem, command);
            } catch (AuthorizationException authExp) {
                console.printError("Authorization failed:" + authExp.getMessage() + ". Use SHOW GRANT to get more details.");
                errorMessage = authExp.getMessage();
                SQLState = "42000";
                throw createProcessorResponse(403);
            } finally {
                perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.DO_AUTHORIZATION);
            }
        }
        if (conf.getBoolVar(ConfVars.HIVE_LOG_EXPLAIN_OUTPUT)) {
            String explainOutput = getExplainOutput(sem, plan, tree);
            if (explainOutput != null) {
                LOG.info("EXPLAIN output for queryid " + queryId + " : " + explainOutput);
                if (conf.isWebUiQueryInfoCacheEnabled()) {
                    queryDisplay.setExplainPlan(explainOutput);
                }
            }
        }
    } catch (CommandProcessorResponse cpr) {
        throw cpr;
    } catch (Exception e) {
        checkInterrupted("during query compilation: " + e.getMessage(), null, null);
        compileError = true;
        ErrorMsg error = ErrorMsg.getErrorMsg(e.getMessage());
        errorMessage = "FAILED: " + e.getClass().getSimpleName();
        if (error != ErrorMsg.GENERIC_ERROR) {
            errorMessage += " [Error " + error.getErrorCode() + "]:";
        }
        // HIVE-4889
        if ((e instanceof IllegalArgumentException) && e.getMessage() == null && e.getCause() != null) {
            errorMessage += " " + e.getCause().getMessage();
        } else {
            errorMessage += " " + e.getMessage();
        }
        if (error == ErrorMsg.TXNMGR_NOT_ACID) {
            errorMessage += ". Failed command: " + queryStr;
        }
        SQLState = error.getSQLState();
        downstreamError = e;
        console.printError(errorMessage, "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
        throw createProcessorResponse(error.getErrorCode());
    } finally {
        // Trigger the post-compilation hook. Note that if compilation fails here, the
        // before/after execution hooks will never be executed.
        if (!parseError) {
            try {
                hookRunner.runAfterCompilationHook(command, compileError);
            } catch (Exception e) {
                LOG.warn("Failed when invoking query after-compilation hook.", e);
            }
        }
        double duration = perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.COMPILE) / 1000.00;
        ImmutableMap<String, Long> compileHMSTimings = dumpMetaCallTimingWithoutEx("compilation");
        queryDisplay.setHmsTimings(QueryDisplay.Phase.COMPILATION, compileHMSTimings);
        boolean isInterrupted = lDrvState.isAborted();
        if (isInterrupted && !deferClose) {
            closeInProcess(true);
        }
        lDrvState.stateLock.lock();
        try {
            if (isInterrupted) {
                lDrvState.driverState = deferClose ? DriverState.EXECUTING : DriverState.ERROR;
            } else {
                lDrvState.driverState = compileError ? DriverState.ERROR : DriverState.COMPILED;
            }
        } finally {
            lDrvState.stateLock.unlock();
        }
        if (isInterrupted) {
            LOG.info("Compiling command(queryId=" + queryId + ") has been interrupted after " + duration + " seconds");
        } else {
            LOG.info("Completed compiling command(queryId=" + queryId + "); Time taken: " + duration + " seconds");
        }
    }
}
Also used : HiveSemanticAnalyzerHookContext(org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext) BaseSemanticAnalyzer(org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer) AuthorizationException(org.apache.hadoop.hive.ql.metadata.AuthorizationException) HiveVariableSource(org.apache.hadoop.hive.conf.HiveVariableSource) CommandProcessorResponse(org.apache.hadoop.hive.ql.processors.CommandProcessorResponse) PerfLogger(org.apache.hadoop.hive.ql.log.PerfLogger) Configurable(org.apache.hadoop.conf.Configurable) LockException(org.apache.hadoop.hive.ql.lockmgr.LockException) ASTNode(org.apache.hadoop.hive.ql.parse.ASTNode) PrivateHookContext(org.apache.hadoop.hive.ql.hooks.PrivateHookContext) ParseContext(org.apache.hadoop.hive.ql.parse.ParseContext) HiveAuthzContext(org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzContext) WmContext(org.apache.hadoop.hive.ql.wm.WmContext) HookContext(org.apache.hadoop.hive.ql.hooks.HookContext) HiveSemanticAnalyzerHookContext(org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext) VariableSubstitution(org.apache.hadoop.hive.conf.VariableSubstitution) LockException(org.apache.hadoop.hive.ql.lockmgr.LockException) IOException(java.io.IOException) ParseException(org.apache.hadoop.hive.ql.parse.ParseException) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) AuthorizationException(org.apache.hadoop.hive.ql.metadata.AuthorizationException) HiveSemanticAnalyzerHookContextImpl(org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContextImpl) HiveTxnManager(org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager) ParseException(org.apache.hadoop.hive.ql.parse.ParseException) Map(java.util.Map) LinkedHashMap(java.util.LinkedHashMap) ImmutableMap(com.google.common.collect.ImmutableMap) HashMap(java.util.HashMap)
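The queryTxnMgr handling above is the common Hadoop plugin idiom: build the plugin first, then hand it the Configuration only if it opts in by implementing Configurable. A minimal, self-contained sketch of both sides of that contract (ConfigurablePlugin and configureIfPossible are illustrative names, not Hive API):

import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;

public class ConfigurablePlugin implements Configurable {

    private Configuration conf;

    @Override
    public void setConf(Configuration conf) {
        this.conf = conf; // keep the injected configuration for later lookups
    }

    @Override
    public Configuration getConf() {
        return conf;
    }

    // Caller side, mirroring the instanceof check in Driver.compile above.
    public static void configureIfPossible(Object plugin, Configuration conf) {
        if (plugin instanceof Configurable) {
            ((Configurable) plugin).setConf(conf);
        }
    }
}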

Example 3 with Configurable

Use of org.apache.hadoop.conf.Configurable in project incubator-rya by apache.

Class RdfCloudTripleStoreConnection, method evaluateInternal.

@Override
protected CloseableIteration<? extends BindingSet, QueryEvaluationException> evaluateInternal(TupleExpr tupleExpr, final Dataset dataset, BindingSet bindings, final boolean flag) throws SailException {
    verifyIsOpen();
    logger.trace("Incoming query model:\n{}", tupleExpr.toString());
    if (provenanceCollector != null) {
        try {
            provenanceCollector.recordQuery(tupleExpr.toString());
        } catch (final ProvenanceCollectionException e) {
            logger.trace("Provenance failed to record query.", e);
        }
    }
    tupleExpr = tupleExpr.clone();
    final C queryConf = (C) store.getConf().clone();
    if (queryConf == null) {
        // Should not happen, but this is better than a null dereference error.
        throw new SailException("Cloning store.getConf() returned null, aborting.");
    }
    if (bindings != null) {
        final Binding dispPlan = bindings.getBinding(RdfCloudTripleStoreConfiguration.CONF_QUERYPLAN_FLAG);
        if (dispPlan != null) {
            queryConf.setDisplayQueryPlan(Boolean.parseBoolean(dispPlan.getValue().stringValue()));
        }
        final Binding authBinding = bindings.getBinding(RdfCloudTripleStoreConfiguration.CONF_QUERY_AUTH);
        if (authBinding != null) {
            queryConf.setAuths(authBinding.getValue().stringValue().split(","));
        }
        final Binding ttlBinding = bindings.getBinding(RdfCloudTripleStoreConfiguration.CONF_TTL);
        if (ttlBinding != null) {
            queryConf.setTtl(Long.valueOf(ttlBinding.getValue().stringValue()));
        }
        final Binding startTimeBinding = bindings.getBinding(RdfCloudTripleStoreConfiguration.CONF_STARTTIME);
        if (startTimeBinding != null) {
            queryConf.setStartTime(Long.valueOf(startTimeBinding.getValue().stringValue()));
        }
        final Binding performantBinding = bindings.getBinding(RdfCloudTripleStoreConfiguration.CONF_PERFORMANT);
        if (performantBinding != null) {
            queryConf.setBoolean(RdfCloudTripleStoreConfiguration.CONF_PERFORMANT, Boolean.parseBoolean(performantBinding.getValue().stringValue()));
        }
        final Binding inferBinding = bindings.getBinding(RdfCloudTripleStoreConfiguration.CONF_INFER);
        if (inferBinding != null) {
            queryConf.setInfer(Boolean.parseBoolean(inferBinding.getValue().stringValue()));
        }
        final Binding useStatsBinding = bindings.getBinding(RdfCloudTripleStoreConfiguration.CONF_USE_STATS);
        if (useStatsBinding != null) {
            queryConf.setUseStats(Boolean.parseBoolean(useStatsBinding.getValue().stringValue()));
        }
        final Binding offsetBinding = bindings.getBinding(RdfCloudTripleStoreConfiguration.CONF_OFFSET);
        if (offsetBinding != null) {
            queryConf.setOffset(Long.parseLong(offsetBinding.getValue().stringValue()));
        }
        final Binding limitBinding = bindings.getBinding(RdfCloudTripleStoreConfiguration.CONF_LIMIT);
        if (limitBinding != null) {
            queryConf.setLimit(Long.parseLong(limitBinding.getValue().stringValue()));
        }
    } else {
        bindings = new QueryBindingSet();
    }
    if (!(tupleExpr instanceof QueryRoot)) {
        tupleExpr = new QueryRoot(tupleExpr);
    }
    try {
        final List<Class<QueryOptimizer>> optimizers = queryConf.getOptimizers();
        final Class<QueryOptimizer> pcjOptimizer = queryConf.getPcjOptimizer();
        if (pcjOptimizer != null) {
            QueryOptimizer opt = null;
            try {
                final Constructor<QueryOptimizer> construct = pcjOptimizer.getDeclaredConstructor(new Class[] {});
                opt = construct.newInstance();
            } catch (final Exception e) {
                // No usable zero-argument constructor; opt stays null and is reported below.
            }
            if (opt == null) {
                throw new NoSuchMethodException("Could not find valid constructor for " + pcjOptimizer.getName());
            }
            if (opt instanceof Configurable) {
                ((Configurable) opt).setConf(conf);
            }
            opt.optimize(tupleExpr, dataset, bindings);
        }
        final ParallelEvaluationStrategyImpl strategy = new ParallelEvaluationStrategyImpl(new StoreTripleSource<C>(queryConf, ryaDAO), inferenceEngine, dataset, queryConf);
        (new BindingAssigner()).optimize(tupleExpr, dataset, bindings);
        (new ConstantOptimizer(strategy)).optimize(tupleExpr, dataset, bindings);
        (new CompareOptimizer()).optimize(tupleExpr, dataset, bindings);
        (new ConjunctiveConstraintSplitter()).optimize(tupleExpr, dataset, bindings);
        (new DisjunctiveConstraintOptimizer()).optimize(tupleExpr, dataset, bindings);
        (new SameTermFilterOptimizer()).optimize(tupleExpr, dataset, bindings);
        (new QueryModelNormalizer()).optimize(tupleExpr, dataset, bindings);
        (new IterativeEvaluationOptimizer()).optimize(tupleExpr, dataset, bindings);
        if (!optimizers.isEmpty()) {
            for (final Class<QueryOptimizer> optclz : optimizers) {
                QueryOptimizer result = null;
                try {
                    final Constructor<QueryOptimizer> meth = optclz.getDeclaredConstructor(new Class[] {});
                    result = meth.newInstance();
                } catch (final Exception e) {
                    // No zero-argument constructor; try the EvaluationStrategy constructor next.
                }
                try {
                    final Constructor<QueryOptimizer> meth = optclz.getDeclaredConstructor(EvaluationStrategy.class);
                    result = meth.newInstance(strategy);
                } catch (final Exception e) {
                    // No EvaluationStrategy constructor either; result may still be null and is checked below.
                }
                if (result == null) {
                    throw new NoSuchMethodException("Could not find valid constructor for " + optclz.getName());
                }
                if (result instanceof Configurable) {
                    ((Configurable) result).setConf(conf);
                }
                result.optimize(tupleExpr, dataset, bindings);
            }
        }
        (new FilterOptimizer()).optimize(tupleExpr, dataset, bindings);
        (new OrderLimitOptimizer()).optimize(tupleExpr, dataset, bindings);
        logger.trace("Optimized query model:\n{}", tupleExpr.toString());
        if (queryConf.isInfer() && this.inferenceEngine != null) {
            try {
                tupleExpr.visit(new DomainRangeVisitor(queryConf, inferenceEngine));
                tupleExpr.visit(new SomeValuesFromVisitor(queryConf, inferenceEngine));
                tupleExpr.visit(new AllValuesFromVisitor(queryConf, inferenceEngine));
                tupleExpr.visit(new HasValueVisitor(queryConf, inferenceEngine));
                tupleExpr.visit(new IntersectionOfVisitor(queryConf, inferenceEngine));
                tupleExpr.visit(new ReflexivePropertyVisitor(queryConf, inferenceEngine));
                tupleExpr.visit(new PropertyChainVisitor(queryConf, inferenceEngine));
                tupleExpr.visit(new TransitivePropertyVisitor(queryConf, inferenceEngine));
                tupleExpr.visit(new SymmetricPropertyVisitor(queryConf, inferenceEngine));
                tupleExpr.visit(new InverseOfVisitor(queryConf, inferenceEngine));
                tupleExpr.visit(new SubPropertyOfVisitor(queryConf, inferenceEngine));
                tupleExpr.visit(new SubClassOfVisitor(queryConf, inferenceEngine));
                tupleExpr.visit(new SameAsVisitor(queryConf, inferenceEngine));
                tupleExpr.visit(new OneOfVisitor(queryConf, inferenceEngine));
                tupleExpr.visit(new HasSelfVisitor(queryConf, inferenceEngine));
            } catch (final Exception e) {
                logger.error("Error encountered while visiting query node.", e);
            }
        }
        if (queryConf.isPerformant()) {
            tupleExpr.visit(new SeparateFilterJoinsVisitor());
        // tupleExpr.visit(new FilterTimeIndexVisitor(queryConf));
        // tupleExpr.visit(new PartitionFilterTimeIndexVisitor(queryConf));
        }
        final FilterRangeVisitor rangeVisitor = new FilterRangeVisitor(queryConf);
        tupleExpr.visit(rangeVisitor);
        // this has to be done twice to replace the StatementPatterns with the right ranges
        tupleExpr.visit(rangeVisitor);
        EvaluationStatistics stats = null;
        if (!queryConf.isUseStats() && queryConf.isPerformant() || rdfEvalStatsDAO == null) {
            stats = new DefaultStatistics();
        } else if (queryConf.isUseStats()) {
            if (queryConf.isUseSelectivity()) {
                stats = new RdfCloudTripleStoreSelectivityEvaluationStatistics<C>(queryConf, rdfEvalStatsDAO, selectEvalDAO);
            } else {
                stats = new RdfCloudTripleStoreEvaluationStatistics<C>(queryConf, rdfEvalStatsDAO);
            }
        }
        if (stats != null) {
            if (stats instanceof RdfCloudTripleStoreSelectivityEvaluationStatistics) {
                final QueryJoinSelectOptimizer qjso = new QueryJoinSelectOptimizer(stats, selectEvalDAO);
                qjso.optimize(tupleExpr, dataset, bindings);
            } else {
                final QueryJoinOptimizer qjo = new QueryJoinOptimizer(stats);
                // TODO: Make pluggable
                qjo.optimize(tupleExpr, dataset, bindings);
            }
        }
        final CloseableIteration<BindingSet, QueryEvaluationException> iter = strategy.evaluate(tupleExpr, EmptyBindingSet.getInstance());
        final CloseableIteration<BindingSet, QueryEvaluationException> iterWrap = new CloseableIteration<BindingSet, QueryEvaluationException>() {

            @Override
            public void remove() throws QueryEvaluationException {
                iter.remove();
            }

            @Override
            public BindingSet next() throws QueryEvaluationException {
                return iter.next();
            }

            @Override
            public boolean hasNext() throws QueryEvaluationException {
                return iter.hasNext();
            }

            @Override
            public void close() throws QueryEvaluationException {
                iter.close();
                strategy.shutdown();
            }
        };
        return iterWrap;
    } catch (final QueryEvaluationException e) {
        throw new SailException(e);
    } catch (final Exception e) {
        throw new SailException(e);
    }
}
Also used : OneOfVisitor(org.apache.rya.rdftriplestore.inference.OneOfVisitor) FilterOptimizer(org.openrdf.query.algebra.evaluation.impl.FilterOptimizer) SameTermFilterOptimizer(org.openrdf.query.algebra.evaluation.impl.SameTermFilterOptimizer) EvaluationStatistics(org.openrdf.query.algebra.evaluation.impl.EvaluationStatistics) RdfCloudTripleStoreSelectivityEvaluationStatistics(org.apache.rya.rdftriplestore.evaluation.RdfCloudTripleStoreSelectivityEvaluationStatistics) RdfCloudTripleStoreEvaluationStatistics(org.apache.rya.rdftriplestore.evaluation.RdfCloudTripleStoreEvaluationStatistics) DisjunctiveConstraintOptimizer(org.openrdf.query.algebra.evaluation.impl.DisjunctiveConstraintOptimizer) DomainRangeVisitor(org.apache.rya.rdftriplestore.inference.DomainRangeVisitor) IterativeEvaluationOptimizer(org.openrdf.query.algebra.evaluation.impl.IterativeEvaluationOptimizer) BindingAssigner(org.openrdf.query.algebra.evaluation.impl.BindingAssigner) QueryRoot(org.openrdf.query.algebra.QueryRoot) QueryJoinOptimizer(org.apache.rya.rdftriplestore.evaluation.QueryJoinOptimizer) InverseOfVisitor(org.apache.rya.rdftriplestore.inference.InverseOfVisitor) Binding(org.openrdf.query.Binding) QueryBindingSet(org.openrdf.query.algebra.evaluation.QueryBindingSet) EmptyBindingSet(org.openrdf.query.impl.EmptyBindingSet) BindingSet(org.openrdf.query.BindingSet) ProvenanceCollectionException(org.apache.rya.rdftriplestore.provenance.ProvenanceCollectionException) ReflexivePropertyVisitor(org.apache.rya.rdftriplestore.inference.ReflexivePropertyVisitor) SymmetricPropertyVisitor(org.apache.rya.rdftriplestore.inference.SymmetricPropertyVisitor) QueryJoinSelectOptimizer(org.apache.rya.rdftriplestore.evaluation.QueryJoinSelectOptimizer) QueryBindingSet(org.openrdf.query.algebra.evaluation.QueryBindingSet) IntersectionOfVisitor(org.apache.rya.rdftriplestore.inference.IntersectionOfVisitor) QueryEvaluationException(org.openrdf.query.QueryEvaluationException) RdfCloudTripleStoreEvaluationStatistics(org.apache.rya.rdftriplestore.evaluation.RdfCloudTripleStoreEvaluationStatistics) HasSelfVisitor(org.apache.rya.rdftriplestore.inference.HasSelfVisitor) OrderLimitOptimizer(org.openrdf.query.algebra.evaluation.impl.OrderLimitOptimizer) SameAsVisitor(org.apache.rya.rdftriplestore.inference.SameAsVisitor) HasValueVisitor(org.apache.rya.rdftriplestore.inference.HasValueVisitor) SameTermFilterOptimizer(org.openrdf.query.algebra.evaluation.impl.SameTermFilterOptimizer) SubClassOfVisitor(org.apache.rya.rdftriplestore.inference.SubClassOfVisitor) SeparateFilterJoinsVisitor(org.apache.rya.rdftriplestore.evaluation.SeparateFilterJoinsVisitor) Configurable(org.apache.hadoop.conf.Configurable) PropertyChainVisitor(org.apache.rya.rdftriplestore.inference.PropertyChainVisitor) FilterRangeVisitor(org.apache.rya.rdftriplestore.evaluation.FilterRangeVisitor) ParallelEvaluationStrategyImpl(org.apache.rya.rdftriplestore.evaluation.ParallelEvaluationStrategyImpl) TransitivePropertyVisitor(org.apache.rya.rdftriplestore.inference.TransitivePropertyVisitor) SomeValuesFromVisitor(org.apache.rya.rdftriplestore.inference.SomeValuesFromVisitor) DefaultStatistics(org.apache.rya.rdftriplestore.utils.DefaultStatistics) QueryModelNormalizer(org.openrdf.query.algebra.evaluation.impl.QueryModelNormalizer) ConjunctiveConstraintSplitter(org.openrdf.query.algebra.evaluation.impl.ConjunctiveConstraintSplitter) SubPropertyOfVisitor(org.apache.rya.rdftriplestore.inference.SubPropertyOfVisitor) 
RdfCloudTripleStoreSelectivityEvaluationStatistics(org.apache.rya.rdftriplestore.evaluation.RdfCloudTripleStoreSelectivityEvaluationStatistics) SailException(org.openrdf.sail.SailException) QueryOptimizer(org.openrdf.query.algebra.evaluation.QueryOptimizer) SailException(org.openrdf.sail.SailException) ProvenanceCollectionException(org.apache.rya.rdftriplestore.provenance.ProvenanceCollectionException) QueryEvaluationException(org.openrdf.query.QueryEvaluationException) NoSuchElementException(java.util.NoSuchElementException) RyaDAOException(org.apache.rya.api.persist.RyaDAOException) CloseableIteration(info.aduna.iteration.CloseableIteration) CompareOptimizer(org.openrdf.query.algebra.evaluation.impl.CompareOptimizer) ConstantOptimizer(org.openrdf.query.algebra.evaluation.impl.ConstantOptimizer) AllValuesFromVisitor(org.apache.rya.rdftriplestore.inference.AllValuesFromVisitor)
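The optimizer loading above repeats the same three steps for each class: try a zero-argument constructor, fall back to a constructor taking an EvaluationStrategy, and finally inject the Configuration if the instance implements Configurable. A sketch of that logic factored into one helper; the OptimizerLoader name and newConfiguredInstance signature are illustrative, not Rya API:

import java.lang.reflect.Constructor;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;

public final class OptimizerLoader {

    // Instantiate clazz via its no-arg constructor, or via a one-arg constructor of
    // type argType, then call setConf if the result implements Configurable.
    public static <T> T newConfiguredInstance(Class<T> clazz, Class<?> argType, Object arg,
            Configuration conf) throws ReflectiveOperationException {
        T instance;
        try {
            instance = clazz.getDeclaredConstructor().newInstance();
        } catch (NoSuchMethodException e) {
            Constructor<T> ctor = clazz.getDeclaredConstructor(argType);
            instance = ctor.newInstance(arg);
        }
        if (instance instanceof Configurable) {
            ((Configurable) instance).setConf(conf);
        }
        return instance;
    }
}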

Example 4 with Configurable

Use of org.apache.hadoop.conf.Configurable in project tez by apache.

Class TestUnorderedPartitionedKVWriter, method textTest.

public void textTest(int numRegularRecords, int numPartitions, long availableMemory, int numLargeKeys, int numLargevalues, int numLargeKvPairs, boolean pipeliningEnabled, boolean isFinalMergeEnabled) throws IOException, InterruptedException {
    Partitioner partitioner = new HashPartitioner();
    ApplicationId appId = ApplicationId.newInstance(10000000, 1);
    TezCounters counters = new TezCounters();
    String uniqueId = UUID.randomUUID().toString();
    int dagId = 1;
    String auxiliaryService = defaultConf.get(TezConfiguration.TEZ_AM_SHUFFLE_AUXILIARY_SERVICE_ID, TezConfiguration.TEZ_AM_SHUFFLE_AUXILIARY_SERVICE_ID_DEFAULT);
    OutputContext outputContext = createMockOutputContext(counters, appId, uniqueId, auxiliaryService);
    Random random = new Random();
    Configuration conf = createConfiguration(outputContext, Text.class, Text.class, shouldCompress, -1, HashPartitioner.class);
    conf.setBoolean(TezRuntimeConfiguration.TEZ_RUNTIME_PIPELINED_SHUFFLE_ENABLED, pipeliningEnabled);
    conf.setBoolean(TezRuntimeConfiguration.TEZ_RUNTIME_ENABLE_FINAL_MERGE_IN_OUTPUT, isFinalMergeEnabled);
    CompressionCodec codec = null;
    if (shouldCompress) {
        codec = new DefaultCodec();
        ((Configurable) codec).setConf(conf);
    }
    int numRecordsWritten = 0;
    Map<Integer, Multimap<String, String>> expectedValues = new HashMap<Integer, Multimap<String, String>>();
    for (int i = 0; i < numPartitions; i++) {
        expectedValues.put(i, LinkedListMultimap.<String, String>create());
    }
    UnorderedPartitionedKVWriter kvWriter = new UnorderedPartitionedKVWriterForTest(outputContext, conf, numPartitions, availableMemory);
    int sizePerBuffer = kvWriter.sizePerBuffer;
    BitSet partitionsWithData = new BitSet(numPartitions);
    Text keyText = new Text();
    Text valText = new Text();
    for (int i = 0; i < numRegularRecords; i++) {
        String key = createRandomString(Math.abs(random.nextInt(10)));
        String val = createRandomString(Math.abs(random.nextInt(20)));
        keyText.set(key);
        valText.set(val);
        int partition = partitioner.getPartition(keyText, valText, numPartitions);
        partitionsWithData.set(partition);
        expectedValues.get(partition).put(key, val);
        kvWriter.write(keyText, valText);
        numRecordsWritten++;
    }
    // Write Large key records
    for (int i = 0; i < numLargeKeys; i++) {
        String key = createRandomString(sizePerBuffer + Math.abs(random.nextInt(100)));
        String val = createRandomString(Math.abs(random.nextInt(20)));
        keyText.set(key);
        valText.set(val);
        int partition = partitioner.getPartition(keyText, valText, numPartitions);
        partitionsWithData.set(partition);
        expectedValues.get(partition).put(key, val);
        kvWriter.write(keyText, valText);
        numRecordsWritten++;
    }
    if (pipeliningEnabled) {
        verify(outputContext, times(numLargeKeys)).sendEvents(anyListOf(Event.class));
    }
    // Write Large val records
    for (int i = 0; i < numLargevalues; i++) {
        String key = createRandomString(Math.abs(random.nextInt(10)));
        String val = createRandomString(sizePerBuffer + Math.abs(random.nextInt(100)));
        keyText.set(key);
        valText.set(val);
        int partition = partitioner.getPartition(keyText, valText, numPartitions);
        partitionsWithData.set(partition);
        expectedValues.get(partition).put(key, val);
        kvWriter.write(keyText, valText);
        numRecordsWritten++;
    }
    if (pipeliningEnabled) {
        verify(outputContext, times(numLargevalues + numLargeKeys)).sendEvents(anyListOf(Event.class));
    }
    // Write records where key + val are large (but both can fit in the buffer individually)
    for (int i = 0; i < numLargeKvPairs; i++) {
        String key = createRandomString(sizePerBuffer / 2 + Math.abs(random.nextInt(100)));
        String val = createRandomString(sizePerBuffer / 2 + Math.abs(random.nextInt(100)));
        keyText.set(key);
        valText.set(val);
        int partition = partitioner.getPartition(keyText, valText, numPartitions);
        partitionsWithData.set(partition);
        expectedValues.get(partition).put(key, val);
        kvWriter.write(keyText, valText);
        numRecordsWritten++;
    }
    if (pipeliningEnabled) {
        verify(outputContext, times(numLargevalues + numLargeKeys + numLargeKvPairs)).sendEvents(anyListOf(Event.class));
    }
    List<Event> events = kvWriter.close();
    verify(outputContext, never()).reportFailure(any(TaskFailureType.class), any(Throwable.class), any(String.class));
    if (!pipeliningEnabled) {
        VertexManagerEvent vmEvent = null;
        for (Event event : events) {
            if (event instanceof VertexManagerEvent) {
                assertNull(vmEvent);
                vmEvent = (VertexManagerEvent) event;
            }
        }
        VertexManagerEventPayloadProto vmEventPayload = VertexManagerEventPayloadProto.parseFrom(ByteString.copyFrom(vmEvent.getUserPayload().asReadOnlyBuffer()));
        assertEquals(numRecordsWritten, vmEventPayload.getNumRecord());
    }
    TezCounter outputLargeRecordsCounter = counters.findCounter(TaskCounter.OUTPUT_LARGE_RECORDS);
    assertEquals(numLargeKeys + numLargevalues + numLargeKvPairs, outputLargeRecordsCounter.getValue());
    if (pipeliningEnabled || !isFinalMergeEnabled) {
        // verify spill data files and index file exist
        for (int i = 0; i < kvWriter.numSpills.get(); i++) {
            assertTrue(localFs.exists(kvWriter.outputFileHandler.getSpillFileForWrite(i, 0)));
            assertTrue(localFs.exists(kvWriter.outputFileHandler.getSpillIndexFileForWrite(i, 0)));
        }
        return;
    }
    // Validate the events
    assertEquals(2, events.size());
    assertTrue(events.get(0) instanceof VertexManagerEvent);
    VertexManagerEvent vme = (VertexManagerEvent) events.get(0);
    verifyPartitionStats(vme, partitionsWithData);
    assertTrue(events.get(1) instanceof CompositeDataMovementEvent);
    CompositeDataMovementEvent cdme = (CompositeDataMovementEvent) events.get(1);
    assertEquals(0, cdme.getSourceIndexStart());
    assertEquals(numPartitions, cdme.getCount());
    DataMovementEventPayloadProto eventProto = DataMovementEventPayloadProto.parseFrom(ByteString.copyFrom(cdme.getUserPayload()));
    BitSet emptyPartitionBits = null;
    if (partitionsWithData.cardinality() != numPartitions) {
        assertTrue(eventProto.hasEmptyPartitions());
        byte[] emptyPartitions = TezCommonUtils.decompressByteStringToByteArray(eventProto.getEmptyPartitions());
        emptyPartitionBits = TezUtilsInternal.fromByteArray(emptyPartitions);
        assertEquals(numPartitions - partitionsWithData.cardinality(), emptyPartitionBits.cardinality());
    } else {
        assertFalse(eventProto.hasEmptyPartitions());
        emptyPartitionBits = new BitSet(numPartitions);
    }
    assertEquals(HOST_STRING, eventProto.getHost());
    assertEquals(SHUFFLE_PORT, eventProto.getPort());
    assertEquals(uniqueId, eventProto.getPathComponent());
    // Verify the actual data
    TezTaskOutput taskOutput = new TezTaskOutputFiles(conf, uniqueId, dagId);
    Path outputFilePath = kvWriter.finalOutPath;
    Path spillFilePath = kvWriter.finalIndexPath;
    if (numRecordsWritten > 0) {
        assertTrue(localFs.exists(outputFilePath));
        assertTrue(localFs.exists(spillFilePath));
        assertEquals("Incorrect output permissions", (short) 0640, localFs.getFileStatus(outputFilePath).getPermission().toShort());
        assertEquals("Incorrect index permissions", (short) 0640, localFs.getFileStatus(spillFilePath).getPermission().toShort());
    } else {
        return;
    }
    // numRecordsWritten > 0 from here on (the 0-record case returned above).
    TezSpillRecord spillRecord = new TezSpillRecord(spillFilePath, conf);
    DataInputBuffer keyBuffer = new DataInputBuffer();
    DataInputBuffer valBuffer = new DataInputBuffer();
    Text keyDeser = new Text();
    Text valDeser = new Text();
    for (int i = 0; i < numPartitions; i++) {
        if (emptyPartitionBits.get(i)) {
            continue;
        }
        TezIndexRecord indexRecord = spillRecord.getIndex(i);
        FSDataInputStream inStream = FileSystem.getLocal(conf).open(outputFilePath);
        inStream.seek(indexRecord.getStartOffset());
        IFile.Reader reader = new IFile.Reader(inStream, indexRecord.getPartLength(), codec, null, null, false, 0, -1);
        while (reader.nextRawKey(keyBuffer)) {
            reader.nextRawValue(valBuffer);
            keyDeser.readFields(keyBuffer);
            valDeser.readFields(valBuffer);
            int partition = partitioner.getPartition(keyDeser, valDeser, numPartitions);
            assertTrue(expectedValues.get(partition).remove(keyDeser.toString(), valDeser.toString()));
        }
        inStream.close();
    }
    for (int i = 0; i < numPartitions; i++) {
        assertEquals(0, expectedValues.get(i).size());
        expectedValues.remove(i);
    }
    assertEquals(0, expectedValues.size());
}
Also used : TezTaskOutputFiles(org.apache.tez.runtime.library.common.task.local.output.TezTaskOutputFiles) IFile(org.apache.tez.runtime.library.common.sort.impl.IFile) Configuration(org.apache.hadoop.conf.Configuration) TezConfiguration(org.apache.tez.dag.api.TezConfiguration) TezRuntimeConfiguration(org.apache.tez.runtime.library.api.TezRuntimeConfiguration) HashMap(java.util.HashMap) DefaultCodec(org.apache.hadoop.io.compress.DefaultCodec) ByteString(com.google.protobuf.ByteString) Configurable(org.apache.hadoop.conf.Configurable) TezCounter(org.apache.tez.common.counters.TezCounter) TezSpillRecord(org.apache.tez.runtime.library.common.sort.impl.TezSpillRecord) Random(java.util.Random) DataMovementEventPayloadProto(org.apache.tez.runtime.library.shuffle.impl.ShuffleUserPayloads.DataMovementEventPayloadProto) CompressionCodec(org.apache.hadoop.io.compress.CompressionCodec) Partitioner(org.apache.tez.runtime.library.api.Partitioner) HashPartitioner(org.apache.tez.runtime.library.partitioner.HashPartitioner) Path(org.apache.hadoop.fs.Path) BitSet(java.util.BitSet) Text(org.apache.hadoop.io.Text) TezCounters(org.apache.tez.common.counters.TezCounters) OutputContext(org.apache.tez.runtime.api.OutputContext) LinkedListMultimap(com.google.common.collect.LinkedListMultimap) Multimap(com.google.common.collect.Multimap) VertexManagerEvent(org.apache.tez.runtime.api.events.VertexManagerEvent) DataInputBuffer(org.apache.hadoop.io.DataInputBuffer) TaskFailureType(org.apache.tez.runtime.api.TaskFailureType) CompositeDataMovementEvent(org.apache.tez.runtime.api.events.CompositeDataMovementEvent) TezIndexRecord(org.apache.tez.runtime.library.common.sort.impl.TezIndexRecord) HashPartitioner(org.apache.tez.runtime.library.partitioner.HashPartitioner) Event(org.apache.tez.runtime.api.Event) VertexManagerEvent(org.apache.tez.runtime.api.events.VertexManagerEvent) CompositeDataMovementEvent(org.apache.tez.runtime.api.events.CompositeDataMovementEvent) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) VertexManagerEventPayloadProto(org.apache.tez.runtime.library.shuffle.impl.ShuffleUserPayloads.VertexManagerEventPayloadProto) TezTaskOutput(org.apache.tez.runtime.library.common.task.local.output.TezTaskOutput)
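The codec setup above (new DefaultCodec() followed by a Configurable cast) can also be expressed through ReflectionUtils, which performs the same setConf injection. A minimal sketch under that assumption; the CodecFactoryExample class is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.DefaultCodec;
import org.apache.hadoop.util.ReflectionUtils;

public class CodecFactoryExample {

    // ReflectionUtils.newInstance calls setConf(conf) because DefaultCodec
    // implements Configurable, so the explicit cast in the test is not required here.
    public static CompressionCodec newConfiguredCodec(Configuration conf) {
        return ReflectionUtils.newInstance(DefaultCodec.class, conf);
    }
}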

Example 5 with Configurable

Use of org.apache.hadoop.conf.Configurable in project tez by apache.

Class TestUnorderedPartitionedKVWriter, method baseTest.

private void baseTest(int numRecords, int numPartitions, Set<Integer> skippedPartitions, boolean shouldCompress, int maxSingleBufferSizeBytes, int bufferMergePercent, int availableMemory) throws IOException, InterruptedException {
    PartitionerForTest partitioner = new PartitionerForTest();
    ApplicationId appId = ApplicationId.newInstance(10000000, 1);
    TezCounters counters = new TezCounters();
    String uniqueId = UUID.randomUUID().toString();
    int dagId = 1;
    String auxiliaryService = defaultConf.get(TezConfiguration.TEZ_AM_SHUFFLE_AUXILIARY_SERVICE_ID, TezConfiguration.TEZ_AM_SHUFFLE_AUXILIARY_SERVICE_ID_DEFAULT);
    OutputContext outputContext = createMockOutputContext(counters, appId, uniqueId, auxiliaryService);
    Configuration conf = createConfiguration(outputContext, IntWritable.class, LongWritable.class, shouldCompress, maxSingleBufferSizeBytes);
    conf.setInt(TezRuntimeConfiguration.TEZ_RUNTIME_UNORDERED_PARTITIONED_KVWRITER_BUFFER_MERGE_PERCENT, bufferMergePercent);
    CompressionCodec codec = null;
    if (shouldCompress) {
        codec = new DefaultCodec();
        ((Configurable) codec).setConf(conf);
    }
    int numOutputs = numPartitions;
    int numRecordsWritten = 0;
    Map<Integer, Multimap<Integer, Long>> expectedValues = new HashMap<Integer, Multimap<Integer, Long>>();
    for (int i = 0; i < numOutputs; i++) {
        expectedValues.put(i, LinkedListMultimap.<Integer, Long>create());
    }
    UnorderedPartitionedKVWriter kvWriter = new UnorderedPartitionedKVWriterForTest(outputContext, conf, numOutputs, availableMemory);
    int sizePerBuffer = kvWriter.sizePerBuffer;
    // IntW + LongW
    int sizePerRecord = 4 + 8;
    // Record + META_OVERHEAD
    int sizePerRecordWithOverhead = sizePerRecord + 12;
    IntWritable intWritable = new IntWritable();
    LongWritable longWritable = new LongWritable();
    BitSet partitionsWithData = new BitSet(numPartitions);
    for (int i = 0; i < numRecords; i++) {
        intWritable.set(i);
        longWritable.set(i);
        int partition = partitioner.getPartition(intWritable, longWritable, numOutputs);
        if (skippedPartitions != null && skippedPartitions.contains(partition)) {
            continue;
        }
        partitionsWithData.set(partition);
        expectedValues.get(partition).put(intWritable.get(), longWritable.get());
        kvWriter.write(intWritable, longWritable);
        numRecordsWritten++;
    }
    List<Event> events = kvWriter.close();
    if (numPartitions == 1) {
        assertEquals(true, kvWriter.skipBuffers);
    }
    int recordsPerBuffer = sizePerBuffer / sizePerRecordWithOverhead;
    int numExpectedSpills = numRecordsWritten / recordsPerBuffer / kvWriter.spillLimit;
    verify(outputContext, never()).reportFailure(any(TaskFailureType.class), any(Throwable.class), any(String.class));
    assertNull(kvWriter.currentBuffer);
    assertEquals(0, kvWriter.availableBuffers.size());
    // Verify the counters
    TezCounter outputRecordBytesCounter = counters.findCounter(TaskCounter.OUTPUT_BYTES);
    TezCounter outputRecordsCounter = counters.findCounter(TaskCounter.OUTPUT_RECORDS);
    TezCounter outputBytesWithOverheadCounter = counters.findCounter(TaskCounter.OUTPUT_BYTES_WITH_OVERHEAD);
    TezCounter fileOutputBytesCounter = counters.findCounter(TaskCounter.OUTPUT_BYTES_PHYSICAL);
    TezCounter spilledRecordsCounter = counters.findCounter(TaskCounter.SPILLED_RECORDS);
    TezCounter additionalSpillBytesWritternCounter = counters.findCounter(TaskCounter.ADDITIONAL_SPILLS_BYTES_WRITTEN);
    TezCounter additionalSpillBytesReadCounter = counters.findCounter(TaskCounter.ADDITIONAL_SPILLS_BYTES_READ);
    TezCounter numAdditionalSpillsCounter = counters.findCounter(TaskCounter.ADDITIONAL_SPILL_COUNT);
    assertEquals(numRecordsWritten * sizePerRecord, outputRecordBytesCounter.getValue());
    if (numPartitions > 1) {
        assertEquals(numRecordsWritten * sizePerRecordWithOverhead, outputBytesWithOverheadCounter.getValue());
    }
    assertEquals(numRecordsWritten, outputRecordsCounter.getValue());
    long fileOutputBytes = fileOutputBytesCounter.getValue();
    if (numRecordsWritten > 0) {
        assertTrue(fileOutputBytes > 0);
        if (!shouldCompress) {
            assertTrue(fileOutputBytes > outputRecordBytesCounter.getValue());
        }
    } else {
        assertEquals(0, fileOutputBytes);
    }
    assertEquals(recordsPerBuffer * numExpectedSpills, spilledRecordsCounter.getValue());
    long additionalSpillBytesWritten = additionalSpillBytesWritternCounter.getValue();
    long additionalSpillBytesRead = additionalSpillBytesReadCounter.getValue();
    if (numExpectedSpills == 0) {
        assertEquals(0, additionalSpillBytesWritten);
        assertEquals(0, additionalSpillBytesRead);
    } else {
        assertTrue(additionalSpillBytesWritten > 0);
        assertTrue(additionalSpillBytesRead > 0);
        if (!shouldCompress) {
            assertTrue(additionalSpillBytesWritten > (recordsPerBuffer * numExpectedSpills * sizePerRecord));
            assertTrue(additionalSpillBytesRead > (recordsPerBuffer * numExpectedSpills * sizePerRecord));
        }
    }
    assertEquals(additionalSpillBytesWritten, additionalSpillBytesRead);
    // due to multiple threads, buffers could be merged in chunks in scheduleSpill.
    assertTrue(numExpectedSpills >= numAdditionalSpillsCounter.getValue());
    BitSet emptyPartitionBits = null;
    // Verify the events returned
    assertEquals(2, events.size());
    assertTrue(events.get(0) instanceof VertexManagerEvent);
    VertexManagerEvent vme = (VertexManagerEvent) events.get(0);
    verifyPartitionStats(vme, partitionsWithData);
    assertTrue(events.get(1) instanceof CompositeDataMovementEvent);
    CompositeDataMovementEvent cdme = (CompositeDataMovementEvent) events.get(1);
    assertEquals(0, cdme.getSourceIndexStart());
    assertEquals(numOutputs, cdme.getCount());
    DataMovementEventPayloadProto eventProto = DataMovementEventPayloadProto.parseFrom(ByteString.copyFrom(cdme.getUserPayload()));
    if (skippedPartitions == null && numRecordsWritten > 0) {
        assertFalse(eventProto.hasEmptyPartitions());
        emptyPartitionBits = new BitSet(numPartitions);
    } else {
        assertTrue(eventProto.hasEmptyPartitions());
        byte[] emptyPartitions = TezCommonUtils.decompressByteStringToByteArray(eventProto.getEmptyPartitions());
        emptyPartitionBits = TezUtilsInternal.fromByteArray(emptyPartitions);
        if (numRecordsWritten == 0) {
            assertEquals(numPartitions, emptyPartitionBits.cardinality());
        } else {
            for (Integer e : skippedPartitions) {
                assertTrue(emptyPartitionBits.get(e));
            }
            assertEquals(skippedPartitions.size(), emptyPartitionBits.cardinality());
        }
    }
    if (emptyPartitionBits.cardinality() != numPartitions) {
        assertEquals(HOST_STRING, eventProto.getHost());
        assertEquals(SHUFFLE_PORT, eventProto.getPort());
        assertEquals(uniqueId, eventProto.getPathComponent());
    } else {
        assertFalse(eventProto.hasHost());
        assertFalse(eventProto.hasPort());
        assertFalse(eventProto.hasPathComponent());
    }
    // Verify the actual data
    TezTaskOutput taskOutput = new TezTaskOutputFiles(conf, uniqueId, dagId);
    Path outputFilePath = kvWriter.finalOutPath;
    Path spillFilePath = kvWriter.finalIndexPath;
    if (numRecordsWritten <= 0) {
        return;
    }
    assertTrue(localFs.exists(outputFilePath));
    assertTrue(localFs.exists(spillFilePath));
    // verify no intermediate spill files have been left around
    synchronized (kvWriter.spillInfoList) {
        for (SpillInfo spill : kvWriter.spillInfoList) {
            assertFalse("lingering intermediate spill file " + spill.outPath, localFs.exists(spill.outPath));
        }
    }
    // numRecordsWritten > 0 from here on (the 0-record case returned above).
    TezSpillRecord spillRecord = new TezSpillRecord(spillFilePath, conf);
    DataInputBuffer keyBuffer = new DataInputBuffer();
    DataInputBuffer valBuffer = new DataInputBuffer();
    IntWritable keyDeser = new IntWritable();
    LongWritable valDeser = new LongWritable();
    for (int i = 0; i < numOutputs; i++) {
        TezIndexRecord indexRecord = spillRecord.getIndex(i);
        if (skippedPartitions != null && skippedPartitions.contains(i)) {
            assertFalse("The Index Record for partition " + i + " should not have any data", indexRecord.hasData());
            continue;
        }
        FSDataInputStream inStream = FileSystem.getLocal(conf).open(outputFilePath);
        inStream.seek(indexRecord.getStartOffset());
        IFile.Reader reader = new IFile.Reader(inStream, indexRecord.getPartLength(), codec, null, null, false, 0, -1);
        while (reader.nextRawKey(keyBuffer)) {
            reader.nextRawValue(valBuffer);
            keyDeser.readFields(keyBuffer);
            valDeser.readFields(valBuffer);
            int partition = partitioner.getPartition(keyDeser, valDeser, numOutputs);
            assertTrue(expectedValues.get(partition).remove(keyDeser.get(), valDeser.get()));
        }
        inStream.close();
    }
    for (int i = 0; i < numOutputs; i++) {
        assertEquals(0, expectedValues.get(i).size());
        expectedValues.remove(i);
    }
    assertEquals(0, expectedValues.size());
    verify(outputContext, atLeast(1)).notifyProgress();
}
Also used : TezTaskOutputFiles(org.apache.tez.runtime.library.common.task.local.output.TezTaskOutputFiles) IFile(org.apache.tez.runtime.library.common.sort.impl.IFile) Configuration(org.apache.hadoop.conf.Configuration) TezConfiguration(org.apache.tez.dag.api.TezConfiguration) TezRuntimeConfiguration(org.apache.tez.runtime.library.api.TezRuntimeConfiguration) HashMap(java.util.HashMap) DefaultCodec(org.apache.hadoop.io.compress.DefaultCodec) ByteString(com.google.protobuf.ByteString) Configurable(org.apache.hadoop.conf.Configurable) TezCounter(org.apache.tez.common.counters.TezCounter) TezSpillRecord(org.apache.tez.runtime.library.common.sort.impl.TezSpillRecord) SpillInfo(org.apache.tez.runtime.library.common.writers.UnorderedPartitionedKVWriter.SpillInfo) DataMovementEventPayloadProto(org.apache.tez.runtime.library.shuffle.impl.ShuffleUserPayloads.DataMovementEventPayloadProto) CompressionCodec(org.apache.hadoop.io.compress.CompressionCodec) LongWritable(org.apache.hadoop.io.LongWritable) IntWritable(org.apache.hadoop.io.IntWritable) Path(org.apache.hadoop.fs.Path) BitSet(java.util.BitSet) TezCounters(org.apache.tez.common.counters.TezCounters) OutputContext(org.apache.tez.runtime.api.OutputContext) LinkedListMultimap(com.google.common.collect.LinkedListMultimap) Multimap(com.google.common.collect.Multimap) VertexManagerEvent(org.apache.tez.runtime.api.events.VertexManagerEvent) DataInputBuffer(org.apache.hadoop.io.DataInputBuffer) TaskFailureType(org.apache.tez.runtime.api.TaskFailureType) CompositeDataMovementEvent(org.apache.tez.runtime.api.events.CompositeDataMovementEvent) TezIndexRecord(org.apache.tez.runtime.library.common.sort.impl.TezIndexRecord) Event(org.apache.tez.runtime.api.Event) VertexManagerEvent(org.apache.tez.runtime.api.events.VertexManagerEvent) CompositeDataMovementEvent(org.apache.tez.runtime.api.events.CompositeDataMovementEvent) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) TezTaskOutput(org.apache.tez.runtime.library.common.task.local.output.TezTaskOutput)
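Both Tez tests exercise the write side of the Configurable contract. When writing a new component that needs this behaviour, the usual shortcut is to extend org.apache.hadoop.conf.Configured, which stores the Configuration and implements both setConf and getConf. A minimal sketch (the BufferSizedComponent class and the example.buffer.size key are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;

// Configured implements Configurable, so instances of this class can be handed a
// Configuration exactly like the codec in the tests: ((Configurable) obj).setConf(conf).
public class BufferSizedComponent extends Configured {

    public BufferSizedComponent(Configuration conf) {
        super(conf); // store the configuration up front
    }

    public long bufferSize() {
        // Read a tunable from the injected configuration; the key is made up for this sketch.
        return getConf().getLong("example.buffer.size", 64 * 1024);
    }
}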

Aggregations

Configurable (org.apache.hadoop.conf.Configurable): 25
CompressionCodec (org.apache.hadoop.io.compress.CompressionCodec): 10
Configuration (org.apache.hadoop.conf.Configuration): 8
Path (org.apache.hadoop.fs.Path): 6
DefaultCodec (org.apache.hadoop.io.compress.DefaultCodec): 5
ByteString (com.google.protobuf.ByteString): 4
IOException (java.io.IOException): 4
BitSet (java.util.BitSet): 4
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 4
IntWritable (org.apache.hadoop.io.IntWritable): 4
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 4
TezCounter (org.apache.tez.common.counters.TezCounter): 4
TezCounters (org.apache.tez.common.counters.TezCounters): 4
FileOutputStream (java.io.FileOutputStream): 3
HashMap (java.util.HashMap): 3
LongWritable (org.apache.hadoop.io.LongWritable): 3
TezConfiguration (org.apache.tez.dag.api.TezConfiguration): 3
Event (org.apache.tez.runtime.api.Event): 3
OutputContext (org.apache.tez.runtime.api.OutputContext): 3
TaskFailureType (org.apache.tez.runtime.api.TaskFailureType): 3