Example 66 with AtomicBoolean

Use of java.util.concurrent.atomic.AtomicBoolean in project hbase by apache.

Class TestHBaseFsckOneRS, method testQuarantineMissingRegionDir.

/**
   * This creates a table and simulates the race situation where a concurrent compaction or split
   * has removed a region dir before the corruption checker got to it.
   */
@Test(timeout = 180000)
public void testQuarantineMissingRegionDir() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    // Inject a fault into the HFileCorruptionChecker (hfcc) that hbck creates.
    final FileSystem fs = FileSystem.get(conf);
    HBaseFsck hbck = new HBaseFsck(conf, hbfsckExecutorService) {

        @Override
        public HFileCorruptionChecker createHFileCorruptionChecker(boolean sidelineCorruptHFiles) throws IOException {
            return new HFileCorruptionChecker(conf, executor, sidelineCorruptHFiles) {

                AtomicBoolean attemptedFirstHFile = new AtomicBoolean(false);

                @Override
                protected void checkRegionDir(Path p) throws IOException {
                    if (attemptedFirstHFile.compareAndSet(false, true)) {
                        // make sure delete happened.
                        assertTrue(fs.delete(p, true));
                    }
                    super.checkRegionDir(p);
                }
            };
        }
    };
    doQuarantineTest(tableName, hbck, 3, 0, 0, 0, 1);
    hbck.close();
}
Also used : Path(org.apache.hadoop.fs.Path) TableName(org.apache.hadoop.hbase.TableName) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) FileSystem(org.apache.hadoop.fs.FileSystem) HFileCorruptionChecker(org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker) Test(org.junit.Test)
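
The compareAndSet(false, true) guard in this test is the standard one-shot idiom: the read, the comparison, and the write happen as a single atomic step, so exactly one checker thread performs the fault injection no matter how the thread pool is scheduled. A minimal standalone sketch of the idiom (class and names are hypothetical, not HBase code):

import java.util.concurrent.atomic.AtomicBoolean;

public class RunOnce {

    private final AtomicBoolean done = new AtomicBoolean(false);

    // Runs the action at most once across all threads: only the first caller
    // wins the false -> true CAS; every later caller sees true and skips.
    public void runOnce(Runnable action) {
        if (done.compareAndSet(false, true)) {
            action.run();
        }
    }

    public static void main(String[] args) {
        RunOnce once = new RunOnce();
        for (int i = 0; i < 4; i++) {
            new Thread(() -> once.runOnce(() -> System.out.println("ran exactly once"))).start();
        }
    }
}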

Example 67 with AtomicBoolean

Use of java.util.concurrent.atomic.AtomicBoolean in project hbase by apache.

Class TestHBaseFsckOneRS, method testQuarantineMissingFamdir.

/**
   * This creates a table and simulates the race situation where a concurrent compaction or split
   * has removed a colfam dir before the corruption checker got to it.
   */
// Disabled because it fails sporadically.  Is this test right?  Timing-wise, there could be no
// files in a column family on initial creation -- as suggested by Matteo.
@Ignore
@Test(timeout = 180000)
public void testQuarantineMissingFamdir() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    // Inject a fault into the HFileCorruptionChecker (hfcc) that hbck creates.
    final FileSystem fs = FileSystem.get(conf);
    HBaseFsck hbck = new HBaseFsck(conf, hbfsckExecutorService) {

        @Override
        public HFileCorruptionChecker createHFileCorruptionChecker(boolean sidelineCorruptHFiles) throws IOException {
            return new HFileCorruptionChecker(conf, executor, sidelineCorruptHFiles) {

                AtomicBoolean attemptedFirstHFile = new AtomicBoolean(false);

                @Override
                protected void checkColFamDir(Path p) throws IOException {
                    if (attemptedFirstHFile.compareAndSet(false, true)) {
                        // make sure delete happened.
                        assertTrue(fs.delete(p, true));
                    }
                    super.checkColFamDir(p);
                }
            };
        }
    };
    doQuarantineTest(tableName, hbck, 3, 0, 0, 0, 1);
    hbck.close();
}
Also used : Path(org.apache.hadoop.fs.Path) TableName(org.apache.hadoop.hbase.TableName) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) FileSystem(org.apache.hadoop.fs.FileSystem) HFileCorruptionChecker(org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker) Ignore(org.junit.Ignore) Test(org.junit.Test)
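
This test applies the same CAS guard as Example 66, one level up the directory tree. The reason these tests reach for AtomicBoolean rather than a plain volatile boolean: a volatile check-then-act is two separate operations, so two checker threads could both observe false and both attempt the delete. A side-by-side sketch of the difference (hypothetical names):

import java.util.concurrent.atomic.AtomicBoolean;

public class GuardComparison {

    // Racy: the check and the set are separate operations, so two threads
    // can both read false before either one writes true.
    private volatile boolean rawFlag = false;

    boolean tryOnceRacy() {
        if (!rawFlag) {     // a second thread can slip in right here
            rawFlag = true;
            return true;    // may be returned by more than one thread
        }
        return false;
    }

    // Safe: compareAndSet performs read-compare-write as one atomic step,
    // so exactly one caller ever gets true back.
    private final AtomicBoolean atomicFlag = new AtomicBoolean(false);

    boolean tryOnceAtomic() {
        return atomicFlag.compareAndSet(false, true);
    }
}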

Example 68 with AtomicBoolean

Use of java.util.concurrent.atomic.AtomicBoolean in project hive by apache.

Class ObjectStore, method getPartitionsByExprInternal.

protected boolean getPartitionsByExprInternal(String dbName, String tblName, final byte[] expr, final String defaultPartitionName, final short maxParts, List<Partition> result, boolean allowSql, boolean allowJdo) throws TException {
    assert result != null;
    final ExpressionTree exprTree = PartFilterExprUtil.makeExpressionTree(expressionProxy, expr);
    final AtomicBoolean hasUnknownPartitions = new AtomicBoolean(false);
    result.addAll(new GetListHelper<Partition>(dbName, tblName, allowSql, allowJdo) {

        @Override
        protected List<Partition> getSqlResult(GetHelper<List<Partition>> ctx) throws MetaException {
            // If we have some sort of expression tree, try SQL filter pushdown.
            if (exprTree != null) {
                SqlFilterForPushdown filter = new SqlFilterForPushdown();
                if (directSql.generateSqlFilterForPushdown(ctx.getTable(), exprTree, filter)) {
                    return directSql.getPartitionsViaSqlFilter(filter, null);
                }
            }
            // We couldn't do SQL filter pushdown. Get names via normal means.
            List<String> partNames = new LinkedList<String>();
            hasUnknownPartitions.set(getPartitionNamesPrunedByExprNoTxn(ctx.getTable(), expr, defaultPartitionName, maxParts, partNames));
            return directSql.getPartitionsViaSqlFilter(dbName, tblName, partNames);
        }

        @Override
        protected List<Partition> getJdoResult(GetHelper<List<Partition>> ctx) throws MetaException, NoSuchObjectException {
            // If we have some sort of expression tree, try JDOQL filter pushdown.
            List<Partition> result = null;
            if (exprTree != null) {
                result = getPartitionsViaOrmFilter(ctx.getTable(), exprTree, maxParts, false);
            }
            if (result == null) {
                // We couldn't do JDOQL filter pushdown. Get names via normal means.
                List<String> partNames = new ArrayList<String>();
                hasUnknownPartitions.set(getPartitionNamesPrunedByExprNoTxn(ctx.getTable(), expr, defaultPartitionName, maxParts, partNames));
                result = getPartitionsViaOrmFilter(dbName, tblName, partNames);
            }
            return result;
        }
    }.run(true));
    return hasUnknownPartitions.get();
}
Also used : AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) MPartition(org.apache.hadoop.hive.metastore.model.MPartition) Partition(org.apache.hadoop.hive.metastore.api.Partition) ArrayList(java.util.ArrayList) ExpressionTree(org.apache.hadoop.hive.metastore.parser.ExpressionTree) SqlFilterForPushdown(org.apache.hadoop.hive.metastore.MetaStoreDirectSql.SqlFilterForPushdown) LinkedList(java.util.LinkedList)
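
Unlike the test examples, this AtomicBoolean is not doing any thread coordination: an anonymous inner class can only capture (effectively) final locals, so hasUnknownPartitions acts as a single-element mutable holder that lets the callback pass a result back to the enclosing method. A minimal sketch of that holder idiom (the names and the Loader interface are hypothetical):

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;

public class OutParamDemo {

    interface Loader {
        List<String> load();
    }

    static List<String> loadAll(AtomicBoolean sawFallback) {
        // A captured local must be effectively final, so we mutate the
        // holder's contents instead of reassigning a plain boolean.
        Loader loader = () -> {
            sawFallback.set(true); // report a condition back to the caller
            return Arrays.asList("a", "b");
        };
        return loader.load();
    }

    public static void main(String[] args) {
        AtomicBoolean sawFallback = new AtomicBoolean(false);
        List<String> result = loadAll(sawFallback);
        System.out.println(result + ", fallback: " + sawFallback.get());
    }
}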

Example 69 with AtomicBoolean

Use of java.util.concurrent.atomic.AtomicBoolean in project hive by apache.

Class CalcitePlanner, method rewriteASTForMultiInsert.

private ASTNode rewriteASTForMultiInsert(ASTNode query, ASTNode nodeOfInterest) {
    // 1. gather references from original query
    // This is a map from aliases to references.
    // We keep all references as we will need to modify them after creating
    // the subquery
    final Multimap<String, Object> aliasNodes = ArrayListMultimap.create();
    // To know if we need to bail out
    final AtomicBoolean notSupported = new AtomicBoolean(false);
    TreeVisitorAction action = new TreeVisitorAction() {

        @Override
        public Object pre(Object t) {
            if (!notSupported.get()) {
                if (ParseDriver.adaptor.getType(t) == HiveParser.TOK_ALLCOLREF) {
                    // TODO: this is a limitation of the AST rewriting approach that we will
                    // not be able to overcome till proper integration of full multi-insert
                    // queries with Calcite is implemented.
                    // The current rewriting gathers references from insert clauses and then
                    // updates them with the new subquery references. However, if insert
                    // clauses use * or tab.*, we cannot resolve the columns that we are
                    // referring to. Thus, we just bail out and those queries will not be
                    // currently optimized by Calcite.
                    // An example of such query is:
                    // FROM T_A a LEFT JOIN T_B b ON a.id = b.id
                    // INSERT OVERWRITE TABLE join_result_1
                    // SELECT a.*, b.*
                    // INSERT OVERWRITE TABLE join_result_3
                    // SELECT a.*, b.*;
                    notSupported.set(true);
                } else if (ParseDriver.adaptor.getType(t) == HiveParser.DOT) {
                    Object c = ParseDriver.adaptor.getChild(t, 0);
                    if (c != null && ParseDriver.adaptor.getType(c) == HiveParser.TOK_TABLE_OR_COL) {
                        aliasNodes.put(((ASTNode) t).toStringTree(), t);
                    }
                } else if (ParseDriver.adaptor.getType(t) == HiveParser.TOK_TABLE_OR_COL) {
                    Object p = ParseDriver.adaptor.getParent(t);
                    if (p == null || ParseDriver.adaptor.getType(p) != HiveParser.DOT) {
                        aliasNodes.put(((ASTNode) t).toStringTree(), t);
                    }
                }
            }
            return t;
        }

        @Override
        public Object post(Object t) {
            return t;
        }
    };
    TreeVisitor tv = new TreeVisitor(ParseDriver.adaptor);
    // For each INSERT child, visit its subtree to gather the references
    for (int i = 0; i < query.getChildCount(); i++) {
        ASTNode child = (ASTNode) query.getChild(i);
        if (ParseDriver.adaptor.getType(child) != HiveParser.TOK_INSERT) {
            // If it is not an INSERT, we do not need to do anything
            continue;
        }
        tv.visit(child, action);
    }
    if (notSupported.get()) {
        // Bail out
        return null;
    }
    // 2. rewrite into query
    //  TOK_QUERY
    //     TOK_FROM
    //        join
    //     TOK_INSERT
    //        TOK_DESTINATION
    //           TOK_DIR
    //              TOK_TMP_FILE
    //        TOK_SELECT
    //           refs
    ASTNode from = new ASTNode(new CommonToken(HiveParser.TOK_FROM, "TOK_FROM"));
    from.addChild((ASTNode) ParseDriver.adaptor.dupTree(nodeOfInterest));
    ASTNode destination = new ASTNode(new CommonToken(HiveParser.TOK_DESTINATION, "TOK_DESTINATION"));
    ASTNode dir = new ASTNode(new CommonToken(HiveParser.TOK_DIR, "TOK_DIR"));
    ASTNode tmpFile = new ASTNode(new CommonToken(HiveParser.TOK_TMP_FILE, "TOK_TMP_FILE"));
    dir.addChild(tmpFile);
    destination.addChild(dir);
    ASTNode select = new ASTNode(new CommonToken(HiveParser.TOK_SELECT, "TOK_SELECT"));
    int num = 0;
    for (Collection<Object> selectIdentifier : aliasNodes.asMap().values()) {
        Iterator<Object> it = selectIdentifier.iterator();
        ASTNode node = (ASTNode) it.next();
        // Add select expression
        ASTNode selectExpr = new ASTNode(new CommonToken(HiveParser.TOK_SELEXPR, "TOK_SELEXPR"));
        // Identifier
        selectExpr.addChild((ASTNode) ParseDriver.adaptor.dupTree(node));
        String colAlias = "col" + num;
        // Alias
        selectExpr.addChild(new ASTNode(new CommonToken(HiveParser.Identifier, colAlias)));
        select.addChild(selectExpr);
        // Rewrite all INSERT references (all the node values for this key)
        ASTNode colExpr = new ASTNode(new CommonToken(HiveParser.TOK_TABLE_OR_COL, "TOK_TABLE_OR_COL"));
        colExpr.addChild(new ASTNode(new CommonToken(HiveParser.Identifier, colAlias)));
        replaceASTChild(node, colExpr);
        while (it.hasNext()) {
            // Loop to rewrite rest of INSERT references
            node = (ASTNode) it.next();
            colExpr = new ASTNode(new CommonToken(HiveParser.TOK_TABLE_OR_COL, "TOK_TABLE_OR_COL"));
            colExpr.addChild(new ASTNode(new CommonToken(HiveParser.Identifier, colAlias)));
            replaceASTChild(node, colExpr);
        }
        num++;
    }
    ASTNode insert = new ASTNode(new CommonToken(HiveParser.TOK_INSERT, "TOK_INSERT"));
    insert.addChild(destination);
    insert.addChild(select);
    ASTNode newQuery = new ASTNode(new CommonToken(HiveParser.TOK_QUERY, "TOK_QUERY"));
    newQuery.addChild(from);
    newQuery.addChild(insert);
    // 3. create subquery
    ASTNode subq = new ASTNode(new CommonToken(HiveParser.TOK_SUBQUERY, "TOK_SUBQUERY"));
    subq.addChild(newQuery);
    subq.addChild(new ASTNode(new CommonToken(HiveParser.Identifier, "subq")));
    replaceASTChild(nodeOfInterest, subq);
    // 4. return subquery
    return subq;
}
Also used : TreeVisitor(org.antlr.runtime.tree.TreeVisitor) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) TreeVisitorAction(org.antlr.runtime.tree.TreeVisitorAction) CommonToken(org.antlr.runtime.CommonToken)
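
The ANTLR TreeVisitor offers no way to abort a walk midway, so notSupported turns the rest of the traversal into a no-op once an unsupported construct is seen. A simplified sketch of that bail-out pattern over a flat node list (hypothetical names, not the ANTLR API):

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Consumer;

public class BailOutVisitor {

    // The visitor cannot stop a traversal early, so once the flag is set
    // the remaining visits become no-ops instead of doing real work.
    static void visit(List<String> nodes, AtomicBoolean notSupported, Consumer<String> action) {
        for (String node : nodes) {
            if (notSupported.get()) {
                continue; // traversal keeps going, but we stop doing work
            }
            if (node.equals("*")) {
                notSupported.set(true); // unsupported construct: bail out
            } else {
                action.accept(node);
            }
        }
    }

    public static void main(String[] args) {
        AtomicBoolean notSupported = new AtomicBoolean(false);
        visit(Arrays.asList("a", "*", "b"), notSupported, System.out::println);
        System.out.println("supported: " + !notSupported.get());
    }
}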

Example 70 with AtomicBoolean

Use of java.util.concurrent.atomic.AtomicBoolean in project hbase by apache.

Class TestMasterBalanceThrottling, method testThrottlingByBalanceInterval.

@Test(timeout = 60000)
public void testThrottlingByBalanceInterval() throws Exception {
    // Use default config and start a cluster of two regionservers.
    TEST_UTIL.startMiniCluster(2);
    TableName tableName = createTable("testNoThrottling");
    final HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
    // Default max balancing time is 300000 ms and there are 50 regions to balance.
    // The balance interval is 6000 ms, much longer than the normal region-in-transition duration,
    // so the master can balance the regions one by one.
    unbalance(master, tableName);
    AtomicInteger maxCount = new AtomicInteger(0);
    AtomicBoolean stop = new AtomicBoolean(false);
    Thread checker = startBalancerChecker(master, maxCount, stop);
    master.balance();
    stop.set(true);
    checker.interrupt();
    checker.join();
    assertTrue("max regions in transition: " + maxCount.get(), maxCount.get() == 1);
    TEST_UTIL.deleteTable(tableName);
}
Also used : TableName(org.apache.hadoop.hbase.TableName) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Test(org.junit.Test)
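
The stop flag here is the classic cooperative-shutdown pattern: the checker thread polls the AtomicBoolean on every iteration, and the test flips it (plus an interrupt to cut short any sleep) when the observation window ends. startBalancerChecker's body is not shown on this page, so the sketch below is a hypothetical reconstruction of the pattern:

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;

public class StopFlagDemo {

    // A polling thread that records the maximum value it observes and
    // exits once the shared stop flag is flipped by another thread.
    static Thread startChecker(AtomicInteger maxCount, AtomicBoolean stop) {
        Thread t = new Thread(() -> {
            while (!stop.get()) {
                maxCount.accumulateAndGet(sampleRegionsInTransition(), Math::max);
                try {
                    Thread.sleep(10);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt(); // loop re-checks the flag
                }
            }
        });
        t.start();
        return t;
    }

    static int sampleRegionsInTransition() {
        return 1; // stand-in for querying the master's state
    }

    public static void main(String[] args) throws InterruptedException {
        AtomicInteger maxCount = new AtomicInteger(0);
        AtomicBoolean stop = new AtomicBoolean(false);
        Thread checker = startChecker(maxCount, stop);
        Thread.sleep(50);    // the work being observed would happen here
        stop.set(true);      // signal shutdown
        checker.interrupt(); // wake the checker if it is sleeping
        checker.join();
        System.out.println("max observed: " + maxCount.get());
    }
}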

Aggregations

AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 2412
Test (org.junit.Test): 1002
CountDownLatch (java.util.concurrent.CountDownLatch): 394
IOException (java.io.IOException): 336
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 301
ArrayList (java.util.ArrayList): 214
AtomicReference (java.util.concurrent.atomic.AtomicReference): 202
ENotificationImpl (org.eclipse.emf.ecore.impl.ENotificationImpl): 108
Test (org.testng.annotations.Test): 106
List (java.util.List): 98
Ignite (org.apache.ignite.Ignite): 98
AtomicLong (java.util.concurrent.atomic.AtomicLong): 94
HashMap (java.util.HashMap): 93
ExecutorService (java.util.concurrent.ExecutorService): 90
Map (java.util.Map): 88
ExecutionException (java.util.concurrent.ExecutionException): 87
File (java.io.File): 68
Random (java.util.Random): 68
CyclicBarrier (java.util.concurrent.CyclicBarrier): 68
HashSet (java.util.HashSet): 63