
Example 26 with HTableInterface

Use of org.apache.hadoop.hbase.client.HTableInterface in project Solbase by Photobucket.

From class SolbaseCoreContainer, method readSchemaXMLBytes.

public static byte[] readSchemaXMLBytes(String indexName) throws IOException {
    HTableInterface table = SolbaseUtil.getSchemaInfoTable();
    try {
        // Only the base name (text before the first "~") is used to look up the schema
        int idx = indexName.indexOf("~");
        if (idx >= 0) {
            indexName = indexName.substring(0, idx);
        }
        Get schemaGet = new Get(Bytes.toBytes(indexName));
        Result schemaQueryResult = table.get(schemaGet);
        byte[] schemaValue = schemaQueryResult.getValue(Bytes.toBytes("info"), Bytes.toBytes("schema"));
        return schemaValue;
    } finally {
        SolbaseUtil.releaseTable(table);
    }
}
Also used : Get(org.apache.hadoop.hbase.client.Get) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) Result(org.apache.hadoop.hbase.client.Result)

Example 27 with HTableInterface

Use of org.apache.hadoop.hbase.client.HTableInterface in project Solbase by Photobucket.

From class SolbaseCoreContainer, method writeSchema.

public static void writeSchema(String indexName, String schemaXml) throws IOException {
    HTableInterface table = SolbaseUtil.getSchemaInfoTable();
    try {
        Put schemaPut = new Put(Bytes.toBytes(indexName));
        schemaPut.add(Bytes.toBytes("info"), Bytes.toBytes("schema"), Bytes.toBytes(schemaXml));
        table.put(schemaPut);
    } finally {
        SolbaseUtil.releaseTable(table);
    }
}
Also used : HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) Put(org.apache.hadoop.hbase.client.Put)
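
Together, Examples 26 and 27 act as a small schema store keyed by index name, with the XML kept in the info:schema column. Below is a minimal sketch of how the two helpers might be combined; the copySchema method and its error handling are illustrative additions, not part of Solbase, and assume SolbaseUtil is already configured to reach the schema-info table.

public static void copySchema(String sourceIndex, String targetIndex) throws IOException {
    // Hypothetical helper built on the two methods above
    byte[] schemaBytes = SolbaseCoreContainer.readSchemaXMLBytes(sourceIndex);
    if (schemaBytes == null) {
        throw new IOException("No schema stored for index " + sourceIndex);
    }
    // readSchemaXMLBytes returns the raw cell value; writeSchema expects the XML as a String
    SolbaseCoreContainer.writeSchema(targetIndex, Bytes.toString(schemaBytes));
}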

Example 28 with HTableInterface

Use of org.apache.hadoop.hbase.client.HTableInterface in project phoenix by apache.

From class StatsCollectorIT, method testCompactUpdatesStats.

private void testCompactUpdatesStats(Integer statsUpdateFreq, String tableName) throws Exception {
    int nRows = 10;
    Connection conn = getConnection(statsUpdateFreq);
    PreparedStatement stmt;
    conn.createStatement().execute("CREATE TABLE " + tableName + "(k CHAR(1) PRIMARY KEY, v INTEGER, w INTEGER) " + (!tableDDLOptions.isEmpty() ? tableDDLOptions + "," : "") + HColumnDescriptor.KEEP_DELETED_CELLS + "=" + Boolean.FALSE);
    stmt = conn.prepareStatement("UPSERT INTO " + tableName + " VALUES(?,?,?)");
    for (int i = 0; i < nRows; i++) {
        stmt.setString(1, Character.toString((char) ('a' + i)));
        stmt.setInt(2, i);
        stmt.setInt(3, i);
        stmt.executeUpdate();
    }
    conn.commit();
    compactTable(conn, physicalTableName);
    if (statsUpdateFreq != 0) {
        invalidateStats(conn, tableName);
    } else {
        // Confirm that when we have a non-zero STATS_UPDATE_FREQ_MS_ATTRIB, after we run
        // UPDATE STATISTICS, the new statistics are faulted in as expected.
        List<KeyRange> keyRanges = getAllSplits(conn, tableName);
        assertNotEquals(nRows + 1, keyRanges.size());
        // If we've set MIN_STATS_UPDATE_FREQ_MS_ATTRIB, an UPDATE STATISTICS will invalidate the cache
        // and force the new stats to be pulled over.
        int rowCount = conn.createStatement().executeUpdate("UPDATE STATISTICS " + tableName);
        assertEquals(10, rowCount);
    }
    List<KeyRange> keyRanges = getAllSplits(conn, tableName);
    assertEquals(nRows + 1, keyRanges.size());
    int nDeletedRows = conn.createStatement().executeUpdate("DELETE FROM " + tableName + " WHERE V < " + nRows / 2);
    conn.commit();
    assertEquals(5, nDeletedRows);
    Scan scan = new Scan();
    scan.setRaw(true);
    PhoenixConnection phxConn = conn.unwrap(PhoenixConnection.class);
    try (HTableInterface htable = phxConn.getQueryServices().getTable(Bytes.toBytes(tableName))) {
        ResultScanner scanner = htable.getScanner(scan);
        Result result;
        while ((result = scanner.next()) != null) {
            System.out.println(result);
        }
    }
    compactTable(conn, physicalTableName);
    scan = new Scan();
    scan.setRaw(true);
    phxConn = conn.unwrap(PhoenixConnection.class);
    try (HTableInterface htable = phxConn.getQueryServices().getTable(Bytes.toBytes(tableName))) {
        ResultScanner scanner = htable.getScanner(scan);
        Result result;
        while ((result = scanner.next()) != null) {
            System.out.println(result);
        }
    }
    if (statsUpdateFreq != 0) {
        invalidateStats(conn, tableName);
    } else {
        assertEquals(nRows + 1, keyRanges.size());
        // If we've set STATS_UPDATE_FREQ_MS_ATTRIB, an UPDATE STATISTICS will invalidate the cache
        // and force us to pull over the new stats
        int rowCount = conn.createStatement().executeUpdate("UPDATE STATISTICS " + tableName);
        assertEquals(5, rowCount);
    }
    keyRanges = getAllSplits(conn, tableName);
    assertEquals(nRows / 2 + 1, keyRanges.size());
    ResultSet rs = conn.createStatement().executeQuery("SELECT SUM(GUIDE_POSTS_ROW_COUNT) FROM " + "\"" + SYSTEM_CATALOG_SCHEMA + "\".\"" + SYSTEM_STATS_TABLE + "\"" + " WHERE PHYSICAL_NAME='" + physicalTableName + "'");
    rs.next();
    assertEquals(nRows - nDeletedRows, rs.getLong(1));
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) KeyRange(org.apache.phoenix.query.KeyRange) Connection(java.sql.Connection) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) ResultSet(java.sql.ResultSet) PreparedStatement(java.sql.PreparedStatement) Scan(org.apache.hadoop.hbase.client.Scan) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) Result(org.apache.hadoop.hbase.client.Result)
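
The part of this test that touches HTableInterface directly is the raw scan: it dumps every cell, including delete markers, so the effect of the DELETE and the subsequent compaction can be inspected. A minimal standalone sketch of that idiom, assuming an existing PhoenixConnection named conn and a physical table name in tableName:

Scan rawScan = new Scan();
// setRaw(true) returns delete markers and all cell versions, not just the latest data
rawScan.setRaw(true);
try (HTableInterface htable = conn.getQueryServices().getTable(Bytes.toBytes(tableName));
     ResultScanner scanner = htable.getScanner(rawScan)) {
    for (Result result : scanner) {
        System.out.println(result);
    }
}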

Example 29 with HTableInterface

Use of org.apache.hadoop.hbase.client.HTableInterface in project phoenix by apache.

From class LocalIndexIT, method testLocalIndexAutomaticRepair.

@Test
public void testLocalIndexAutomaticRepair() throws Exception {
    if (isNamespaceMapped) {
        return;
    }
    PhoenixConnection conn = DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class);
    try (HTableInterface metaTable = conn.getQueryServices().getTable(TableName.META_TABLE_NAME.getName());
        HBaseAdmin admin = conn.getQueryServices().getAdmin()) {
        Statement statement = conn.createStatement();
        final String tableName = "T_AUTO_MATIC_REPAIR";
        String indexName = "IDX_T_AUTO_MATIC_REPAIR";
        String indexName1 = "IDX_T_AUTO_MATIC_REPAIR_1";
        statement.execute("create table " + tableName + " (id integer not null,fn varchar," + "cf1.ln varchar constraint pk primary key(id)) split on (1,2,3,4,5)");
        statement.execute("create local index " + indexName + " on " + tableName + "  (fn,cf1.ln)");
        statement.execute("create local index " + indexName1 + " on " + tableName + "  (fn)");
        for (int i = 0; i < 7; i++) {
            statement.execute("upsert into " + tableName + "  values(" + i + ",'fn" + i + "','ln" + i + "')");
        }
        conn.commit();
        ResultSet rs = statement.executeQuery("SELECT COUNT(*) FROM " + indexName);
        assertTrue(rs.next());
        assertEquals(7, rs.getLong(1));
        List<HRegionInfo> tableRegions = admin.getTableRegions(TableName.valueOf(tableName));
        admin.disableTable(tableName);
        copyLocalIndexHFiles(config, tableRegions.get(0), tableRegions.get(1), false);
        copyLocalIndexHFiles(config, tableRegions.get(3), tableRegions.get(0), false);
        admin.enableTable(tableName);
        int count = getCount(conn, tableName, "L#0");
        assertTrue(count > 14);
        admin.majorCompact(TableName.valueOf(tableName));
        // need to wait for rebuilding of corrupted local index region
        int tryCount = 5;
        while (tryCount-- > 0 && count != 14) {
            Thread.sleep(15000);
            count = getCount(conn, tableName, "L#0");
        }
        assertEquals(14, count);
        rs = statement.executeQuery("SELECT COUNT(*) FROM " + indexName1);
        assertTrue(rs.next());
        assertEquals(7, rs.getLong(1));
        statement.execute("DROP INDEX " + indexName1 + " ON " + tableName);
        admin.majorCompact(TableName.valueOf(tableName));
        statement.execute("DROP INDEX " + indexName + " ON " + tableName);
        admin.majorCompact(TableName.valueOf(tableName));
        Thread.sleep(15000);
        admin.majorCompact(TableName.valueOf(tableName));
        Thread.sleep(15000);
        rs = statement.executeQuery("SELECT COUNT(*) FROM " + tableName);
        assertTrue(rs.next());
    }
}
Also used : HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) HBaseAdmin(org.apache.hadoop.hbase.client.HBaseAdmin) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) PhoenixStatement(org.apache.phoenix.jdbc.PhoenixStatement) Statement(java.sql.Statement) ResultSet(java.sql.ResultSet) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) Test(org.junit.Test)

Example 30 with HTableInterface

Use of org.apache.hadoop.hbase.client.HTableInterface in project phoenix by apache.

From class MutationState, method send.

@SuppressWarnings("deprecation")
private void send(Iterator<TableRef> tableRefIterator) throws SQLException {
    int i = 0;
    long[] serverTimeStamps = null;
    boolean sendAll = false;
    if (tableRefIterator == null) {
        serverTimeStamps = validateAll();
        tableRefIterator = mutations.keySet().iterator();
        sendAll = true;
    }
    Map<ImmutableBytesPtr, RowMutationState> valuesMap;
    List<TableRef> txTableRefs = Lists.newArrayListWithExpectedSize(mutations.size());
    Map<TableInfo, List<Mutation>> physicalTableMutationMap = Maps.newLinkedHashMap();
    // add tracing for this operation
    try (TraceScope trace = Tracing.startNewSpan(connection, "Committing mutations to tables")) {
        Span span = trace.getSpan();
        ImmutableBytesWritable indexMetaDataPtr = new ImmutableBytesWritable();
        boolean isTransactional;
        while (tableRefIterator.hasNext()) {
            // at this point we are going through mutations for each table
            final TableRef tableRef = tableRefIterator.next();
            valuesMap = mutations.get(tableRef);
            if (valuesMap == null || valuesMap.isEmpty()) {
                continue;
            }
            // Validate as we go if transactional since we can undo if a problem occurs (which is unlikely)
            long serverTimestamp = serverTimeStamps == null ? validate(tableRef, valuesMap) : serverTimeStamps[i++];
            final PTable table = tableRef.getTable();
            Iterator<Pair<PName, List<Mutation>>> mutationsIterator = addRowMutations(tableRef, valuesMap, serverTimestamp, false, sendAll);
            // build map from physical table to mutation list
            boolean isDataTable = true;
            while (mutationsIterator.hasNext()) {
                Pair<PName, List<Mutation>> pair = mutationsIterator.next();
                PName hTableName = pair.getFirst();
                List<Mutation> mutationList = pair.getSecond();
                TableInfo tableInfo = new TableInfo(isDataTable, hTableName, tableRef);
                List<Mutation> oldMutationList = physicalTableMutationMap.put(tableInfo, mutationList);
                if (oldMutationList != null)
                    mutationList.addAll(0, oldMutationList);
                isDataTable = false;
            }
            // For transactional tables, track the statement indexes as we send data over, since
            // none of the statements would have been committed in the event of a failure.
            if (table.isTransactional()) {
                addUncommittedStatementIndexes(valuesMap.values());
                if (txMutations.isEmpty()) {
                    txMutations = Maps.newHashMapWithExpectedSize(mutations.size());
                }
                // Keep all mutations we've encountered until a commit or rollback.
                // This is not ideal, but there's no good way to get the values back
                // in the event that we need to replay the commit.
                // Copy TableRef so we have the original PTable and know when the
                // indexes have changed.
                joinMutationState(new TableRef(tableRef), valuesMap, txMutations);
            }
        }
        long serverTimestamp = HConstants.LATEST_TIMESTAMP;
        Iterator<Entry<TableInfo, List<Mutation>>> mutationsIterator = physicalTableMutationMap.entrySet().iterator();
        while (mutationsIterator.hasNext()) {
            Entry<TableInfo, List<Mutation>> pair = mutationsIterator.next();
            TableInfo tableInfo = pair.getKey();
            byte[] htableName = tableInfo.getHTableName().getBytes();
            List<Mutation> mutationList = pair.getValue();
            //create a span per target table
            //TODO maybe we can be smarter about the table name to string here?
            Span child = Tracing.child(span, "Writing mutation batch for table: " + Bytes.toString(htableName));
            int retryCount = 0;
            boolean shouldRetry = false;
            do {
                TableRef origTableRef = tableInfo.getOrigTableRef();
                PTable table = origTableRef.getTable();
                table.getIndexMaintainers(indexMetaDataPtr, connection);
                final ServerCache cache = tableInfo.isDataTable() ? setMetaDataOnMutations(origTableRef, mutationList, indexMetaDataPtr) : null;
                // If we haven't retried yet, retry for this case only, as it's possible that
                // a split will occur after we send the index metadata cache to all known
                // region servers.
                shouldRetry = cache != null;
                SQLException sqlE = null;
                HTableInterface hTable = connection.getQueryServices().getTable(htableName);
                try {
                    if (table.isTransactional()) {
                        // Track tables to which we've sent uncommitted data
                        txTableRefs.add(origTableRef);
                        addDMLFence(table);
                        uncommittedPhysicalNames.add(table.getPhysicalName().getString());
                        // If the table has indexes, wrap the HTable in a delegate that attaches
                        // the necessary index metadata in the event of a rollback
                        if (!table.getIndexes().isEmpty()) {
                            hTable = new MetaDataAwareHTable(hTable, origTableRef);
                        }
                        TransactionAwareHTable txnAware = TransactionUtil.getTransactionAwareHTable(hTable, table.isImmutableRows());
                        // Only the data table is registered as a transaction participant; index
                        // tables (which are only written during a commit) don't need conflict detection.
                        if (tableInfo.isDataTable()) {
                            // Even for immutable, we need to do this so that an abort has the state
                            // necessary to generate the rows to delete.
                            addTransactionParticipant(txnAware);
                        } else {
                            txnAware.startTx(getTransaction());
                        }
                        hTable = txnAware;
                    }
                    long numMutations = mutationList.size();
                    GLOBAL_MUTATION_BATCH_SIZE.update(numMutations);
                    long startTime = System.currentTimeMillis();
                    child.addTimelineAnnotation("Attempt " + retryCount);
                    List<List<Mutation>> mutationBatchList = getMutationBatchList(batchSize, batchSizeBytes, mutationList);
                    for (List<Mutation> mutationBatch : mutationBatchList) {
                        hTable.batch(mutationBatch);
                        batchCount++;
                    }
                    if (logger.isDebugEnabled())
                        logger.debug("Sent batch of " + numMutations + " for " + Bytes.toString(htableName));
                    child.stop();
                    shouldRetry = false;
                    long mutationCommitTime = System.currentTimeMillis() - startTime;
                    GLOBAL_MUTATION_COMMIT_TIME.update(mutationCommitTime);
                    long mutationSizeBytes = calculateMutationSize(mutationList);
                    MutationMetric mutationsMetric = new MutationMetric(numMutations, mutationSizeBytes, mutationCommitTime);
                    mutationMetricQueue.addMetricsForTable(Bytes.toString(htableName), mutationsMetric);
                    if (tableInfo.isDataTable()) {
                        numRows -= numMutations;
                    }
                    // Remove batches as we process them
                    mutations.remove(origTableRef);
                } catch (Exception e) {
                    serverTimestamp = ServerUtil.parseServerTimestamp(e);
                    SQLException inferredE = ServerUtil.parseServerExceptionOrNull(e);
                    if (inferredE != null) {
                        if (shouldRetry && retryCount == 0 && inferredE.getErrorCode() == SQLExceptionCode.INDEX_METADATA_NOT_FOUND.getErrorCode()) {
                            // Swallow this exception once, as it's possible that we split after sending the index metadata
                            // and one of the region servers doesn't have it; the retry will cause it to be sent again.
                            // If it fails again, we don't retry.
                            String msg = "Swallowing exception and retrying after clearing meta cache on connection. " + inferredE;
                            logger.warn(LogUtil.addCustomAnnotations(msg, connection));
                            connection.getQueryServices().clearTableRegionCache(htableName);
                            // add a new child span as this one failed
                            child.addTimelineAnnotation(msg);
                            child.stop();
                            child = Tracing.child(span, "Failed batch, attempting retry");
                            continue;
                        }
                        e = inferredE;
                    }
                    // Throw to client an exception that indicates the statements that
                    // were not committed successfully.
                    sqlE = new CommitException(e, getUncommittedStatementIndexes(), serverTimestamp);
                } finally {
                    try {
                        if (cache != null)
                            cache.close();
                    } finally {
                        try {
                            hTable.close();
                        } catch (IOException e) {
                            if (sqlE != null) {
                                sqlE.setNextException(ServerUtil.parseServerException(e));
                            } else {
                                sqlE = ServerUtil.parseServerException(e);
                            }
                        }
                        if (sqlE != null) {
                            throw sqlE;
                        }
                    }
                }
            } while (shouldRetry && retryCount++ < 1);
        }
    }
}
Also used : ServerCache(org.apache.phoenix.cache.ServerCacheClient.ServerCache) SQLException(java.sql.SQLException) MutationMetric(org.apache.phoenix.monitoring.MutationMetricQueue.MutationMetric) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) Span(org.apache.htrace.Span) PTable(org.apache.phoenix.schema.PTable) Entry(java.util.Map.Entry) List(java.util.List) Pair(org.apache.hadoop.hbase.util.Pair) TransactionAwareHTable(org.apache.tephra.hbase.TransactionAwareHTable) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) TraceScope(org.apache.htrace.TraceScope) IOException(java.io.IOException) TransactionFailureException(org.apache.tephra.TransactionFailureException) IllegalDataException(org.apache.phoenix.schema.IllegalDataException) TimeoutException(java.util.concurrent.TimeoutException) TransactionConflictException(org.apache.tephra.TransactionConflictException) TableNotFoundException(org.apache.phoenix.schema.TableNotFoundException) SQLException(java.sql.SQLException) IOException(java.io.IOException) PName(org.apache.phoenix.schema.PName) Mutation(org.apache.hadoop.hbase.client.Mutation) PTableRef(org.apache.phoenix.schema.PTableRef) TableRef(org.apache.phoenix.schema.TableRef)

Aggregations

HTableInterface (org.apache.hadoop.hbase.client.HTableInterface): 122 uses
Result (org.apache.hadoop.hbase.client.Result): 43 uses
Put (org.apache.hadoop.hbase.client.Put): 42 uses
IOException (java.io.IOException): 38 uses
ArrayList (java.util.ArrayList): 27 uses
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 23 uses
Get (org.apache.hadoop.hbase.client.Get): 21 uses
Scan (org.apache.hadoop.hbase.client.Scan): 21 uses
Test (org.junit.Test): 20 uses
SQLException (java.sql.SQLException): 19 uses
HashMap (java.util.HashMap): 17 uses
ResultScanner (org.apache.hadoop.hbase.client.ResultScanner): 17 uses
Connection (java.sql.Connection): 15 uses
Delete (org.apache.hadoop.hbase.client.Delete): 12 uses
HBaseAdmin (org.apache.hadoop.hbase.client.HBaseAdmin): 12 uses
Mutation (org.apache.hadoop.hbase.client.Mutation): 12 uses
PhoenixIOException (org.apache.phoenix.exception.PhoenixIOException): 11 uses
ResultSet (java.sql.ResultSet): 10 uses
Map (java.util.Map): 9 uses
Configuration (org.apache.hadoop.conf.Configuration): 9 uses
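
Across the aggregated usages above, the recurring shape is the same: obtain an HTableInterface from a connection or pool, perform Gets, Puts, or Scans against it, and release it in a finally block (or via try-with-resources) so the underlying resources return to the pool. A minimal sketch of that pattern follows; the connection variable (assumed to be an HConnection) and the table and column names are illustrative only.

HTableInterface table = connection.getTable(TableName.valueOf("example_table"));
try {
    // Write a cell ...
    Put put = new Put(Bytes.toBytes("row-1"));
    put.add(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("value"));
    table.put(put);

    // ... and read it back
    Get get = new Get(Bytes.toBytes("row-1"));
    Result result = table.get(get);
    byte[] value = result.getValue(Bytes.toBytes("info"), Bytes.toBytes("q"));
} finally {
    // Always release the table; with pooled implementations close() returns it to the pool
    table.close();
}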