Search in sources:

Example 6 with CommitException

Use of org.apache.phoenix.execute.CommitException in the Apache Phoenix project.

The following example is taken from the class PartialIndexRebuilderIT, method testIndexWriteFailureDuringRebuild.

/**
 * Verifies partial-index-rebuild behavior when index writes fail both before and during the
 * rebuild. A controllable clock ({@code MyClock}) is injected so index-state timestamps and
 * rebuild batch boundaries can be advanced deterministically. The test simulates an initial
 * write failure by directly updating the index state in SYSTEM.CATALOG, lets the rebuilder
 * process the first batch, then injects a real write failure via a failing region observer,
 * and finally confirms the rebuilder starts over and brings the index back to ACTIVE with a
 * fully consistent state (checked by {@code IndexScrutiny}).
 *
 * @param indexStateOnFailure the index state a write failure should put the index into;
 *        DISABLE exercises the disable-then-rebuild path, otherwise the index is expected to
 *        stay usable (ACTIVE) while rebuilding
 * @throws Throwable on any assertion failure or unexpected error
 */
private void testIndexWriteFailureDuringRebuild(PIndexState indexStateOnFailure) throws Throwable {
    String schemaName = generateUniqueName();
    String tableName = generateUniqueName();
    String indexName = generateUniqueName();
    final String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
    final String fullIndexName = SchemaUtil.getTableName(schemaName, indexName);
    PTableKey key = new PTableKey(null, fullTableName);
    // Deterministic, manually-advanced clock; all timestamp-sensitive logic below keys off it.
    final MyClock clock = new MyClock(1000);
    EnvironmentEdgeManager.injectEdge(clock);
    try (Connection conn = DriverManager.getConnection(getUrl())) {
        PMetaData metaCache = conn.unwrap(PhoenixConnection.class).getMetaDataCache();
        // DISABLE_INDEX_ON_WRITE_FAILURE is true only when testing the DISABLE path.
        conn.createStatement().execute("CREATE TABLE " + fullTableName + "(k VARCHAR PRIMARY KEY, v1 VARCHAR, v2 VARCHAR, v3 VARCHAR) COLUMN_ENCODED_BYTES = 0, DISABLE_INDEX_ON_WRITE_FAILURE = " + (indexStateOnFailure == PIndexState.DISABLE));
        clock.time += 100;
        conn.createStatement().execute("CREATE INDEX " + indexName + " ON " + fullTableName + " (v1, v2)");
        clock.time += 100;
        // First row is written while the index is healthy, so it exists in both table and index.
        conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES('a','a','0')");
        conn.commit();
        clock.time += 100;
        HTableInterface metaTable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
        long disableTime = clock.currentTime();
        // Simulates an index write failure by flipping the index state in SYSTEM.CATALOG.
        // NOTE(review): the non-DISABLE path passes a negative timestamp — presumably the sign
        // encodes "still ACTIVE but needs rebuild" in Phoenix's disable-timestamp convention;
        // confirm against IndexUtil.updateIndexState.
        IndexUtil.updateIndexState(fullIndexName, indexStateOnFailure == PIndexState.DISABLE ? disableTime : -disableTime, metaTable, indexStateOnFailure);
        clock.time += 100;
        // Second row is written while the index is marked failed, so only the data table gets it.
        conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES('bb','bb', '11')");
        conn.commit();
        // Large enough to be in separate time batch
        clock.time += 2 * REBUILD_PERIOD;
        assertTrue(hasIndexWithState(metaCache, key, indexStateOnFailure));
        // Data table has both rows; index only has the pre-failure row.
        assertEquals(2, TestUtil.getRowCount(conn, fullTableName));
        assertEquals(1, TestUtil.getRowCount(conn, fullIndexName));
        // Third row, also written while the index is in the failed state.
        conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES('ccc','ccc','222')");
        conn.commit();
        assertEquals(3, TestUtil.getRowCount(conn, fullTableName));
        assertEquals(1, TestUtil.getRowCount(conn, fullIndexName));
        clock.time += 100;
        // DISABLE path transitions through INACTIVE before rebuild; otherwise the index stays ACTIVE.
        waitForIndexState(conn, fullTableName, fullIndexName, indexStateOnFailure == PIndexState.DISABLE ? PIndexState.INACTIVE : PIndexState.ACTIVE);
        clock.time += WAIT_AFTER_DISABLED;
        // First batch should have been processed
        runIndexRebuilder(fullTableName);
        assertEquals(2, TestUtil.getRowCount(conn, fullIndexName));
        // Simulate write failure
        TestUtil.addCoprocessor(conn, fullIndexName, WriteFailingRegionObserver.class);
        conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES('dddd','dddd','3333')");
        try {
            conn.commit();
            fail();
        } catch (CommitException e) {
        // Expected
        }
        assertTrue(TestUtil.checkIndexState(conn, fullIndexName, indexStateOnFailure, null));
        PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class);
        ResultSet rs = stmt.executeQuery("SELECT V2 FROM " + fullTableName + " WHERE V1 = 'a'");
        assertTrue(rs.next());
        assertEquals("0", rs.getString(1));
        // With a DISABLEd index the optimizer must fall back to the data table; otherwise
        // the (still ACTIVE) index remains the chosen plan.
        assertEquals(indexStateOnFailure == PIndexState.DISABLE ? fullTableName : fullIndexName, stmt.getQueryPlan().getContext().getCurrentTable().getTable().getName().getString());
        TestUtil.removeCoprocessor(conn, fullIndexName, WriteFailingRegionObserver.class);
        clock.time += 1000;
        waitForIndexState(conn, fullTableName, fullIndexName, indexStateOnFailure == PIndexState.DISABLE ? PIndexState.INACTIVE : PIndexState.ACTIVE);
        clock.time += WAIT_AFTER_DISABLED;
        // First batch should have been processed again because we started over
        runIndexRebuilder(fullTableName);
        assertEquals(3, TestUtil.getRowCount(conn, fullIndexName));
        clock.time += 2 * REBUILD_PERIOD;
        // Second batch should have been processed now
        runIndexRebuilder(fullTableName);
        clock.time += 2 * REBUILD_PERIOD;
        // One more pass lets the rebuilder observe completion and restore ACTIVE with a
        // zeroed disable timestamp.
        runIndexRebuilder(fullTableName);
        TestUtil.assertIndexState(conn, fullIndexName, PIndexState.ACTIVE, 0L);
        // Verify that other batches were processed
        IndexScrutiny.scrutinizeIndex(conn, fullTableName, fullIndexName);
    } finally {
        // Always restore the real clock so later tests are unaffected.
        EnvironmentEdgeManager.injectEdge(null);
    }
}
Also used : CommitException(org.apache.phoenix.execute.CommitException) PMetaData(org.apache.phoenix.schema.PMetaData) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) Connection(java.sql.Connection) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) ResultSet(java.sql.ResultSet) PTableKey(org.apache.phoenix.schema.PTableKey) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) PhoenixStatement(org.apache.phoenix.jdbc.PhoenixStatement)

Aggregations

CommitException (org.apache.phoenix.execute.CommitException)6 Connection (java.sql.Connection)4 PreparedStatement (java.sql.PreparedStatement)2 Test (org.junit.Test)2 ResultSet (java.sql.ResultSet)1 Properties (java.util.Properties)1 HTableInterface (org.apache.hadoop.hbase.client.HTableInterface)1 PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection)1 PhoenixStatement (org.apache.phoenix.jdbc.PhoenixStatement)1 PMetaData (org.apache.phoenix.schema.PMetaData)1 PTableKey (org.apache.phoenix.schema.PTableKey)1