Use of org.apache.hadoop.mapreduce.Counters in project phoenix by apache.
From class IndexScrutinyToolIT, method testBothDataAndIndexAsSource.
/**
 * Tests running with both the index and data tables as the source table. If we have an
 * incorrectly indexed row, it should be reported in each direction.
 */
@Test
public void testBothDataAndIndexAsSource() throws Exception {
    if (isOnlyIndexSingleCell()) {
        return;
    }
    // insert one valid row
    upsertRow(dataTableUpsertStmt, 1, "name-1", 94010);
    conn.commit();
    // disable the index and insert another row which is not indexed
    disableIndex();
    upsertRow(dataTableUpsertStmt, 2, "name-2", 95123);
    conn.commit();
    // insert a bad row into the index
    upsertIndexRow("badName", 2, 9999);
    conn.commit();
    List<Job> completedJobs =
        runScrutiny(schemaName, dataTableName, indexTableName, 10L, SourceTable.BOTH);
    assertEquals(2, completedJobs.size());
    for (Job job : completedJobs) {
        assertTrue(job.isSuccessful());
        Counters counters = job.getCounters();
        assertEquals(1, getCounterValue(counters, VALID_ROW_COUNT));
        assertEquals(1, getCounterValue(counters, INVALID_ROW_COUNT));
    }
}
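The assertions above read named counters off the Hadoop Counters object returned by the finished Job. A minimal sketch of what a getCounterValue helper can look like, assuming the counter names (VALID_ROW_COUNT, INVALID_ROW_COUNT) are constants of a job-counter enum:

private static long getCounterValue(Counters counters, Enum<?> counterName) {
    // Counters.findCounter(Enum) resolves the counter by its enum constant and
    // returns a zero-valued counter if it was never incremented.
    return counters.findCounter(counterName).getValue();
}

Because findCounter never returns null for an enum key, the helper is safe to call even for counters the job never touched.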
Use of org.apache.hadoop.mapreduce.Counters in project phoenix by apache.
From class IndexScrutinyToolIT, method testValidIndex.
/**
 * Tests a data table that is correctly indexed. Scrutiny should report all rows as valid.
 */
@Test
public void testValidIndex() throws Exception {
    // insert two rows
    upsertRow(dataTableUpsertStmt, 1, "name-1", 94010);
    upsertRow(dataTableUpsertStmt, 2, "name-2", 95123);
    conn.commit();
    int numDataRows = countRows(conn, dataTableFullName);
    int numIndexRows = countRows(conn, indexTableFullName);
    // scrutiny should report everything as ok
    List<Job> completedJobs = runScrutiny(schemaName, dataTableName, indexTableName);
    Job job = completedJobs.get(0);
    assertTrue(job.isSuccessful());
    Counters counters = job.getCounters();
    assertEquals(2, getCounterValue(counters, VALID_ROW_COUNT));
    assertEquals(0, getCounterValue(counters, INVALID_ROW_COUNT));
    // make sure row counts weren't modified by scrutiny
    assertEquals(numDataRows, countRows(conn, dataTableFullName));
    assertEquals(numIndexRows, countRows(conn, indexTableFullName));
}
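The row counts are snapshotted before the scrutiny run and re-checked afterwards to confirm the tool only reads and never mutates either table. A minimal JDBC sketch of such a countRows helper; only the call signature is taken from the test, the body is an assumption:

private static int countRows(Connection conn, String tableFullName) throws SQLException {
    // A full COUNT(*) scan is fine for the handful of rows these tests create.
    try (Statement stmt = conn.createStatement();
            ResultSet rs = stmt.executeQuery("SELECT COUNT(*) FROM " + tableFullName)) {
        assertTrue(rs.next());
        return rs.getInt(1);
    }
}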
Use of org.apache.hadoop.mapreduce.Counters in project phoenix by apache.
From class IndexScrutinyToolIT, method testEqualRowCountIndexIncorrect.
/**
 * Tests an index with the same number of rows as the data table, but one of the index rows is
 * incorrect. Scrutiny should report the invalid rows.
 */
@Test
public void testEqualRowCountIndexIncorrect() throws Exception {
    // insert one valid row
    upsertRow(dataTableUpsertStmt, 1, "name-1", 94010);
    conn.commit();
    // disable the index and insert another row which is not indexed
    disableIndex();
    upsertRow(dataTableUpsertStmt, 2, "name-2", 95123);
    conn.commit();
    // insert a bad row into the index
    upsertIndexRow("badName", 2, 9999);
    conn.commit();
    // scrutiny should report the bad row
    List<Job> completedJobs = runScrutiny(schemaName, dataTableName, indexTableName);
    Job job = completedJobs.get(0);
    assertTrue(job.isSuccessful());
    Counters counters = job.getCounters();
    assertEquals(1, getCounterValue(counters, VALID_ROW_COUNT));
    assertEquals(1, getCounterValue(counters, INVALID_ROW_COUNT));
}
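The tests bind rows through a shared upsertRow helper against a prepared UPSERT with three positional parameters. A minimal sketch, assuming the (ID, NAME, ZIP) column order from the CREATE TABLE statement shown in the expiry test further down:

private static void upsertRow(PreparedStatement stmt, int id, String name, int zip)
        throws SQLException {
    stmt.setInt(1, id);      // ID: the primary key
    stmt.setString(2, name); // NAME: the indexed column
    stmt.setInt(3, zip);     // ZIP: the covered column
    stmt.executeUpdate();
}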
Use of org.apache.hadoop.mapreduce.Counters in project phoenix by apache.
From class IndexScrutinyToolIT, method testMoreIndexRows.
/**
 * Tests when there are more index table rows than data table rows. Scrutiny should report the
 * number of incorrect rows when run with the index as the source table.
 */
@Test
public void testMoreIndexRows() throws Exception {
    if (isOnlyIndexSingleCell()) {
        return;
    }
    upsertRow(dataTableUpsertStmt, 1, "name-1", 95123);
    conn.commit();
    disableIndex();
    // these index rows won't have a corresponding data row
    upsertIndexRow("name-2", 2, 95124);
    upsertIndexRow("name-3", 3, 95125);
    conn.commit();
    List<Job> completedJobs =
        runScrutiny(schemaName, dataTableName, indexTableName, 10L, SourceTable.INDEX_TABLE_SOURCE);
    Job job = completedJobs.get(0);
    assertTrue(job.isSuccessful());
    Counters counters = job.getCounters();
    assertEquals(1, getCounterValue(counters, VALID_ROW_COUNT));
    assertEquals(2, getCounterValue(counters, INVALID_ROW_COUNT));
}
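upsertIndexRow writes straight into the index table, which only works here because the index was disabled first; the result is orphan index rows with no backing data row. A sketch under the assumption that the test holds a prepared UPSERT against the index table and that the index row lays out the indexed NAME, then the data-table PK, then the covered ZIP:

private void upsertIndexRow(String name, int id, int zip) throws SQLException {
    // indexTableUpsertStmt is assumed to be "UPSERT INTO <index table> VALUES (?, ?, ?)"
    indexTableUpsertStmt.setString(1, name); // indexed column
    indexTableUpsertStmt.setInt(2, id);      // data-table primary key
    indexTableUpsertStmt.setInt(3, zip);     // covered column
    indexTableUpsertStmt.executeUpdate();
}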
Use of org.apache.hadoop.mapreduce.Counters in project phoenix by apache.
From class NonParameterizedIndexScrutinyToolIT, method testScrutinyOnRowsNearExpiry.
@Test
public void testScrutinyOnRowsNearExpiry() throws Exception {
    String schema = generateUniqueName();
    String dataTableName = generateUniqueName();
    String indexTableName = generateUniqueName();
    String dataTableFullName = SchemaUtil.getTableName(schema, dataTableName);
    String dataTableDDL = "CREATE TABLE %s (ID INTEGER NOT NULL PRIMARY KEY, NAME VARCHAR, "
        + "ZIP INTEGER) TTL=" + TEST_TABLE_TTL;
    String indexTableDDL = "CREATE INDEX %s ON %s (NAME) INCLUDE (ZIP)";
    String upsertData = "UPSERT INTO %s VALUES (?, ?, ?)";
    IndexScrutinyMapperForTest.ScrutinyTestClock testClock =
        new IndexScrutinyMapperForTest.ScrutinyTestClock(0);
    try (Connection conn =
            DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES))) {
        conn.createStatement().execute(String.format(dataTableDDL, dataTableFullName));
        conn.createStatement().execute(
            String.format(indexTableDDL, indexTableName, dataTableFullName));
        // insert two rows
        PreparedStatement upsertDataStmt =
            conn.prepareStatement(String.format(upsertData, dataTableFullName));
        EnvironmentEdgeManager.injectEdge(testClock);
        upsertRow(upsertDataStmt, 1, "name-1", 98051);
        upsertRow(upsertDataStmt, 2, "name-2", 98052);
        conn.commit();
        List<Job> completedJobs = runScrutiny(schema, dataTableName, indexTableName);
        Job job = completedJobs.get(0);
        assertTrue(job.isSuccessful());
        Counters counters = job.getCounters();
        assertEquals(2, getCounterValue(counters, EXPIRED_ROW_COUNT));
        assertEquals(0, getCounterValue(counters, VALID_ROW_COUNT));
        assertEquals(0, getCounterValue(counters, INVALID_ROW_COUNT));
    } finally {
        EnvironmentEdgeManager.reset();
    }
}
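What makes the rows "near expiry" is the injected clock: EnvironmentEdgeManager lets a test swap HBase's notion of current time, so rows written to a table with a short TTL can be pushed past their expiry without sleeping. A hypothetical fast-forwarding edge as a sketch; the real ScrutinyTestClock in IndexScrutinyMapperForTest may behave differently:

// Hypothetical clock; requires org.apache.hadoop.hbase.util.EnvironmentEdge
// and java.util.concurrent.TimeUnit. (Older HBase versions name the edge
// method currentTimeMillis() instead of currentTime().)
static class FastForwardClock implements EnvironmentEdge {
    private final long start = System.currentTimeMillis();

    @Override
    public long currentTime() {
        // Report a time far enough ahead that start + TTL has already
        // passed, so rows written earlier in the test look expired.
        return start + TimeUnit.SECONDS.toMillis(TEST_TABLE_TTL + 1);
    }
}

Injection and cleanup follow the pattern in the test above: EnvironmentEdgeManager.injectEdge(...) before the writes, and EnvironmentEdgeManager.reset() in a finally block so other tests see the real clock again.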