Use of org.apache.hadoop.mapreduce.Counters in project phoenix by apache.
In class IndexScrutinyToolForTenantIT, method testGlobalViewOnMultiTenantTable.
/**
 * Tests that a global view on a multi-tenant table works as well.
 */
@Test
public void testGlobalViewOnMultiTenantTable() throws Exception {
    String globalViewName = generateUniqueName();
    String indexNameGlobal = generateUniqueName();
    connGlobal.createStatement()
        .execute(String.format(createViewStr, globalViewName, multiTenantTable));
    String idxStmtGlobal = String.format(createIndexStr, indexNameGlobal, globalViewName);
    connGlobal.createStatement().execute(idxStmtGlobal);
    connGlobal.createStatement()
        .execute(String.format(upsertQueryStr, globalViewName, "global", 5, "x"));
    connGlobal.commit();
    String[] argValues = getArgValues("", globalViewName, indexNameGlobal, 10L,
        SourceTable.INDEX_TABLE_SOURCE, false, null, null, null,
        EnvironmentEdgeManager.currentTimeMillis());
    List<Job> completedJobs = runScrutiny(IndexScrutinyMapperForTest.class, argValues);
    // Sunny case: both index and view are equal, 1 row
    assertEquals(1, completedJobs.size());
    for (Job job : completedJobs) {
        assertTrue(job.isSuccessful());
        Counters counters = job.getCounters();
        assertEquals(1, getCounterValue(counters, VALID_ROW_COUNT));
        assertEquals(0, getCounterValue(counters, INVALID_ROW_COUNT));
    }
}
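Every one of these tests reads counters through a getCounterValue helper whose body is not shown in the excerpts. A minimal sketch of what it presumably looks like, assuming the counter names (VALID_ROW_COUNT and friends) are statically imported enum constants from Phoenix's scrutiny job-counters enum, is just a thin wrapper over the Hadoop Counters API:

// Hypothetical reconstruction of the helper these tests call; assumes the
// counter constants are Enum values so Counters.findCounter(Enum<?>) can
// resolve them. findCounter creates the counter on demand, so a counter the
// job never touched reads as 0 rather than throwing.
private static long getCounterValue(Counters counters, Enum<?> counterName) {
    return counters.findCounter(counterName).getValue();
}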
Use of org.apache.hadoop.mapreduce.Counters in project phoenix by apache.
In class IndexScrutinyToolForTenantIT, method testWithOutput.
private void testWithOutput(OutputFormat outputFormat) throws Exception {
    // upsert three rows into the tenant view
    connTenant.createStatement()
        .execute(String.format(upsertQueryStr, tenantViewName, tenantId, 1, "x"));
    connTenant.createStatement()
        .execute(String.format(upsertQueryStr, tenantViewName, tenantId, 2, "x2"));
    connTenant.createStatement()
        .execute(String.format(upsertQueryStr, tenantViewName, tenantId, 3, "x3"));
    // write a bogus row directly into the index
    connTenant.createStatement().execute(String.format(
        "UPSERT INTO %s (\":ID\", \"0:NAME\") values (%d, '%s')",
        indexNameTenant, 5555, "wrongName"));
    connTenant.commit();
    // truncate the view-index physical table, so none of the three data rows
    // has an index row left
    ConnectionQueryServices queryServices =
        connGlobal.unwrap(PhoenixConnection.class).getQueryServices();
    Admin admin = queryServices.getAdmin();
    TableName tableName = TableName.valueOf(viewIndexTableName);
    admin.disableTable(tableName);
    admin.truncateTable(tableName, false);
    String[] argValues = getArgValues("", tenantViewName, indexNameTenant, 10L,
        SourceTable.DATA_TABLE_SOURCE, true, outputFormat, null, tenantId,
        EnvironmentEdgeManager.currentTimeMillis());
    List<Job> completedJobs = runScrutiny(IndexScrutinyMapperForTest.class, argValues);
    assertEquals(1, completedJobs.size());
    for (Job job : completedJobs) {
        assertTrue(job.isSuccessful());
        Counters counters = job.getCounters();
        assertEquals(0, getCounterValue(counters, VALID_ROW_COUNT));
        assertEquals(3, getCounterValue(counters, INVALID_ROW_COUNT));
    }
}
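When an assertion like the 3-invalid-rows check above fails, it helps to see everything the job counted. Counters is Iterable over CounterGroup, and each group is Iterable over Counter, so a throwaway debugging helper (dumpCounters is a hypothetical name, not part of the test) could look like:

// Hypothetical debugging helper: print every counter of a completed job.
// Needs org.apache.hadoop.mapreduce.Counter and
// org.apache.hadoop.mapreduce.CounterGroup in addition to the imports the
// tests already use; Job.getCounters() throws IOException.
private static void dumpCounters(Job job) throws IOException {
    for (CounterGroup group : job.getCounters()) {
        for (Counter counter : group) {
            System.out.println(group.getDisplayName() + "." + counter.getName()
                + " = " + counter.getValue());
        }
    }
}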
Use of org.apache.hadoop.mapreduce.Counters in project phoenix by apache.
In class IndexScrutinyToolIT, method testCoveredValueIncorrect.
/**
 * Tests an index where the index PK is correct (indexed column values are indexed
 * correctly), but a covered index value is incorrect. Scrutiny should report the
 * invalid row.
 */
@Test
public void testCoveredValueIncorrect() throws Exception {
    if (isOnlyIndexSingleCell()) {
        return;
    }
    // insert one valid row
    upsertRow(dataTableUpsertStmt, 1, "name-1", 94010);
    conn.commit();
    // disable index and insert another data row
    disableIndex();
    upsertRow(dataTableUpsertStmt, 2, "name-2", 95123);
    conn.commit();
    // insert a bad index row for the above data row
    upsertIndexRow("name-2", 2, 9999);
    conn.commit();
    // scrutiny should report the bad row
    List<Job> completedJobs = runScrutiny(schemaName, dataTableName, indexTableName);
    Job job = completedJobs.get(0);
    assertTrue(job.isSuccessful());
    Counters counters = job.getCounters();
    assertEquals(1, getCounterValue(counters, VALID_ROW_COUNT));
    assertEquals(1, getCounterValue(counters, INVALID_ROW_COUNT));
    assertEquals(1, getCounterValue(counters, BAD_COVERED_COL_VAL_COUNT));
}
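The last two assertions encode a containment relationship: a bad covered-column value is one way a row becomes invalid, so BAD_COVERED_COL_VAL_COUNT can never exceed INVALID_ROW_COUNT. A sketch of that invariant in its general form, reusable in any of these tests, would be:

// Sketch of the implied invariant: rows with bad covered-column values are a
// subset of invalid rows, so the first count is bounded by the second.
assertTrue(getCounterValue(counters, BAD_COVERED_COL_VAL_COUNT)
    <= getCounterValue(counters, INVALID_ROW_COUNT));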
Use of org.apache.hadoop.mapreduce.Counters in project phoenix by apache.
In class IndexScrutinyToolIT, method testBatching.
/**
 * Tests batching of row comparisons. Inserts 1001 rows, randomly deletes some of the
 * corresponding index rows to create invalid rows, and runs scrutiny with a batch
 * size of 10.
 */
@Test
public void testBatching() throws Exception {
    // insert 1001 data and index rows
    int numTestRows = 1001;
    for (int i = 0; i < numTestRows; i++) {
        upsertRow(dataTableUpsertStmt, i, "name-" + i, i + 1000);
    }
    conn.commit();
    disableIndex();
    // randomly delete some rows from the index
    Random random = new Random();
    for (int i = 0; i < 100; i++) {
        int idToDelete = random.nextInt(numTestRows);
        deleteRow(indexTableFullName, "WHERE \":ID\"=" + idToDelete);
    }
    conn.commit();
    // count the surviving rows, since random.nextInt can pick the same id twice
    int numRows = countRows(conn, indexTableFullName);
    int numDeleted = numTestRows - numRows;
    // run scrutiny with batch size of 10
    List<Job> completedJobs = runScrutiny(schemaName, dataTableName, indexTableName, 10L);
    Job job = completedJobs.get(0);
    assertTrue(job.isSuccessful());
    Counters counters = job.getCounters();
    assertEquals(numTestRows - numDeleted, getCounterValue(counters, VALID_ROW_COUNT));
    assertEquals(numDeleted, getCounterValue(counters, INVALID_ROW_COUNT));
    // 1001 / 10 + 1001 % 10 == 101, which matches ceil(1001 / 10) only because
    // the remainder here is 1
    assertEquals(numTestRows / 10 + numTestRows % 10,
        getCounterValue(counters, BATCHES_PROCESSED_COUNT));
}
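Note that the expected-batch expression numTestRows / 10 + numTestRows % 10 equals floor(n/10) + n%10, which coincides with the ceiling only when the remainder is 0 or 1; for, say, 1005 rows it would yield 105 instead of 101. Assuming the mapper processes full batches of 10 with one final partial batch, the robust form is a plain ceiling division:

// Standalone arithmetic sketch of the batch count the test expects.
int numTestRows = 1001;
int batchSize = 10;
// ceil(1001 / 10) == 101: 100 full batches plus one final batch of a single row
long expectedBatches = (numTestRows + batchSize - 1L) / batchSize;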
Use of org.apache.hadoop.mapreduce.Counters in project phoenix by apache.
In class IndexScrutinyToolIT, method testMoreDataRows.
/**
 * Tests the case where there are more data table rows than index table rows.
 * Scrutiny should report the number of invalid rows.
 */
@Test
public void testMoreDataRows() throws Exception {
    upsertRow(dataTableUpsertStmt, 1, "name-1", 95123);
    conn.commit();
    disableIndex();
    // these rows won't have a corresponding index row
    upsertRow(dataTableUpsertStmt, 2, "name-2", 95124);
    upsertRow(dataTableUpsertStmt, 3, "name-3", 95125);
    conn.commit();
    List<Job> completedJobs = runScrutiny(schemaName, dataTableName, indexTableName);
    Job job = completedJobs.get(0);
    assertTrue(job.isSuccessful());
    Counters counters = job.getCounters();
    assertEquals(1, getCounterValue(counters, VALID_ROW_COUNT));
    assertEquals(2, getCounterValue(counters, INVALID_ROW_COUNT));
}
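Since scrutiny here walks the data table, each data row is classified exactly once as either valid or invalid, so the two counters should always sum to the number of data rows upserted. A sketch of that sanity check, assuming the same getCounterValue helper and placed alongside the assertions above:

// Hedged sanity check: 3 data rows were upserted in total, and each lands in
// exactly one bucket when scrutinizing from the data-table side.
assertEquals(3, getCounterValue(counters, VALID_ROW_COUNT)
    + getCounterValue(counters, INVALID_ROW_COUNT));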