Use of org.apache.hadoop.mapreduce.Counters in project phoenix by apache.
The class IndexScrutinyTableOutput, method writeJobResults.
/**
 * Writes the results of the given jobs to the metadata table.
 * @param conn connection to use
 * @param cmdLineArgs arguments the {@code IndexScrutinyTool} was run with
 * @param completedJobs completed MR jobs
 * @throws IOException if the MR job counters cannot be read
 * @throws SQLException if writing to the metadata table fails
 */
public static void writeJobResults(Connection conn, String[] cmdLineArgs,
        List<Job> completedJobs) throws IOException, SQLException {
    PreparedStatement pStmt = conn.prepareStatement(UPSERT_METADATA_SQL);
    for (Job job : completedJobs) {
        Configuration conf = job.getConfiguration();
        String qDataTable = PhoenixConfigurationUtil.getScrutinyDataTableName(conf);
        final PTable pdataTable = PhoenixRuntime.getTable(conn, qDataTable);
        final String qIndexTable = PhoenixConfigurationUtil.getScrutinyIndexTableName(conf);
        final PTable pindexTable = PhoenixRuntime.getTable(conn, qIndexTable);
        SourceTable sourceTable = PhoenixConfigurationUtil.getScrutinySourceTable(conf);
        long scrutinyExecuteTime = PhoenixConfigurationUtil.getScrutinyExecuteTimestamp(conf);
        SourceTargetColumnNames columnNames =
                SourceTable.DATA_TABLE_SOURCE.equals(sourceTable)
                        ? new DataSourceColNames(pdataTable, pindexTable)
                        : new IndexSourceColNames(pdataTable, pindexTable);
        Counters counters = job.getCounters();
        int index = 1;
        pStmt.setString(index++, columnNames.getQualifiedSourceTableName());
        pStmt.setString(index++, columnNames.getQualifiedTargetTableName());
        pStmt.setLong(index++, scrutinyExecuteTime);
        pStmt.setString(index++, sourceTable.name());
        pStmt.setString(index++, Arrays.toString(cmdLineArgs));
        pStmt.setLong(index++, counters.findCounter(PhoenixJobCounters.INPUT_RECORDS).getValue());
        pStmt.setLong(index++, counters.findCounter(PhoenixJobCounters.FAILED_RECORDS).getValue());
        pStmt.setLong(index++, counters.findCounter(PhoenixScrutinyJobCounters.VALID_ROW_COUNT).getValue());
        pStmt.setLong(index++, counters.findCounter(PhoenixScrutinyJobCounters.INVALID_ROW_COUNT).getValue());
        pStmt.setLong(index++, counters.findCounter(PhoenixScrutinyJobCounters.BAD_COVERED_COL_VAL_COUNT).getValue());
        pStmt.setLong(index++, counters.findCounter(PhoenixScrutinyJobCounters.BATCHES_PROCESSED_COUNT).getValue());
        pStmt.setString(index++, Arrays.toString(columnNames.getSourceDynamicCols().toArray()));
        pStmt.setString(index++, Arrays.toString(columnNames.getTargetDynamicCols().toArray()));
        pStmt.setString(index++, getSqlQueryAllInvalidRows(conn, columnNames, scrutinyExecuteTime));
        pStmt.setString(index++, getSqlQueryMissingTargetRows(conn, columnNames, scrutinyExecuteTime));
        pStmt.setString(index++, getSqlQueryBadCoveredColVal(conn, columnNames, scrutinyExecuteTime));
        pStmt.addBatch();
    }
    pStmt.executeBatch();
    conn.commit();
}
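For orientation, a caller would typically run the IndexScrutinyTool first and then hand its completed MR jobs to writeJobResults. The sketch below is a minimal, hypothetical driver, not Phoenix's actual wiring: the getJobs() accessor on the tool and the JDBC URL are assumptions made for illustration, and the Phoenix class imports are omitted.

import java.sql.Connection;
import java.sql.DriverManager;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.util.ToolRunner;

public class ScrutinyResultsWriterSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        IndexScrutinyTool tool = new IndexScrutinyTool();
        // Runs the scrutiny MR job(s); these are the same cmdLineArgs that
        // writeJobResults later records in the output metadata row.
        ToolRunner.run(conf, tool, args);
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            List<Job> completedJobs = tool.getJobs(); // assumed accessor, for illustration only
            IndexScrutinyTableOutput.writeJobResults(conn, args, completedJobs);
        }
    }
}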
Use of org.apache.hadoop.mapreduce.Counters in project phoenix by apache.
The class IndexScrutinyToolIT, method testScrutinyWhileTakingWrites.
/**
 * Tests running a scrutiny while updates and deletes are happening.
 * Since CURRENT_SCN is set, the scrutiny shouldn't report any issue.
 */
@Test
public void testScrutinyWhileTakingWrites() throws Exception {
    int id = 0;
    while (id < 1000) {
        int index = 1;
        dataTableUpsertStmt.setInt(index++, id);
        dataTableUpsertStmt.setString(index++, "name-" + id);
        dataTableUpsertStmt.setInt(index++, id);
        dataTableUpsertStmt.setTimestamp(index++, new Timestamp(testTime));
        dataTableUpsertStmt.executeUpdate();
        id++;
    }
    conn.commit();
    // CURRENT_SCN for scrutiny
    long scrutinyTS = EnvironmentEdgeManager.currentTimeMillis();
    // launch background upserts and deletes
    final Random random = new Random(0);
    Runnable backgroundUpserts = new Runnable() {
        @Override
        public void run() {
            int idToUpsert = random.nextInt(1000);
            try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
                PreparedStatement dataPS =
                        conn.prepareStatement(String.format(UPSERT_SQL, dataTableFullName));
                upsertRow(dataPS, idToUpsert, "modified-" + idToUpsert, idToUpsert + 1000);
                conn.commit();
            } catch (SQLException e) {
                e.printStackTrace();
            }
        }
    };
    Runnable backgroundDeletes = new Runnable() {
        @Override
        public void run() {
            int idToDelete = random.nextInt(1000);
            try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
                String deleteSql =
                        String.format(DELETE_SQL, indexTableFullName) + "WHERE \":ID\"=" + idToDelete;
                conn.createStatement().executeUpdate(deleteSql);
                conn.commit();
            } catch (SQLException e) {
                e.printStackTrace();
            }
        }
    };
    ScheduledExecutorService scheduledThreadPool = Executors.newScheduledThreadPool(2);
    scheduledThreadPool.scheduleWithFixedDelay(backgroundUpserts, 200, 200, TimeUnit.MILLISECONDS);
    scheduledThreadPool.scheduleWithFixedDelay(backgroundDeletes, 200, 200, TimeUnit.MILLISECONDS);
    // scrutiny should report everything as ok
    List<Job> completedJobs =
            runScrutinyCurrentSCN(schemaName, dataTableName, indexTableName, scrutinyTS);
    Job job = completedJobs.get(0);
    assertTrue(job.isSuccessful());
    Counters counters = job.getCounters();
    assertEquals(1000, getCounterValue(counters, VALID_ROW_COUNT));
    assertEquals(0, getCounterValue(counters, INVALID_ROW_COUNT));
    scheduledThreadPool.shutdown();
    scheduledThreadPool.awaitTermination(10000, TimeUnit.MILLISECONDS);
}
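The getCounterValue helper used in the assertions is not part of this excerpt. A plausible sketch is a thin wrapper over Counters.findCounter, assuming the counter keys are the PhoenixScrutinyJobCounters enum values that the scrutiny mapper increments:

private static long getCounterValue(Counters counters, PhoenixScrutinyJobCounters counterKey) {
    // findCounter(Enum) creates the counter if it does not exist yet,
    // so a job that never touched it simply reports 0.
    return counters.findCounter(counterKey).getValue();
}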
Use of org.apache.hadoop.mapreduce.Counters in project hbase by apache.
The class TestTableMapReduce, method verifyJobCountersAreEmitted.
/**
 * Verify that HBase scan counters are emitted by the job.
 * @param job the completed job whose counters to check
 * @throws IOException if the job counters cannot be retrieved
 */
private void verifyJobCountersAreEmitted(Job job) throws IOException {
    Counters counters = job.getCounters();
    Counter counter =
            counters.findCounter(TableRecordReaderImpl.HBASE_COUNTER_GROUP_NAME, "RPC_CALLS");
    assertNotNull("Unable to find Job counter for HBase scan metrics, RPC_CALLS", counter);
    assertTrue("Counter value for RPC_CALLS should be larger than 0", counter.getValue() > 0);
}
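RPC_CALLS is just one entry in the counter group named by TableRecordReaderImpl.HBASE_COUNTER_GROUP_NAME. To inspect everything the group contains, one could iterate it, as in this small sketch (the exact set of counters varies across HBase versions):

private static void dumpScanMetricCounters(Counters counters) {
    // CounterGroup is Iterable<Counter>, so this prints every scan metric the job emitted.
    for (Counter counter : counters.getGroup(TableRecordReaderImpl.HBASE_COUNTER_GROUP_NAME)) {
        System.out.println(counter.getName() + " = " + counter.getValue());
    }
}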
Use of org.apache.hadoop.mapreduce.Counters in project hbase by apache.
The class TestSyncTable, method testSyncTableDoPutsFalse.
@Test
public void testSyncTableDoPutsFalse() throws Exception {
    final TableName sourceTableName = TableName.valueOf(name.getMethodName() + "_source");
    final TableName targetTableName = TableName.valueOf(name.getMethodName() + "_target");
    Path testDir = TEST_UTIL.getDataTestDirOnTestFS("testSyncTableDoPutsFalse");
    writeTestData(sourceTableName, targetTableName);
    hashSourceTable(sourceTableName, testDir);
    Counters syncCounters = syncTables(sourceTableName, targetTableName, testDir, "--doPuts=false");
    assertTargetDoPutsFalse(70, sourceTableName, targetTableName);
    assertEquals(60, syncCounters.findCounter(Counter.ROWSWITHDIFFS).getValue());
    assertEquals(10, syncCounters.findCounter(Counter.SOURCEMISSINGROWS).getValue());
    assertEquals(10, syncCounters.findCounter(Counter.TARGETMISSINGROWS).getValue());
    assertEquals(50, syncCounters.findCounter(Counter.SOURCEMISSINGCELLS).getValue());
    assertEquals(50, syncCounters.findCounter(Counter.TARGETMISSINGCELLS).getValue());
    assertEquals(20, syncCounters.findCounter(Counter.DIFFERENTCELLVALUES).getValue());
    TEST_UTIL.deleteTable(sourceTableName);
    TEST_UTIL.deleteTable(targetTableName);
}
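The syncTables helper is not included in this excerpt. A hedged sketch of what it most likely does: append the hash directory and the two table names after any extra options, run the SyncTable tool, and hand back its counters. Reading the counters straight off the tool instance is an assumption about the test harness, not a documented API:

private Counters syncTables(TableName sourceTableName, TableName targetTableName,
        Path testDir, String... options) throws Exception {
    SyncTable syncTable = new SyncTable(TEST_UTIL.getConfiguration());
    // SyncTable expects: [options...] <sourcehashdir> <sourcetable> <targettable>
    String[] args = Arrays.copyOf(options, options.length + 3);
    args[options.length] = testDir.toString();
    args[options.length + 1] = sourceTableName.getNameAsString();
    args[options.length + 2] = targetTableName.getNameAsString();
    assertEquals("sync table job failed", 0, syncTable.run(args));
    return syncTable.counters; // assumed field exposing the completed job's counters
}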
Use of org.apache.hadoop.mapreduce.Counters in project hbase by apache.
The class TestSyncTable, method testSyncTableDoDeletesFalse.
@Test
public void testSyncTableDoDeletesFalse() throws Exception {
    final TableName sourceTableName = TableName.valueOf(name.getMethodName() + "_source");
    final TableName targetTableName = TableName.valueOf(name.getMethodName() + "_target");
    Path testDir = TEST_UTIL.getDataTestDirOnTestFS("testSyncTableDoDeletesFalse");
    writeTestData(sourceTableName, targetTableName);
    hashSourceTable(sourceTableName, testDir);
    Counters syncCounters = syncTables(sourceTableName, targetTableName, testDir, "--doDeletes=false");
    assertTargetDoDeletesFalse(100, sourceTableName, targetTableName);
    assertEquals(60, syncCounters.findCounter(Counter.ROWSWITHDIFFS).getValue());
    assertEquals(10, syncCounters.findCounter(Counter.SOURCEMISSINGROWS).getValue());
    assertEquals(10, syncCounters.findCounter(Counter.TARGETMISSINGROWS).getValue());
    assertEquals(50, syncCounters.findCounter(Counter.SOURCEMISSINGCELLS).getValue());
    assertEquals(50, syncCounters.findCounter(Counter.TARGETMISSINGCELLS).getValue());
    assertEquals(20, syncCounters.findCounter(Counter.DIFFERENTCELLVALUES).getValue());
    TEST_UTIL.deleteTable(sourceTableName);
    TEST_UTIL.deleteTable(targetTableName);
}
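Outside the test harness, the same modes can be exercised by running SyncTable directly. A minimal sketch via ToolRunner, using the argument order implied by the tests above (options first, then hash dir, source table, target table); the table names and hash path here are placeholders, not values from the tests:

Configuration conf = HBaseConfiguration.create();
// Suppress deletes on the target, as in testSyncTableDoDeletesFalse above.
int exitCode = ToolRunner.run(conf, new SyncTable(conf),
        new String[] { "--doDeletes=false", "/tmp/sync-hashes", "source_table", "target_table" });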