Use of org.apache.htrace.core.TraceScope in project YCSB by brianfrankcooper.
Class DBWrapper, method scan.
/**
* Perform a range scan for a set of records in the database.
* Each field/value pair from the result will be stored in a HashMap.
*
* @param table The name of the table
* @param startkey The record key of the first record to read.
* @param recordcount The number of records to read
* @param fields The list of fields to read, or null for all of them
* @param result A Vector of HashMaps, where each HashMap is a set of field/value pairs for one record
* @return The result of the operation.
*/
public Status scan(String table, String startkey, int recordcount,
                   Set<String> fields, Vector<HashMap<String, ByteIterator>> result) {
  try (final TraceScope span = tracer.newScope(scopeStringScan)) {
    long ist = measurements.getIntendedStartTimeNs();
    long st = System.nanoTime();
    Status res = db.scan(table, startkey, recordcount, fields, result);
    long en = System.nanoTime();
    measure("SCAN", res, ist, st, en);
    measurements.reportStatus("SCAN", res);
    return res;
  }
}
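The measure helper called above is private to DBWrapper and is not shown on this page. A minimal sketch of what it might look like, assuming Measurements records latencies in microseconds and failed operations are reported under a separate name (both assumptions, not verified against the YCSB source):

private void measure(String op, Status result, long intendedStartTimeNanos,
                     long startTimeNanos, long endTimeNanos) {
  // Assumption: failed operations are measured under "<OP>-FAILED".
  String measurementName = op;
  if (result == null || !result.isOk()) {
    measurementName = op + "-FAILED";
  }
  // Convert nanoseconds to microseconds for both the actual latency and the
  // intended latency (measured from the scheduled start time, which corrects
  // for coordinated omission).
  measurements.measure(measurementName,
      (int) ((endTimeNanos - startTimeNanos) / 1000));
  measurements.measureIntended(measurementName,
      (int) ((endTimeNanos - intendedStartTimeNanos) / 1000));
}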
Use of org.apache.htrace.core.TraceScope in project YCSB by brianfrankcooper.
Class DBWrapper, method read.
/**
* Read a record from the database. Each field/value pair from the result
* will be stored in a HashMap.
*
* @param table The name of the table
* @param key The record key of the record to read.
* @param fields The list of fields to read, or null for all of them
* @param result A HashMap of field/value pairs for the result
* @return The result of the operation.
*/
public Status read(String table, String key, Set<String> fields,
                   HashMap<String, ByteIterator> result) {
  try (final TraceScope span = tracer.newScope(scopeStringRead)) {
    long ist = measurements.getIntendedStartTimeNs();
    long st = System.nanoTime();
    Status res = db.read(table, key, fields, result);
    long en = System.nanoTime();
    measure("READ", res, ist, st, en);
    measurements.reportStatus("READ", res);
    return res;
  }
}
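All of these DBWrapper methods rely on TraceScope being AutoCloseable: try-with-resources guarantees the span is closed even when the wrapped DB call throws. A self-contained sketch of that pattern against the HTrace 4.x core API (the tracer name and empty configuration are illustrative; a real deployment would configure a SpanReceiver and Sampler through the configuration):

import java.util.Collections;
import org.apache.htrace.core.HTraceConfiguration;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

// Build a Tracer from an empty configuration (no sampler or receiver, so
// nothing is actually reported; enough to exercise the scope lifecycle).
Tracer tracer = new Tracer.Builder("example")
    .conf(HTraceConfiguration.fromMap(Collections.<String, String>emptyMap()))
    .build();
try (TraceScope scope = tracer.newScope("READ")) {
  // traced work goes here; the scope closes automatically on exit
}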
Use of org.apache.htrace.core.TraceScope in project YCSB by brianfrankcooper.
Class DBWrapper, method insert.
/**
* Insert a record in the database. Any field/value pairs in the specified
* values HashMap will be written into the record with the specified
* record key.
*
* @param table The name of the table
* @param key The record key of the record to insert.
* @param values A HashMap of field/value pairs to insert in the record
* @return The result of the operation.
*/
public Status insert(String table, String key, HashMap<String, ByteIterator> values) {
  try (final TraceScope span = tracer.newScope(scopeStringInsert)) {
    long ist = measurements.getIntendedStartTimeNs();
    long st = System.nanoTime();
    Status res = db.insert(table, key, values);
    long en = System.nanoTime();
    measure("INSERT", res, ist, st, en);
    measurements.reportStatus("INSERT", res);
    return res;
  }
}
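For context, a hypothetical caller of the wrapped insert (table, key, and field names are made up; StringByteIterator is YCSB's String-backed ByteIterator):

HashMap<String, ByteIterator> values = new HashMap<>();
values.put("field0", new StringByteIterator("some value"));
Status status = dbWrapper.insert("usertable", "user0", values);
// Status exposes isOk() to distinguish success from error results.
if (!status.isOk()) {
  System.err.println("insert failed: " + status.getName());
}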
Use of org.apache.htrace.core.TraceScope in project hadoop by apache.
Class FSOutputSummer, method writeChecksumChunks.
/**
 * Generate checksums for the given data chunks and output chunks & checksums
 * to the underlying output stream.
 */
private void writeChecksumChunks(byte[] b, int off, int len) throws IOException {
  sum.calculateChunkedSums(b, off, len, checksum, 0);
  TraceScope scope = createWriteTraceScope();
  try {
    for (int i = 0; i < len; i += sum.getBytesPerChecksum()) {
      int chunkLen = Math.min(sum.getBytesPerChecksum(), len - i);
      int ckOffset = i / sum.getBytesPerChecksum() * getChecksumSize();
      writeChunk(b, off + i, chunkLen, checksum, ckOffset, getChecksumSize());
    }
  } finally {
    if (scope != null) {
      scope.close();
    }
  }
}
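The loop walks the buffer one checksum chunk at a time, and ckOffset maps chunk i to its slot in the checksum array (chunk index times checksum size). An illustrative standalone sketch of that arithmetic, assuming 512 bytes per checksum and a 4-byte checksum such as CRC32C:

int bytesPerChecksum = 512; // assumed bytes-per-checksum setting
int checksumSize = 4;       // e.g. CRC32C
int len = 1300;             // yields chunks of 512, 512, and 276 bytes
for (int i = 0; i < len; i += bytesPerChecksum) {
  int chunkLen = Math.min(bytesPerChecksum, len - i);
  int ckOffset = i / bytesPerChecksum * checksumSize;
  System.out.printf("chunk at offset %d, length %d, checksum at offset %d%n",
      i, chunkLen, ckOffset);
}
// Prints: chunk at offset 0, length 512, checksum at offset 0
//         chunk at offset 512, length 512, checksum at offset 4
//         chunk at offset 1024, length 276, checksum at offset 8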
Use of org.apache.htrace.core.TraceScope in project hadoop by apache.
Class DFSOutputStream, method closeImpl.
protected synchronized void closeImpl() throws IOException {
  if (isClosed()) {
    getStreamer().getLastException().check(true);
    return;
  }
  try {
    // flush from all upper layers
    flushBuffer();
    if (currentPacket != null) {
      enqueueCurrentPacket();
    }
    if (getStreamer().getBytesCurBlock() != 0) {
      setCurrentPacketToEmpty();
    }
    // flush all data to Datanodes
    flushInternal();
    // get last block before destroying the streamer
    ExtendedBlock lastBlock = getStreamer().getBlock();
    try (TraceScope ignored = dfsClient.getTracer().newScope("completeFile")) {
      completeFile(lastBlock);
    }
  } catch (ClosedChannelException ignored) {
  } finally {
    // Failures may happen when flushing data.
    // Streamers may keep waiting for the new block information.
    // Thus need to force closing these threads.
    // Don't need to call setClosed() because closeThreads(true)
    // calls setClosed() in the finally block.
    closeThreads(true);
  }
}
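Note the two scope-handling idioms across these Hadoop examples: closeImpl uses try-with-resources, while writeChecksumChunks closes a possibly-null scope in a finally block, since createWriteTraceScope may return null when no tracer is wired in. A minimal sketch of that null-safe pattern, with a hypothetical nullable tracer field:

// tracer may be null when tracing is disabled (hypothetical field).
TraceScope scope = (tracer == null) ? null : tracer.newScope("write");
try {
  // ... do the traced work ...
} finally {
  if (scope != null) {
    scope.close();
  }
}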