Use of org.apache.htrace.TraceScope in project hbase by apache.
The class RecoverableZooKeeper, method create.
/**
 * <p>
 * A NONSEQUENTIAL create is an idempotent operation; retry before throwing
 * an exception. This method will not, however, throw a NodeExists exception
 * back to the application.
 * </p>
 * <p>
 * A SEQUENTIAL create is NOT idempotent. An identifier must be attached so
 * that, on retry, we can verify whether the previous attempt succeeded.
 * </p>
 *
 * @return the actual path of the created znode
 */
public String create(String path, byte[] data, List<ACL> acl, CreateMode createMode)
    throws KeeperException, InterruptedException {
  TraceScope traceScope = null;
  try {
    traceScope = Trace.startSpan("RecoverableZookeeper.create");
    byte[] newData = appendMetaData(data);
    switch (createMode) {
      case EPHEMERAL:
      case PERSISTENT:
        return createNonSequential(path, newData, acl, createMode);
      case EPHEMERAL_SEQUENTIAL:
      case PERSISTENT_SEQUENTIAL:
        return createSequential(path, newData, acl, createMode);
      default:
        throw new IllegalArgumentException("Unrecognized CreateMode: " + createMode);
    }
  } finally {
    if (traceScope != null) {
      traceScope.close();
    }
  }
}
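The SEQUENTIAL case is the subtle one: a create that fails with a connection loss may nevertheless have succeeded on the server. A minimal sketch of the verification the javadoc alludes to, assuming a hypothetical hasOurIdentifier() helper that recognizes the marker appendMetaData embeds in the data (the real createSequential in HBase performs an equivalent but more involved check):
// Sketch only: before retrying a SEQUENTIAL create after a connection
// loss, scan the parent's children for a node whose data carries the
// identifier that appendMetaData embedded. If one is found, the previous
// attempt actually succeeded and its path is returned instead of
// creating a duplicate. hasOurIdentifier() is an assumed helper.
private String findIfPreviousCreateSucceeded(String path)
    throws KeeperException, InterruptedException {
  int lastSlashIdx = path.lastIndexOf('/');
  String parent = path.substring(0, lastSlashIdx);
  String nodePrefix = path.substring(lastSlashIdx + 1);
  for (String node : checkZk().getChildren(parent, false)) {
    if (!node.startsWith(nodePrefix)) {
      continue;
    }
    String nodePath = parent + "/" + node;
    byte[] data = checkZk().getData(nodePath, false, null);
    if (hasOurIdentifier(data)) {
      return nodePath;  // the "failed" create actually went through
    }
  }
  return null;  // no trace of our identifier; safe to retry the create
}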
Use of org.apache.htrace.TraceScope in project hbase by apache.
The class RecoverableZooKeeper, method getData.
/**
 * getData is an idempotent operation. Retry before throwing an exception.
 * @return the data of the given znode
 */
public byte[] getData(String path, Watcher watcher, Stat stat)
    throws KeeperException, InterruptedException {
  TraceScope traceScope = null;
  try {
    traceScope = Trace.startSpan("RecoverableZookeeper.getData");
    RetryCounter retryCounter = retryCounterFactory.create();
    while (true) {
      try {
        byte[] revData = checkZk().getData(path, watcher, stat);
        return removeMetaData(revData);
      } catch (KeeperException e) {
        switch (e.code()) {
          case CONNECTIONLOSS:
          case OPERATIONTIMEOUT:
            retryOrThrow(retryCounter, e, "getData");
            break;
          default:
            throw e;
        }
      }
      retryCounter.sleepUntilNextRetry();
    }
  } finally {
    if (traceScope != null) {
      traceScope.close();
    }
  }
}
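getData and exists below both delegate transient failures to the same retryOrThrow helper, which is not shown in these excerpts. A plausible sketch of what it does, assuming RetryCounter exposes shouldRetry() and getMaxAttempts() and the class has LOG and quorum fields (an illustration, not the verbatim HBase helper):
// Sketch of the retry gate used by the idempotent operations here. If
// attempts remain, return and let the caller sleep and loop; otherwise
// surface the original KeeperException. shouldRetry()/getMaxAttempts()
// are assumed RetryCounter accessors; LOG and quorum are assumed fields.
private void retryOrThrow(RetryCounter retryCounter, KeeperException e,
    String opName) throws KeeperException {
  if (!retryCounter.shouldRetry()) {
    LOG.error("ZooKeeper " + opName + " failed after "
        + retryCounter.getMaxAttempts() + " attempts");
    throw e;
  }
  LOG.debug("Retrying " + opName + ", possibly transient ZooKeeper issue; quorum="
      + quorum + ", exception=" + e);
}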
Use of org.apache.htrace.TraceScope in project hbase by apache.
The class RecoverableZooKeeper, method exists.
/**
 * exists is an idempotent operation. Retry before throwing an exception.
 * @return a Stat instance, or null if the znode does not exist
 */
public Stat exists(String path, Watcher watcher)
    throws KeeperException, InterruptedException {
  TraceScope traceScope = null;
  try {
    traceScope = Trace.startSpan("RecoverableZookeeper.exists");
    RetryCounter retryCounter = retryCounterFactory.create();
    while (true) {
      try {
        return checkZk().exists(path, watcher);
      } catch (KeeperException e) {
        switch (e.code()) {
          case CONNECTIONLOSS:
          case OPERATIONTIMEOUT:
            retryOrThrow(retryCounter, e, "exists");
            break;
          default:
            throw e;
        }
      }
      retryCounter.sleepUntilNextRetry();
    }
  } finally {
    if (traceScope != null) {
      traceScope.close();
    }
  }
}
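Taken together, these wrappers let callers treat a flaky ZooKeeper connection as reliable: transient CONNECTIONLOSS and OPERATIONTIMEOUT errors are retried internally, and only non-transient KeeperExceptions escape. A hypothetical caller (the znode path and method name are illustrative):
// Illustrative only: read a znode through the retrying wrapper. The
// path "/hbase/master" and this method are hypothetical.
static byte[] readMasterZnode(RecoverableZooKeeper rzk)
    throws KeeperException, InterruptedException {
  Stat stat = new Stat();
  if (rzk.exists("/hbase/master", null) != null) {
    return rzk.getData("/hbase/master", null, stat);
  }
  return null;  // znode absent
}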
Use of org.apache.htrace.TraceScope in project hbase by apache.
The class HFileReaderImpl, method readBlock.
@Override
public HFileBlock readBlock(long dataBlockOffset, long onDiskBlockSize,
    final boolean cacheBlock, boolean pread, final boolean isCompaction,
    boolean updateCacheMetrics, BlockType expectedBlockType,
    DataBlockEncoding expectedDataBlockEncoding) throws IOException {
  if (dataBlockIndexReader == null) {
    throw new IOException("Block index not loaded");
  }
  long trailerOffset = trailer.getLoadOnOpenDataOffset();
  if (dataBlockOffset < 0 || dataBlockOffset >= trailerOffset) {
    throw new IOException("Requested block is out of range: " + dataBlockOffset
        + ", lastDataBlockOffset: " + trailer.getLastDataBlockOffset()
        + ", trailer.getLoadOnOpenDataOffset: " + trailerOffset);
  }
  // For any given block from any given file, synchronize reads for said
  // block. Without a cache, this synchronizing is needless overhead, but
  // really the other choice is to duplicate work (which the cache would
  // prevent you from doing).
  BlockCacheKey cacheKey = new BlockCacheKey(name, dataBlockOffset,
      this.isPrimaryReplicaReader(), expectedBlockType);
  boolean useLock = false;
  IdLock.Entry lockEntry = null;
  TraceScope traceScope = Trace.startSpan("HFileReaderImpl.readBlock");
  try {
    while (true) {
      // Check cache for block. If found, return.
      if (cacheConf.shouldReadBlockFromCache(expectedBlockType)) {
        if (useLock) {
          lockEntry = offsetLock.getLockEntry(dataBlockOffset);
        }
        // Try to get the block from the block cache. If useLock is true, this
        // is the second pass through the loop and should not be counted as a
        // block cache miss.
        HFileBlock cachedBlock = getCachedBlock(cacheKey, cacheBlock, useLock,
            isCompaction, updateCacheMetrics, expectedBlockType,
            expectedDataBlockEncoding);
        if (cachedBlock != null) {
          if (LOG.isTraceEnabled()) {
            LOG.trace("From Cache " + cachedBlock);
          }
          if (Trace.isTracing()) {
            traceScope.getSpan().addTimelineAnnotation("blockCacheHit");
          }
          assert cachedBlock.isUnpacked() : "Packed block leak.";
          if (cachedBlock.getBlockType().isData()) {
            if (updateCacheMetrics) {
              HFile.DATABLOCK_READ_COUNT.increment();
            }
            // Validate encoding type for data blocks. We include encoding
            // type in the cache key, and we expect it to match on a cache hit.
            if (cachedBlock.getDataBlockEncoding() != dataBlockEncoder.getDataBlockEncoding()) {
              throw new IOException("Cached block under key " + cacheKey
                  + " has wrong encoding: " + cachedBlock.getDataBlockEncoding()
                  + " (expected: " + dataBlockEncoder.getDataBlockEncoding() + ")");
            }
          }
          // Cache hit. Return!
          return cachedBlock;
        }
        if (!useLock && cacheBlock && cacheConf.shouldLockOnCacheMiss(expectedBlockType)) {
          // Check the cache again, this time holding the per-offset lock.
          useLock = true;
          continue;
        }
        // Carry on, please load.
      }
      if (Trace.isTracing()) {
        traceScope.getSpan().addTimelineAnnotation("blockCacheMiss");
      }
      // Load block from filesystem.
      HFileBlock hfileBlock = fsBlockReader.readBlockData(dataBlockOffset, onDiskBlockSize, pread);
      validateBlockType(hfileBlock, expectedBlockType);
      HFileBlock unpacked = hfileBlock.unpack(hfileContext, fsBlockReader);
      BlockType.BlockCategory category = hfileBlock.getBlockType().getCategory();
      // Cache the block if necessary.
      if (cacheBlock && cacheConf.shouldCacheBlockOnRead(category)) {
        cacheConf.getBlockCache().cacheBlock(cacheKey,
            cacheConf.shouldCacheCompressed(category) ? hfileBlock : unpacked,
            cacheConf.isInMemory(), this.cacheConf.isCacheDataInL1());
      }
      if (updateCacheMetrics && hfileBlock.getBlockType().isData()) {
        HFile.DATABLOCK_READ_COUNT.increment();
      }
      return unpacked;
    }
  } finally {
    traceScope.close();
    if (lockEntry != null) {
      offsetLock.releaseLockEntry(lockEntry);
    }
  }
}
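Across all of these call sites, the htrace usage follows one idiom: start a span, add timeline annotations only when tracing is actually on, and close the scope in a finally block so the span ends on every exit path, exceptions included. Distilled into a minimal sketch (the span and annotation names are placeholders):
// The recurring TraceScope pattern, reduced to its skeleton.
void tracedOperation() {
  TraceScope scope = Trace.startSpan("myOperation");  // placeholder name
  try {
    if (Trace.isTracing()) {
      // Annotations are guarded so untraced calls pay almost nothing.
      scope.getSpan().addTimelineAnnotation("checkpoint");  // placeholder
    }
    // ... do the traced work ...
  } finally {
    scope.close();  // ends the span even if the work above throws
  }
}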
Use of org.apache.htrace.TraceScope in project hbase by apache.
The class IntegrationTestSendTraceRequests, method insertData.
private LinkedBlockingQueue<Long> insertData() throws IOException, InterruptedException {
  LinkedBlockingQueue<Long> rowKeys = new LinkedBlockingQueue<>(25000);
  BufferedMutator ht = util.getConnection().getBufferedMutator(this.tableName);
  byte[] value = new byte[300];
  for (int x = 0; x < 5000; x++) {
    // Each outer iteration writes a batch of five rows under one
    // always-sampled span.
    TraceScope traceScope = Trace.startSpan("insertData", Sampler.ALWAYS);
    try {
      for (int i = 0; i < 5; i++) {
        long rk = random.nextLong();
        rowKeys.add(rk);
        Put p = new Put(Bytes.toBytes(rk));
        for (int y = 0; y < 10; y++) {
          random.nextBytes(value);
          p.addColumn(familyName, Bytes.toBytes(random.nextLong()), value);
        }
        ht.mutate(p);
      }
      // Periodically flush the table so memstores are written out.
      if ((x % 1000) == 0) {
        admin.flush(tableName);
      }
    } finally {
      traceScope.close();
    }
  }
  admin.flush(tableName);
  return rowKeys;
}
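insertData only fills the rowKeys queue; the integration test's read path drains it. A hypothetical consumer in the same spirit, issuing always-sampled traced Gets against the same table (the method and span name are illustrative, not the test's actual code):
// Hypothetical consumer for the rowKeys queue built above.
private void readBack(LinkedBlockingQueue<Long> rowKeys) throws IOException {
  try (Table table = util.getConnection().getTable(tableName)) {
    Long rk;
    while ((rk = rowKeys.poll()) != null) {
      TraceScope traceScope = Trace.startSpan("readBack", Sampler.ALWAYS);
      try {
        Get get = new Get(Bytes.toBytes(rk));
        get.addFamily(familyName);
        table.get(get);  // the result is discarded; we want the trace
      } finally {
        traceScope.close();
      }
    }
  }
}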