Use of org.apache.beam.vendor.calcite.v1_28_0.com.google.common.annotations.VisibleForTesting in project hadoop by apache.
The class ResourceManager, method removeApplication.
@VisibleForTesting
static void removeApplication(Configuration conf, String applicationId) throws Exception {
  RMStateStore rmStore = RMStateStoreFactory.getStore(conf);
  rmStore.setResourceManager(new ResourceManager());
  rmStore.init(conf);
  rmStore.start();
  try {
    ApplicationId removeAppId = ApplicationId.fromString(applicationId);
    LOG.info("Deleting application " + removeAppId + " from state store");
    rmStore.removeApplication(removeAppId);
    LOG.info("Application is deleted from state store");
  } finally {
    rmStore.stop();
  }
}
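A minimal sketch of how a test might invoke this helper against the in-memory state store. The test class name and the application id are invented for illustration, the class has to live in the org.apache.hadoop.yarn.server.resourcemanager package because removeApplication is package-private, and a real test would persist an application before removing it.

package org.apache.hadoop.yarn.server.resourcemanager;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemoryRMStateStore;
import org.junit.Test;

public class TestRemoveApplicationSketch {

  @Test
  public void removesApplicationFromStateStore() throws Exception {
    Configuration conf = new YarnConfiguration();
    // Point the RM at the in-memory store so the sketch needs no ZooKeeper or filesystem.
    conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
    // Hypothetical application id; removing an id that was never stored may fail
    // depending on the store implementation, so persist one first in a real test.
    ResourceManager.removeApplication(conf, "application_1700000000000_0001");
  }
}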
Use of org.apache.beam.vendor.calcite.v1_28_0.com.google.common.annotations.VisibleForTesting in project hadoop by apache.
The class NativeAzureFileSystem, method pathToKey.
/**
 * Convert the path to a key. By convention, any leading or trailing slash is
 * removed, except for the special case of a single slash.
 * @param path the path to convert to a key
 * @return the key string
 */
@VisibleForTesting
public String pathToKey(Path path) {
  // Convert the path to a URI to parse the scheme, the authority, and the
  // path from the path object.
  URI tmpUri = path.toUri();
  String pathUri = tmpUri.getPath();
  // The scheme and authority are valid. If the path is empty, add a "/"
  // separator to list the root of the container.
  Path newPath = path;
  if ("".equals(pathUri)) {
    newPath = new Path(tmpUri.toString() + Path.SEPARATOR);
  }
  // Verify the path is absolute if it refers to a Windows drive scheme.
  if (!newPath.isAbsolute()) {
    throw new IllegalArgumentException("Path must be absolute: " + path);
  }
  String key = newPath.toUri().getPath();
  key = removeTrailingSlash(key);
  key = encodeTrailingPeriod(key);
  if (key.length() == 1) {
    return key;
  } else {
    // remove the initial slash
    return key.substring(1);
  }
}
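A small usage sketch (the class name is hypothetical) of the mapping the javadoc describes: leading and trailing slashes are stripped from the key, while the bare container root keeps its single slash.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.azure.NativeAzureFileSystem;

public class PathToKeySketch {

  // Prints the keys produced for a nested path and for the container root.
  static void demo(NativeAzureFileSystem fs) {
    // "/foo/bar/" becomes "foo/bar": leading and trailing slashes are dropped.
    String nested = fs.pathToKey(new Path("/foo/bar/"));
    // "/" stays "/": the single-slash root is the one case where the slash is kept.
    String root = fs.pathToKey(new Path("/"));
    System.out.println(nested + " , " + root);
  }
}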
Use of org.apache.beam.vendor.calcite.v1_28_0.com.google.common.annotations.VisibleForTesting in project hadoop by apache.
The class RollingLevelDBTimelineStore, method evictOldStartTimes.
@VisibleForTesting
long evictOldStartTimes(long minStartTime) throws IOException {
  LOG.info("Searching for start times to evict earlier than " + minStartTime);
  long batchSize = 0;
  long totalCount = 0;
  long startTimesCount = 0;
  WriteBatch writeBatch = null;
  DBIterator iterator = null;
  try {
    writeBatch = starttimedb.createWriteBatch();
    ReadOptions readOptions = new ReadOptions();
    readOptions.fillCache(false);
    iterator = starttimedb.iterator(readOptions);
    // seek to the first start time entry
    iterator.seekToFirst();
    // evaluate each start time entry to see if it needs to be evicted or not
    while (iterator.hasNext()) {
      Map.Entry<byte[], byte[]> current = iterator.next();
      byte[] entityKey = current.getKey();
      byte[] entityValue = current.getValue();
      long startTime = readReverseOrderedLong(entityValue, 0);
      if (startTime < minStartTime) {
        ++batchSize;
        ++startTimesCount;
        writeBatch.delete(entityKey);
        // a large delete will hold the lock for too long
        if (batchSize >= writeBatchSize) {
          if (LOG.isDebugEnabled()) {
            LOG.debug("Preparing to delete a batch of " + batchSize + " old start times");
          }
          starttimedb.write(writeBatch);
          if (LOG.isDebugEnabled()) {
            LOG.debug("Deleted batch of " + batchSize + ". Total start times deleted so far this cycle: " + startTimesCount);
          }
          IOUtils.cleanup(LOG, writeBatch);
          writeBatch = starttimedb.createWriteBatch();
          batchSize = 0;
        }
      }
      ++totalCount;
    }
    if (LOG.isDebugEnabled()) {
      LOG.debug("Preparing to delete a batch of " + batchSize + " old start times");
    }
    starttimedb.write(writeBatch);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Deleted batch of " + batchSize + ". Total start times deleted so far this cycle: " + startTimesCount);
    }
    LOG.info("Deleted " + startTimesCount + "/" + totalCount + " start time entities earlier than " + minStartTime);
  } finally {
    IOUtils.cleanup(LOG, writeBatch);
    IOUtils.cleanup(LOG, iterator);
  }
  return startTimesCount;
}
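As a rough illustration of how a caller might drive this eviction, the helper below computes the cut-off from a time-to-live. The class and method names are invented, and the class must sit in the org.apache.hadoop.yarn.server.timeline package because evictOldStartTimes is package-private.

package org.apache.hadoop.yarn.server.timeline;

import java.io.IOException;

// Hypothetical helper, not part of Hadoop.
class StartTimeRetentionSketch {

  // Evicts every start-time entry older than "now minus ttlMillis" and returns
  // the number of entries removed.
  static long runRetentionPass(RollingLevelDBTimelineStore store, long ttlMillis)
      throws IOException {
    long minStartTime = System.currentTimeMillis() - ttlMillis;
    return store.evictOldStartTimes(minStartTime);
  }
}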
Use of org.apache.beam.vendor.calcite.v1_28_0.com.google.common.annotations.VisibleForTesting in project hadoop by apache.
The class LeveldbTimelineStore, method deleteNextEntity.
@VisibleForTesting
boolean deleteNextEntity(String entityType, byte[] reverseTimestamp, LeveldbIterator iterator, LeveldbIterator pfIterator, boolean seeked) throws IOException {
  WriteBatch writeBatch = null;
  try {
    KeyBuilder kb = KeyBuilder.newInstance().add(ENTITY_ENTRY_PREFIX).add(entityType);
    byte[] typePrefix = kb.getBytesForLookup();
    kb.add(reverseTimestamp);
    if (!seeked) {
      iterator.seek(kb.getBytesForLookup());
    }
    if (!iterator.hasNext()) {
      return false;
    }
    byte[] entityKey = iterator.peekNext().getKey();
    if (!prefixMatches(typePrefix, typePrefix.length, entityKey)) {
      return false;
    }
    // read the start time and entity id from the current key
    KeyParser kp = new KeyParser(entityKey, typePrefix.length + 8);
    String entityId = kp.getNextString();
    int prefixlen = kp.getOffset();
    byte[] deletePrefix = new byte[prefixlen];
    System.arraycopy(entityKey, 0, deletePrefix, 0, prefixlen);
    writeBatch = db.createWriteBatch();
    if (LOG.isDebugEnabled()) {
      LOG.debug("Deleting entity type:" + entityType + " id:" + entityId);
    }
    // remove start time from cache and db
    writeBatch.delete(createStartTimeLookupKey(entityId, entityType));
    EntityIdentifier entityIdentifier = new EntityIdentifier(entityId, entityType);
    startTimeReadCache.remove(entityIdentifier);
    startTimeWriteCache.remove(entityIdentifier);
    // delete current entity
    for (; iterator.hasNext(); iterator.next()) {
      byte[] key = iterator.peekNext().getKey();
      if (!prefixMatches(entityKey, prefixlen, key)) {
        break;
      }
      writeBatch.delete(key);
      if (key.length == prefixlen) {
        continue;
      }
      if (key[prefixlen] == PRIMARY_FILTERS_COLUMN[0]) {
        kp = new KeyParser(key, prefixlen + PRIMARY_FILTERS_COLUMN.length);
        String name = kp.getNextString();
        Object value = GenericObjectMapper.read(key, kp.getOffset());
        deleteKeysWithPrefix(writeBatch, addPrimaryFilterToKey(name, value, deletePrefix), pfIterator);
        if (LOG.isDebugEnabled()) {
          LOG.debug("Deleting entity type:" + entityType + " id:" + entityId + " primary filter entry " + name + " " + value);
        }
      } else if (key[prefixlen] == RELATED_ENTITIES_COLUMN[0]) {
        kp = new KeyParser(key, prefixlen + RELATED_ENTITIES_COLUMN.length);
        String type = kp.getNextString();
        String id = kp.getNextString();
        byte[] relatedEntityStartTime = getStartTime(id, type);
        if (relatedEntityStartTime == null) {
          LOG.warn("Found no start time for related entity " + id + " of type " + type + " while deleting " + entityId + " of type " + entityType);
          continue;
        }
        writeBatch.delete(createReverseRelatedEntityKey(id, type, relatedEntityStartTime, entityId, entityType));
        if (LOG.isDebugEnabled()) {
          LOG.debug("Deleting entity type:" + entityType + " id:" + entityId + " from invisible reverse related entity entry of type:" + type + " id:" + id);
        }
      } else if (key[prefixlen] == INVISIBLE_REVERSE_RELATED_ENTITIES_COLUMN[0]) {
        kp = new KeyParser(key, prefixlen + INVISIBLE_REVERSE_RELATED_ENTITIES_COLUMN.length);
        String type = kp.getNextString();
        String id = kp.getNextString();
        byte[] relatedEntityStartTime = getStartTime(id, type);
        if (relatedEntityStartTime == null) {
          LOG.warn("Found no start time for reverse related entity " + id + " of type " + type + " while deleting " + entityId + " of type " + entityType);
          continue;
        }
        writeBatch.delete(createRelatedEntityKey(id, type, relatedEntityStartTime, entityId, entityType));
        if (LOG.isDebugEnabled()) {
          LOG.debug("Deleting entity type:" + entityType + " id:" + entityId + " from related entity entry of type:" + type + " id:" + id);
        }
      }
    }
    WriteOptions writeOptions = new WriteOptions();
    writeOptions.sync(true);
    db.write(writeBatch, writeOptions);
    return true;
  } catch (DBException e) {
    throw new IOException(e);
  } finally {
    IOUtils.cleanup(LOG, writeBatch);
  }
}
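The deletion loop above stops at the first key that no longer shares the entity's key prefix. The method below is a minimal sketch of the prefix test that behaviour assumes; the real prefixMatches helper in LeveldbTimelineStore may be implemented differently (for example via a byte comparator), but the assumed semantics are the same: the first prefixlen bytes of the key must equal the prefix.

// Assumed semantics of the prefix test used by deleteNextEntity.
static boolean prefixMatches(byte[] prefix, int prefixlen, byte[] key) {
  if (key.length < prefixlen) {
    return false;
  }
  for (int i = 0; i < prefixlen; i++) {
    if (prefix[i] != key[i]) {
      return false;
    }
  }
  return true;
}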
Use of org.apache.beam.vendor.calcite.v1_28_0.com.google.common.annotations.VisibleForTesting in project hadoop by apache.
The class LeveldbTimelineStore, method discardOldEntities.
/**
 * Discards entities with start timestamp less than or equal to the given
 * timestamp.
 */
@VisibleForTesting
void discardOldEntities(long timestamp) throws IOException, InterruptedException {
  byte[] reverseTimestamp = writeReverseOrderedLong(timestamp);
  long totalCount = 0;
  long t1 = System.currentTimeMillis();
  try {
    List<String> entityTypes = getEntityTypes();
    for (String entityType : entityTypes) {
      LeveldbIterator iterator = null;
      LeveldbIterator pfIterator = null;
      long typeCount = 0;
      try {
        deleteLock.writeLock().lock();
        iterator = getDbIterator(false);
        pfIterator = getDbIterator(false);
        if (deletionThread != null && deletionThread.isInterrupted()) {
          throw new InterruptedException();
        }
        boolean seeked = false;
        while (deleteNextEntity(entityType, reverseTimestamp, iterator, pfIterator, seeked)) {
          typeCount++;
          totalCount++;
          seeked = true;
          if (deletionThread != null && deletionThread.isInterrupted()) {
            throw new InterruptedException();
          }
        }
      } catch (IOException e) {
        LOG.error("Got IOException while deleting entities for type " + entityType + ", continuing to next type", e);
      } finally {
        IOUtils.cleanup(LOG, iterator, pfIterator);
        deleteLock.writeLock().unlock();
        if (typeCount > 0) {
          LOG.info("Deleted " + typeCount + " entities of type " + entityType);
        }
      }
    }
  } finally {
    long t2 = System.currentTimeMillis();
    LOG.info("Discarded " + totalCount + " entities for timestamp " + timestamp + " and earlier in " + (t2 - t1) / 1000.0 + " seconds");
  }
}
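A hypothetical periodic-deletion loop, not Hadoop's actual deletion thread, showing the pattern a caller could follow: each cycle discards everything that started before now minus a configured TTL, then sleeps. The field names are assumptions, and the class must live in the same package because discardOldEntities is package-private.

package org.apache.hadoop.yarn.server.timeline;

import java.io.IOException;

class DiscardLoopSketch implements Runnable {

  private final LeveldbTimelineStore store;
  private final long ttlMillis;
  private final long intervalMillis;

  DiscardLoopSketch(LeveldbTimelineStore store, long ttlMillis, long intervalMillis) {
    this.store = store;
    this.ttlMillis = ttlMillis;
    this.intervalMillis = intervalMillis;
  }

  @Override
  public void run() {
    while (!Thread.currentThread().isInterrupted()) {
      try {
        // Everything that started before "now minus the TTL" is discarded.
        store.discardOldEntities(System.currentTimeMillis() - ttlMillis);
        Thread.sleep(intervalMillis);
      } catch (IOException e) {
        // A real caller would log and continue; one failed cycle should not stop retention.
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
    }
  }
}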