Example use of edu.umd.cs.findbugs.annotations.SuppressFBWarnings in the project hazelcast (by hazelcast), taken from the class JVMUtil, method isHotSpotCompressedOopsOrNull.
// not private for testing
@SuppressFBWarnings("NP_BOOLEAN_RETURN_NULL")
static Boolean isHotSpotCompressedOopsOrNull() {
try {
MBeanServer server = ManagementFactory.getPlatformMBeanServer();
ObjectName mbean = new ObjectName("com.sun.management:type=HotSpotDiagnostic");
Object[] objects = { "UseCompressedOops" };
String[] strings = { "java.lang.String" };
String operation = "getVMOption";
CompositeDataSupport compressedOopsValue = (CompositeDataSupport) server.invoke(mbean, operation, objects, strings);
return Boolean.valueOf(compressedOopsValue.get("value").toString());
} catch (Exception e) {
getLogger(JVMUtil.class).fine("Failed to read HotSpot specific configuration: " + e.getMessage());
}
return null;
}
Example use of edu.umd.cs.findbugs.annotations.SuppressFBWarnings in the project jmxtrans (by jmxtrans), taken from the class JmxTransformer, method stopServices.
// There is a sleep to work around a Quartz issue. The issue is marked to be
// fixed, but will require further analysis. This should not be reported by
// Findbugs, but as a more complex issue.
@SuppressFBWarnings(value = "SWL_SLEEP_WITH_LOCK_HELD", justification = "Workaround for Quartz issue")
private synchronized void stopServices() throws LifecycleException {
try {
// Shutdown the scheduler
if (serverScheduler.isStarted()) {
serverScheduler.shutdown(true);
log.debug("Shutdown server scheduler");
try {
// FIXME: Quartz issue, need to sleep
Thread.sleep(1500);
} catch (InterruptedException e) {
log.error(e.getMessage(), e);
currentThread().interrupt();
}
}
shutdownAndAwaitTermination(queryProcessorExecutor, 10, SECONDS);
shutdownAndAwaitTermination(resultProcessorExecutor, 10, SECONDS);
// Shutdown the file watch service
if (watcher != null) {
watcher.stopService();
watcher = null;
log.debug("Shutdown watch service");
}
// Shutdown the outputwriters
stopWriterAndClearMasterServerList();
} catch (Exception e) {
throw new LifecycleException(e);
}
}
Example use of edu.umd.cs.findbugs.annotations.SuppressFBWarnings in the project orientdb (by orientechnologies), taken from the class OLocalHashTable, method create.
@SuppressFBWarnings("DLS_DEAD_LOCAL_STORE")
@Override
public void create(OBinarySerializer<K> keySerializer, OBinarySerializer<V> valueSerializer, OType[] keyTypes, boolean nullKeyIsSupported) {
startOperation();
try {
final OAtomicOperation atomicOperation;
try {
atomicOperation = startAtomicOperation(false);
} catch (IOException e) {
throw OException.wrapException(new OIndexException("Error during hash table creation"), e);
}
acquireExclusiveLock();
try {
try {
if (keyTypes != null)
this.keyTypes = Arrays.copyOf(keyTypes, keyTypes.length);
else
this.keyTypes = null;
this.nullKeyIsSupported = nullKeyIsSupported;
this.directory = new OHashTableDirectory(treeStateFileExtension, getName(), getFullName(), durableInNonTxMode, storage);
fileStateId = addFile(atomicOperation, getName() + metadataConfigurationFileExtension);
directory.create();
final OCacheEntry hashStateEntry = addPage(atomicOperation, fileStateId);
pinPage(atomicOperation, hashStateEntry);
hashStateEntry.acquireExclusiveLock();
try {
OHashIndexFileLevelMetadataPage page = new OHashIndexFileLevelMetadataPage(hashStateEntry, getChanges(atomicOperation, hashStateEntry), true);
hashStateEntryIndex = hashStateEntry.getPageIndex();
} finally {
hashStateEntry.releaseExclusiveLock();
releasePage(atomicOperation, hashStateEntry);
}
final String fileName = getFullName();
fileId = addFile(atomicOperation, fileName);
setKeySerializer(keySerializer);
setValueSerializer(valueSerializer);
initHashTreeState(atomicOperation);
if (nullKeyIsSupported)
nullBucketFileId = addFile(atomicOperation, getName() + nullBucketFileExtension);
endAtomicOperation(false, null);
} catch (IOException e) {
endAtomicOperation(true, e);
throw e;
} catch (Exception e) {
endAtomicOperation(true, e);
throw OException.wrapException(new OStorageException("Error during local hash table creation"), e);
}
} catch (IOException e) {
throw OException.wrapException(new OIndexException("Error during local hash table creation"), e);
} finally {
releaseExclusiveLock();
}
} finally {
completeOperation();
}
}
Example use of edu.umd.cs.findbugs.annotations.SuppressFBWarnings in the project orientdb (by orientechnologies), taken from the class OLocalHashTable20, method initHashTreeState.
@SuppressFBWarnings("DLS_DEAD_LOCAL_STORE")
private void initHashTreeState(OAtomicOperation atomicOperation) throws IOException {
for (long pageIndex = 0; pageIndex < MAX_LEVEL_SIZE; pageIndex++) {
final OCacheEntry cacheEntry = loadPageEntry(pageIndex, 0, atomicOperation);
cacheEntry.acquireExclusiveLock();
try {
final OHashIndexBucket<K, V> emptyBucket = new OHashIndexBucket<K, V>(MAX_LEVEL_DEPTH, cacheEntry, keySerializer, valueSerializer, keyTypes, getChanges(atomicOperation, cacheEntry));
} finally {
cacheEntry.releaseExclusiveLock();
releasePage(atomicOperation, cacheEntry);
}
}
final long[] rootTree = new long[MAX_LEVEL_SIZE];
for (int i = 0; i < MAX_LEVEL_SIZE; i++) rootTree[i] = createBucketPointer(i, 0);
directory.clear();
directory.addNewNode((byte) 0, (byte) 0, (byte) MAX_LEVEL_DEPTH, rootTree);
OCacheEntry hashStateEntry = loadPage(atomicOperation, fileStateId, hashStateEntryIndex, true);
hashStateEntry.acquireExclusiveLock();
try {
OHashIndexFileLevelMetadataPage metadataPage = new OHashIndexFileLevelMetadataPage(hashStateEntry, getChanges(atomicOperation, hashStateEntry), false);
metadataPage.setBucketsCount(0, MAX_LEVEL_SIZE);
metadataPage.setRecordsCount(0);
} finally {
hashStateEntry.releaseExclusiveLock();
releasePage(atomicOperation, hashStateEntry);
}
}
Example use of edu.umd.cs.findbugs.annotations.SuppressFBWarnings in the project orientdb (by orientechnologies), taken from the class OLogSegment, method readRecord.
@SuppressFBWarnings(value = "PZLA_PREFER_ZERO_LENGTH_ARRAYS")
public byte[] readRecord(OLogSequenceNumber lsn, ByteBuffer byteBuffer) throws IOException {
final OPair<OLogSequenceNumber, byte[]> lastRecord = lastReadRecord.get();
if (lastRecord != null && lastRecord.getKey().equals(lsn))
return lastRecord.getValue();
assert lsn.getSegment() == order;
if (lsn.getPosition() >= filledUpTo)
return null;
if (!logCache.isEmpty())
flush();
long pageIndex = lsn.getPosition() / OWALPage.PAGE_SIZE;
byte[] record = null;
int pageOffset = (int) (lsn.getPosition() % OWALPage.PAGE_SIZE);
long pageCount = (filledUpTo + OWALPage.PAGE_SIZE - 1) / OWALPage.PAGE_SIZE;
while (pageIndex < pageCount) {
fileLock.lock();
try {
final RandomAccessFile rndFile = getRndFile();
final FileChannel channel = rndFile.getChannel();
byteBuffer.position(0);
channel.read(byteBuffer, pageIndex * OWALPage.PAGE_SIZE);
} finally {
fileLock.unlock();
}
if (!checkPageIntegrity(byteBuffer))
throw new OWALPageBrokenException("WAL page with index " + pageIndex + " is broken");
OWALPage page = new OWALPage(byteBuffer, false);
byte[] content = page.getRecord(pageOffset);
if (record == null)
record = content;
else {
byte[] oldRecord = record;
record = new byte[record.length + content.length];
System.arraycopy(oldRecord, 0, record, 0, oldRecord.length);
System.arraycopy(content, 0, record, oldRecord.length, record.length - oldRecord.length);
}
if (page.mergeWithNextPage(pageOffset)) {
pageOffset = OWALPage.RECORDS_OFFSET;
pageIndex++;
if (pageIndex >= pageCount)
throw new OWALPageBrokenException("WAL page with index " + pageIndex + " is broken");
} else {
if (page.getFreeSpace() >= OWALPage.MIN_RECORD_SIZE && pageIndex < pageCount - 1)
throw new OWALPageBrokenException("WAL page with index " + pageIndex + " is broken");
break;
}
}
lastReadRecord = new WeakReference<OPair<OLogSequenceNumber, byte[]>>(new OPair<OLogSequenceNumber, byte[]>(lsn, record));
return record;
}
Aggregations