Use of edu.umd.cs.findbugs.annotations.SuppressFBWarnings in project jmxtrans by jmxtrans.
The class ActiveMQ2, method main.
@SuppressFBWarnings(value = "DMI_HARDCODED_ABSOLUTE_FILENAME", justification = "Path to RRD binary is hardcoded as this is example code")
public static void main(String[] args) throws Exception {
File outputFile = new File("target/w2-TEST.rrd");
if (!outputFile.exists() && !outputFile.createNewFile()) {
throw new IOException("Could not create output file");
}
RRDToolWriter gw = RRDToolWriter.builder()
    .setTemplateFile(new File("memorypool-rrd-template.xml"))
    .setOutputFile(outputFile)
    .setBinaryPath(new File("/opt/local/bin"))
    .setDebugEnabled(true)
    .setGenerate(true)
    .addTypeName("Destination")
    .build();
JmxProcess process = new JmxProcess(Server.builder()
    .setHost("w2").setPort("1105").setAlias("w2_activemq_1105")
    .addQuery(Query.builder()
        .setObj("org.apache.activemq:BrokerName=localhost,Type=Queue,Destination=*")
        .addAttr("QueueSize").addAttr("MaxEnqueueTime").addAttr("MinEnqueueTime").addAttr("AverageEnqueueTime")
        .addAttr("InFlightCount").addAttr("ConsumerCount").addAttr("ProducerCount")
        .addAttr("DispatchCount").addAttr("DequeueCount").addAttr("EnqueueCount")
        .addOutputWriterFactory(gw)
        .build())
    .addQuery(Query.builder()
        .setObj("org.apache.activemq:BrokerName=localhost,Type=Topic,Destination=*")
        .addAttr("QueueSize").addAttr("MaxEnqueueTime").addAttr("MinEnqueueTime").addAttr("AverageEnqueueTime")
        .addAttr("InFlightCount").addAttr("ConsumerCount").addAttr("ProducerCount")
        .addAttr("DispatchCount").addAttr("DequeueCount").addAttr("EnqueueCount")
        .addOutputWriterFactory(gw)
        .build())
    .build());
jsonPrinter.prettyPrint(process);
Injector injector = JmxTransModule.createInjector(new JmxTransConfiguration());
JmxTransformer transformer = injector.getInstance(JmxTransformer.class);
transformer.executeStandalone(process);
}
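For reference, DMI_HARDCODED_ABSOLUTE_FILENAME is the FindBugs warning for constructing a File from a hard-coded absolute path, which the example above does for the RRD binary directory. A minimal sketch of the suppressed pattern follows; the class and path below are hypothetical and not part of jmxtrans.
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.io.File;

public class RrdPathExample {
    // FindBugs flags the absolute path; the annotation records why it is acceptable here.
    @SuppressFBWarnings(value = "DMI_HARDCODED_ABSOLUTE_FILENAME", justification = "sample code, path is illustrative only")
    static File rrdBinaryDirectory() {
        return new File("/opt/local/bin");
    }
}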
Use of edu.umd.cs.findbugs.annotations.SuppressFBWarnings in project orientdb by orientechnologies.
The class ODatabaseCompare, method compareRecords.
@SuppressFBWarnings("NP_NULL_ON_SOME_PATH")
private boolean compareRecords(ODocumentHelper.RIDMapper ridMapper) {
listener.onMessage("\nStarting deep comparison record by record. This may take a few minutes. Wait please...");
Collection<String> clusterNames1 = makeDbCall(databaseOne, new ODbRelatedCall<Collection<String>>() {
@Override
public Collection<String> call(ODatabaseDocumentInternal database) {
return database.getClusterNames();
}
});
for (final String clusterName : clusterNames1) {
// CHECK IF THE CLUSTER IS INCLUDED
if (includeClusters != null) {
if (!includeClusters.contains(clusterName))
continue;
} else if (excludeClusters != null) {
if (excludeClusters.contains(clusterName))
continue;
}
final int clusterId1 = makeDbCall(databaseOne, new ODbRelatedCall<Integer>() {
@Override
public Integer call(ODatabaseDocumentInternal database) {
return database.getClusterIdByName(clusterName);
}
});
final long[] db1Range = makeDbCall(databaseOne, new ODbRelatedCall<long[]>() {
@Override
public long[] call(ODatabaseDocumentInternal database) {
return database.getStorage().getClusterDataRange(clusterId1);
}
});
final long[] db2Range = makeDbCall(databaseTwo, new ODbRelatedCall<long[]>() {
@Override
public long[] call(ODatabaseDocumentInternal database) {
return database.getStorage().getClusterDataRange(clusterId1);
}
});
final long db1Max = db1Range[1];
final long db2Max = db2Range[1];
databaseOne.activateOnCurrentThread();
final ODocument doc1 = new ODocument();
databaseTwo.activateOnCurrentThread();
final ODocument doc2 = new ODocument();
final ORecordId rid = new ORecordId(clusterId1);
// TODO why can these maximums be different?
final long clusterMax = Math.max(db1Max, db2Max);
final OStorage storage;
ODatabaseDocumentInternal selectedDatabase;
if (clusterMax == db1Max)
selectedDatabase = databaseOne;
else
selectedDatabase = databaseTwo;
OPhysicalPosition[] physicalPositions = makeDbCall(selectedDatabase, new ODbRelatedCall<OPhysicalPosition[]>() {
@Override
public OPhysicalPosition[] call(ODatabaseDocumentInternal database) {
return database.getStorage().ceilingPhysicalPositions(clusterId1, new OPhysicalPosition(0));
}
});
OStorageConfiguration configuration1 = makeDbCall(databaseOne, new ODbRelatedCall<OStorageConfiguration>() {
@Override
public OStorageConfiguration call(ODatabaseDocumentInternal database) {
return database.getStorage().getConfiguration();
}
});
OStorageConfiguration configuration2 = makeDbCall(databaseTwo, new ODbRelatedCall<OStorageConfiguration>() {
@Override
public OStorageConfiguration call(ODatabaseDocumentInternal database) {
return database.getStorage().getConfiguration();
}
});
String storageType1 = makeDbCall(databaseOne, new ODbRelatedCall<String>() {
@Override
public String call(ODatabaseDocumentInternal database) {
return database.getStorage().getType();
}
});
String storageType2 = makeDbCall(databaseTwo, new ODbRelatedCall<String>() {
@Override
public String call(ODatabaseDocumentInternal database) {
return database.getStorage().getType();
}
});
long recordsCounter = 0;
while (physicalPositions.length > 0) {
for (OPhysicalPosition physicalPosition : physicalPositions) {
try {
recordsCounter++;
final long position = physicalPosition.clusterPosition;
rid.setClusterPosition(position);
if (rid.equals(new ORecordId(configuration1.indexMgrRecordId)) && rid.equals(new ORecordId(configuration2.indexMgrRecordId)))
continue;
if (rid.equals(new ORecordId(configuration1.schemaRecordId)) && rid.equals(new ORecordId(configuration2.schemaRecordId)))
continue;
if (rid.getClusterId() == 0 && rid.getClusterPosition() == 0) {
// Skip comparing the raw structure if the storage types are different, since they differ by definition.
if (!storageType1.equals(storageType2))
continue;
}
final ORecordId rid2;
if (ridMapper == null)
rid2 = rid;
else {
final ORID newRid = ridMapper.map(rid);
if (newRid == null)
rid2 = rid;
else
rid2 = new ORecordId(newRid);
}
final ORawBuffer buffer1 = makeDbCall(databaseOne, new ODbRelatedCall<ORawBuffer>() {
@Override
public ORawBuffer call(ODatabaseDocumentInternal database) {
return database.getStorage().readRecord(rid, null, true, false, null).getResult();
}
});
final ORawBuffer buffer2 = makeDbCall(databaseTwo, new ODbRelatedCall<ORawBuffer>() {
@Override
public ORawBuffer call(ODatabaseDocumentInternal database) {
return database.getStorage().readRecord(rid2, null, true, false, null).getResult();
}
});
if (buffer1 == null && buffer2 == null)
// BOTH RECORDS NULL, OK
continue;
else if (buffer1 == null && buffer2 != null) {
// REC1 NULL
listener.onMessage("\n- ERR: RID=" + clusterId1 + ":" + position + " is null in DB1");
++differences;
} else if (buffer1 != null && buffer2 == null) {
// REC2 NULL
listener.onMessage("\n- ERR: RID=" + clusterId1 + ":" + position + " is null in DB2");
++differences;
} else {
if (buffer1.recordType != buffer2.recordType) {
listener.onMessage("\n- ERR: RID=" + clusterId1 + ":" + position + " recordType is different: " + (char) buffer1.recordType + " <-> " + (char) buffer2.recordType);
++differences;
}
if (buffer1.buffer == null && buffer2.buffer == null) {
} else if (buffer1.buffer == null && buffer2.buffer != null) {
listener.onMessage("\n- ERR: RID=" + clusterId1 + ":" + position + " content is different: null <-> " + buffer2.buffer.length);
++differences;
} else if (buffer1.buffer != null && buffer2.buffer == null) {
listener.onMessage("\n- ERR: RID=" + clusterId1 + ":" + position + " content is different: " + buffer1.buffer.length + " <-> null");
++differences;
} else {
if (buffer1.recordType == ODocument.RECORD_TYPE) {
// DOCUMENT: TRY TO INSTANTIATE AND COMPARE
makeDbCall(databaseOne, new ODocumentHelper.ODbRelatedCall<Object>() {
public Object call(ODatabaseDocumentInternal database) {
doc1.reset();
doc1.fromStream(buffer1.buffer);
return null;
}
});
makeDbCall(databaseTwo, new ODocumentHelper.ODbRelatedCall<Object>() {
public Object call(ODatabaseDocumentInternal database) {
doc2.reset();
doc2.fromStream(buffer2.buffer);
return null;
}
});
if (rid.toString().equals(configuration1.schemaRecordId) && rid.toString().equals(configuration2.schemaRecordId)) {
makeDbCall(databaseOne, new ODocumentHelper.ODbRelatedCall<java.lang.Object>() {
public Object call(ODatabaseDocumentInternal database) {
convertSchemaDoc(doc1);
return null;
}
});
makeDbCall(databaseTwo, new ODocumentHelper.ODbRelatedCall<java.lang.Object>() {
public Object call(ODatabaseDocumentInternal database) {
convertSchemaDoc(doc2);
return null;
}
});
}
if (!ODocumentHelper.hasSameContentOf(doc1, databaseOne, doc2, databaseTwo, ridMapper)) {
listener.onMessage("\n- ERR: RID=" + clusterId1 + ":" + position + " document content is different");
listener.onMessage("\n--- REC1: " + new String(buffer1.buffer));
listener.onMessage("\n--- REC2: " + new String(buffer2.buffer));
listener.onMessage("\n");
++differences;
}
} else {
if (buffer1.buffer.length != buffer2.buffer.length) {
// CHECK IF THE TRIMMED SIZE IS THE SAME
final String rec1 = new String(buffer1.buffer).trim();
final String rec2 = new String(buffer2.buffer).trim();
if (rec1.length() != rec2.length()) {
listener.onMessage("\n- ERR: RID=" + clusterId1 + ":" + position + " content length is different: " + buffer1.buffer.length + " <-> " + buffer2.buffer.length);
if (buffer1.recordType == ODocument.RECORD_TYPE)
listener.onMessage("\n--- REC1: " + rec1);
if (buffer2.recordType == ODocument.RECORD_TYPE)
listener.onMessage("\n--- REC2: " + rec2);
listener.onMessage("\n");
++differences;
}
} else {
// CHECK BYTE BY BYTE
for (int b = 0; b < buffer1.buffer.length; ++b) {
if (buffer1.buffer[b] != buffer2.buffer[b]) {
listener.onMessage("\n- ERR: RID=" + clusterId1 + ":" + position + " content is different at byte #" + b + ": " + buffer1.buffer[b] + " <-> " + buffer2.buffer[b]);
listener.onMessage("\n--- REC1: " + new String(buffer1.buffer));
listener.onMessage("\n--- REC2: " + new String(buffer2.buffer));
listener.onMessage("\n");
++differences;
break;
}
}
}
}
}
}
} catch (RuntimeException e) {
OLogManager.instance().error(this, "Error during data comparison of records with rid " + rid);
throw e;
}
}
final OPhysicalPosition[] curPosition = physicalPositions;
physicalPositions = makeDbCall(selectedDatabase, new ODbRelatedCall<OPhysicalPosition[]>() {
@Override
public OPhysicalPosition[] call(ODatabaseDocumentInternal database) {
return database.getStorage().higherPhysicalPositions(clusterId1, curPosition[curPosition.length - 1]);
}
});
if (recordsCounter % 10000 == 0)
listener.onMessage("\n" + recordsCounter + " records were processed for cluster " + clusterName + " ...");
}
listener.onMessage("\nCluster comparison was finished, " + recordsCounter + " records were processed for cluster " + clusterName + " ...");
}
return true;
}
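NP_NULL_ON_SOME_PATH means the analyzer found an execution path on which a possibly-null value is dereferenced. In compareRecords the paired null checks on buffer1 and buffer2 (and on their buffer fields) rule this out, but FindBugs cannot track the correlation between the checks, so the warning is suppressed for the whole method. A minimal sketch of the same situation, using hypothetical names unrelated to the OrientDB API:
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.util.Map;

public class NullPathExample {
    @SuppressFBWarnings(value = "NP_NULL_ON_SOME_PATH", justification = "the combined null checks guarantee b is non-null where it is dereferenced")
    static int firstNonNullLength(Map<String, byte[]> cache, String key) {
        byte[] a = cache.get(key);
        byte[] b = cache.get(key + ".backup");
        if (a == null && b == null)
            return 0;
        if (a == null)
            // Reaching this line implies b != null, but the analyzer may not track that correlation.
            return b.length;
        return a.length;
    }
}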
Use of edu.umd.cs.findbugs.annotations.SuppressFBWarnings in project java-apns by notnoop.
The class TlsTunnelBuilder, method makeTunnel.
@SuppressFBWarnings(value = "VA_FORMAT_STRING_USES_NEWLINE", justification = "use <CR><LF> as according to RFC, not platform-linefeed")
Socket makeTunnel(String host, int port, String proxyUsername, String proxyPassword, InetSocketAddress proxyAddress) throws IOException {
if (host == null || port < 0 || host.isEmpty() || proxyAddress == null) {
throw new ProtocolException("Incorrect parameters to build tunnel.");
}
logger.debug("Creating socket for Proxy : " + proxyAddress.getAddress() + ":" + proxyAddress.getPort());
Socket socket;
try {
ProxyClient client = new ProxyClient();
client.getParams().setParameter("http.useragent", "java-apns");
client.getHostConfiguration().setHost(host, port);
String proxyHost = proxyAddress.getAddress().toString().substring(0, proxyAddress.getAddress().toString().indexOf("/"));
client.getHostConfiguration().setProxy(proxyHost, proxyAddress.getPort());
ProxyClient.ConnectResponse response = client.connect();
socket = response.getSocket();
if (socket == null) {
ConnectMethod method = response.getConnectMethod();
// Read the proxy's HTTP response.
if (method.getStatusLine().getStatusCode() == 407) {
// Proxy server returned 407. We will now try to connect with an auth header
if (proxyUsername != null && proxyPassword != null) {
socket = AuthenticateProxy(method, client, proxyHost, proxyAddress.getPort(), proxyUsername, proxyPassword);
} else {
throw new ProtocolException("Socket not created: " + method.getStatusLine());
}
}
}
} catch (Exception e) {
throw new ProtocolException("Error occurred while creating proxy socket : " + e.toString());
}
if (socket != null) {
logger.debug("Socket for proxy created successfully : " + socket.getRemoteSocketAddress().toString());
}
return socket;
}
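VA_FORMAT_STRING_USES_NEWLINE is raised when a format string contains a literal \n instead of the platform-independent %n. The justification above notes that the tunnel's HTTP CONNECT exchange must use CR LF as mandated by the RFC, so the literal terminator is intentional. A minimal sketch of that justification follows; the helper below is hypothetical and not part of the java-apns API.
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;

public class ConnectRequestExample {
    // Each request line must end with CR LF per the HTTP RFC, so the format string
    // deliberately avoids the platform-dependent %n.
    @SuppressFBWarnings(value = "VA_FORMAT_STRING_USES_NEWLINE", justification = "HTTP requires CR LF, not the platform line separator")
    static String connectRequest(String host, int port) {
        return String.format("CONNECT %s:%d HTTP/1.1\r\nHost: %s:%d\r\n\r\n", host, port, host, port);
    }
}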
Use of edu.umd.cs.findbugs.annotations.SuppressFBWarnings in project orientdb by orientechnologies.
The class OLocalHashTable, method initHashTreeState.
@SuppressFBWarnings("DLS_DEAD_LOCAL_STORE")
private void initHashTreeState(OAtomicOperation atomicOperation) throws IOException {
truncateFile(atomicOperation, fileId);
for (long pageIndex = 0; pageIndex < MAX_LEVEL_SIZE; pageIndex++) {
final OCacheEntry cacheEntry = addPage(atomicOperation, fileId);
assert cacheEntry.getPageIndex() == pageIndex;
cacheEntry.acquireExclusiveLock();
try {
final OHashIndexBucket<K, V> emptyBucket = new OHashIndexBucket<K, V>(MAX_LEVEL_DEPTH, cacheEntry, keySerializer, valueSerializer, keyTypes, getChanges(atomicOperation, cacheEntry));
} finally {
cacheEntry.releaseExclusiveLock();
releasePage(atomicOperation, cacheEntry);
}
}
final long[] rootTree = new long[MAX_LEVEL_SIZE];
for (int pageIndex = 0; pageIndex < MAX_LEVEL_SIZE; pageIndex++) rootTree[pageIndex] = createBucketPointer(pageIndex);
directory.clear();
directory.addNewNode((byte) 0, (byte) 0, (byte) MAX_LEVEL_DEPTH, rootTree);
OCacheEntry hashStateEntry = loadPage(atomicOperation, fileStateId, hashStateEntryIndex, true);
hashStateEntry.acquireExclusiveLock();
try {
OHashIndexFileLevelMetadataPage metadataPage = new OHashIndexFileLevelMetadataPage(hashStateEntry, getChanges(atomicOperation, hashStateEntry), false);
metadataPage.setRecordsCount(0);
} finally {
hashStateEntry.releaseExclusiveLock();
releasePage(atomicOperation, hashStateEntry);
}
}
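DLS_DEAD_LOCAL_STORE fires because emptyBucket is assigned but never read; the OHashIndexBucket constructor is invoked purely for its side effect of formatting the newly added page. A minimal sketch of the pattern, with hypothetical classes in place of the OrientDB internals:
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;

public class DeadStoreExample {
    static class PageHeader {
        PageHeader(byte[] page) {
            page[0] = 1; // constructor initializes the page in place
        }
    }

    @SuppressFBWarnings(value = "DLS_DEAD_LOCAL_STORE", justification = "constructor is called only for its side effect on the page")
    static void formatPage(byte[] page) {
        // The local is never read; only the constructor's side effect matters.
        PageHeader header = new PageHeader(page);
    }
}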
Use of edu.umd.cs.findbugs.annotations.SuppressFBWarnings in project byte-buddy by raphw.
The class ByteBuddyMojo, method processOutputDirectory.
/**
* Processes all class files within the given directory.
*
* @param root The root directory to process.
* @param classPath A list of class path elements expected by the processed classes.
* @throws MojoExecutionException If the user configuration results in an error.
* @throws MojoFailureException If the plugin application raises an error.
* @throws IOException If an I/O exception occurs.
*/
@SuppressFBWarnings(value = "REC_CATCH_EXCEPTION", justification = "Applies Maven exception wrapper")
private void processOutputDirectory(File root, List<? extends String> classPath) throws MojoExecutionException, MojoFailureException, IOException {
if (!root.isDirectory()) {
throw new MojoExecutionException("Target location does not exist or is no directory: " + root);
}
ClassLoaderResolver classLoaderResolver = new ClassLoaderResolver(getLog(), repositorySystem, repositorySystemSession, remoteRepositories);
try {
List<Plugin> plugins = new ArrayList<Plugin>(transformations.size());
for (Transformation transformation : transformations) {
String plugin = transformation.getPlugin();
try {
plugins.add((Plugin) Class.forName(plugin, false, classLoaderResolver.resolve(transformation.asCoordinate(groupId, artifactId, version))).getDeclaredConstructor().newInstance());
getLog().info("Created plugin: " + plugin);
} catch (Exception exception) {
throw new MojoExecutionException("Cannot create plugin: " + transformation.getRawPlugin(), exception);
}
}
EntryPoint entryPoint = (initialization == null ? Initialization.makeDefault() : initialization).getEntryPoint(classLoaderResolver, groupId, artifactId, version);
getLog().info("Resolved entry point: " + entryPoint);
transform(root, entryPoint, classPath, plugins);
} finally {
classLoaderResolver.close();
}
}
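REC_CATCH_EXCEPTION is reported when a catch block handles java.lang.Exception even though the try block never throws it directly; here the broad catch is deliberate so that any failure of the reflective plugin instantiation is rewrapped as a MojoExecutionException. A minimal sketch of that wrapper pattern, using hypothetical names rather than the actual Maven plugin API:
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;

public class ExceptionWrapperExample {
    static class BuildFailedException extends Exception {
        BuildFailedException(String message, Throwable cause) {
            super(message, cause);
        }
    }

    @SuppressFBWarnings(value = "REC_CATCH_EXCEPTION", justification = "any failure is rewrapped in the build tool's exception type")
    static Object createPlugin(String className) throws BuildFailedException {
        try {
            return Class.forName(className).getDeclaredConstructor().newInstance();
        } catch (Exception exception) {
            // Reflection can fail in many distinct ways; wrap them all for the caller.
            throw new BuildFailedException("Cannot create plugin: " + className, exception);
        }
    }
}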