use of org.apache.geode.LogWriter in project geode by apache.
the class GMSAuthenticator method invokeAuthenticator.
/**
* Method is package-protected so it can be used in testing.
*/
Principal invokeAuthenticator(Properties securityProps, DistributedMember member, Properties credentials) throws AuthenticationFailedException {
String authMethod = securityProps.getProperty(SECURITY_PEER_AUTHENTICATOR);
org.apache.geode.security.Authenticator auth = null;
try {
auth = SecurityService.getObjectOfType(authMethod, org.apache.geode.security.Authenticator.class);
LogWriter logWriter = this.services.getLogWriter();
LogWriter securityLogWriter = this.services.getSecurityLogWriter();
// this.securityProps contains ... is expected
auth.init(this.securityProps, logWriter, securityLogWriter);
return auth.authenticate(credentials, member);
} catch (GemFireSecurityException gse) {
throw gse;
} catch (Exception ex) {
throw new AuthenticationFailedException(HandShake_FAILED_TO_ACQUIRE_AUTHENTICATOR_OBJECT.toLocalizedString(), ex);
} finally {
if (auth != null)
auth.close();
}
}
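The Authenticator instantiated above is a user-supplied callback named by the SECURITY_PEER_AUTHENTICATOR property; invokeAuthenticator calls its init, authenticate, and close in that order. A minimal sketch of such an implementation, assuming a hypothetical class name and a hypothetical security-example-token property (neither is part of Geode), might look like this:
import java.security.Principal;
import java.util.Properties;
import org.apache.geode.LogWriter;
import org.apache.geode.distributed.DistributedMember;
import org.apache.geode.security.AuthenticationFailedException;

public class TokenAuthenticator implements org.apache.geode.security.Authenticator {
  private String expectedToken; // hypothetical config value, read in init()

  @Override
  public void init(Properties securityProps, LogWriter systemLogger, LogWriter securityLogger)
      throws AuthenticationFailedException {
    // securityProps carries the distributed system's security-* properties
    this.expectedToken = securityProps.getProperty("security-example-token");
  }

  @Override
  public Principal authenticate(Properties credentials, DistributedMember member)
      throws AuthenticationFailedException {
    String token = credentials.getProperty("security-example-token");
    if (expectedToken == null || !expectedToken.equals(token)) {
      throw new AuthenticationFailedException("bad token from member " + member);
    }
    final String name = member.getId();
    return new Principal() {
      @Override
      public String getName() {
        return name;
      }
    };
  }

  @Override
  public void close() {
    // nothing to release in this sketch; note close() runs even when authenticate threw
  }
}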
use of org.apache.geode.LogWriter in project geode by apache.
the class MiscellaneousCommands method changeLogLevel.
@CliCommand(value = CliStrings.CHANGE_LOGLEVEL, help = CliStrings.CHANGE_LOGLEVEL__HELP)
@CliMetaData(relatedTopic = { CliStrings.TOPIC_LOGS }, interceptor = "org.apache.geode.management.internal.cli.commands.MiscellaneousCommands$ChangeLogLevelInterceptor")
@ResourceOperation(resource = Resource.CLUSTER, operation = Operation.WRITE)
public Result changeLogLevel(
    @CliOption(key = CliStrings.CHANGE_LOGLEVEL__MEMBER,
        help = CliStrings.CHANGE_LOGLEVEL__MEMBER__HELP) String[] memberIds,
    @CliOption(key = CliStrings.CHANGE_LOGLEVEL__GROUPS, unspecifiedDefaultValue = "",
        help = CliStrings.CHANGE_LOGLEVEL__GROUPS__HELP) String[] grps,
    @CliOption(key = CliStrings.CHANGE_LOGLEVEL__LOGLEVEL,
        optionContext = ConverterHint.LOG_LEVEL, mandatory = true, unspecifiedDefaultValue = "",
        help = CliStrings.CHANGE_LOGLEVEL__LOGLEVEL__HELP) String logLevel) {
try {
if ((memberIds == null || memberIds.length == 0) && (grps == null || grps.length == 0)) {
return ResultBuilder.createUserErrorResult(CliStrings.CHANGE_LOGLEVEL__MSG__SPECIFY_GRP_OR_MEMBER);
}
InternalCache cache = GemFireCacheImpl.getInstance();
LogWriter logger = cache.getLogger();
Set<DistributedMember> dsMembers = new HashSet<DistributedMember>();
Set<DistributedMember> ds = CliUtil.getAllMembers(cache);
if (grps != null && grps.length > 0) {
for (String grp : grps) {
dsMembers.addAll(cache.getDistributedSystem().getGroupMembers(grp));
}
}
if (memberIds != null && memberIds.length > 0) {
for (String member : memberIds) {
Iterator<DistributedMember> it = ds.iterator();
while (it.hasNext()) {
DistributedMember mem = it.next();
// match by name when one is set, otherwise fall through to the ID check; the original
// ternary (name == null ? false : ...) short-circuited past the ID comparison entirely
if ((mem.getName() != null && mem.getName().equals(member)) || mem.getId().equals(member)) {
dsMembers.add(mem);
break;
}
}
}
}
if (dsMembers.size() == 0) {
return ResultBuilder.createGemFireErrorResult(CliStrings.CHANGE_LOGLEVEL__MSG_NO_MEMBERS);
}
Function logFunction = new ChangeLogLevelFunction();
FunctionService.registerFunction(logFunction);
Object[] functionArgs = new Object[1];
functionArgs[0] = logLevel;
CompositeResultData compositeResultData = ResultBuilder.createCompositeResultData();
SectionResultData section = compositeResultData.addSection("section");
TabularResultData resultTable = section.addTable("ChangeLogLevel");
resultTable = resultTable.setHeader("Summary");
Execution execution = FunctionService.onMembers(dsMembers).setArguments(functionArgs);
if (execution == null) {
return ResultBuilder.createUserErrorResult(CliStrings.CHANGE_LOGLEVEL__MSG__CANNOT_EXECUTE);
}
List<?> resultList = (List<?>) execution.execute(logFunction).getResult();
for (Object object : resultList) {
try {
if (object instanceof Throwable) {
logger.warning("Exception in ChangeLogLevelFunction " + ((Throwable) object).getMessage(), ((Throwable) object));
continue;
}
if (object != null) {
Map<String, String> resultMap = (Map<String, String>) object;
Entry<String, String> entry = resultMap.entrySet().iterator().next();
if (entry.getValue().contains("ChangeLogLevelFunction exception")) {
resultTable.accumulate(CliStrings.CHANGE_LOGLEVEL__COLUMN_MEMBER, entry.getKey());
resultTable.accumulate(CliStrings.CHANGE_LOGLEVEL__COLUMN_STATUS, "false");
} else {
resultTable.accumulate(CliStrings.CHANGE_LOGLEVEL__COLUMN_MEMBER, entry.getKey());
resultTable.accumulate(CliStrings.CHANGE_LOGLEVEL__COLUMN_STATUS, "true");
}
}
} catch (Exception ex) {
LogWrapper.getInstance().warning("change log level command exception " + ex);
continue;
}
}
Result result = ResultBuilder.buildResult(compositeResultData);
logger.info("change log-level command result=" + result);
return result;
} catch (Exception ex) {
GemFireCacheImpl.getInstance().getLogger().error("GFSH Changeloglevel exception: " + ex);
return ResultBuilder.createUserErrorResult(ex.getMessage());
}
}
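Each member runs ChangeLogLevelFunction and reports back a one-entry map of member ID to status string, which the result loop above inspects for the "ChangeLogLevelFunction exception" marker. A simplified stand-in for such a function (the class name and its behavior are illustrative, not the real implementation) could look like:
import java.util.HashMap;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.execute.Function;
import org.apache.geode.cache.execute.FunctionContext;
import org.apache.geode.distributed.DistributedMember;

public class ExampleLogLevelFunction implements Function {
  @Override
  public void execute(FunctionContext context) {
    Object[] args = (Object[]) context.getArguments();
    String level = (String) args[0];
    DistributedMember self =
        CacheFactory.getAnyInstance().getDistributedSystem().getDistributedMember();
    HashMap<String, String> result = new HashMap<>();
    try {
      // a real implementation reconfigures the member's logging backend here
      result.put(self.getId(), "New log level is " + level);
    } catch (Exception e) {
      // the command above looks for this marker to report STATUS=false for the member
      result.put(self.getId(), "ChangeLogLevelFunction exception: " + e.getMessage());
    }
    context.getResultSender().lastResult(result);
  }

  @Override
  public String getId() {
    return ExampleLogLevelFunction.class.getName();
  }

  @Override
  public boolean hasResult() {
    return true;
  }
}
From gfsh the command itself is invoked as something like change loglevel --loglevel=fine --members=server1, assuming the standard option names behind the CliStrings constants.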
use of org.apache.geode.LogWriter in project geode by apache.
the class DataCommands method getQueryRegionsAssociatedMembers.
public static Set<DistributedMember> getQueryRegionsAssociatedMembers(Set<String> regions, final InternalCache cache, boolean returnAll) {
LogWriter logger = cache.getLogger();
Set<DistributedMember> members;
Set<DistributedMember> newMembers = null;
Iterator<String> iterator = regions.iterator();
String region = iterator.next();
members = getRegionAssociatedMembers(region, cache, true);
if (logger.fineEnabled()) {
logger.fine("Members for region " + region + " Members " + members);
}
List<String> regionAndingList = new ArrayList<>();
regionAndingList.add(region);
if (regions.size() == 1) {
newMembers = members;
} else {
if (CollectionUtils.isNotEmpty(members)) {
while (iterator.hasNext()) {
region = iterator.next();
newMembers = getRegionAssociatedMembers(region, cache, true);
if (newMembers == null) {
newMembers = new HashSet<>();
}
if (logger.fineEnabled()) {
logger.fine("Members for region " + region + " Members " + newMembers);
}
regionAndingList.add(region);
newMembers.retainAll(members);
members = newMembers;
if (logger.fineEnabled()) {
logger.fine("Members after anding for regions " + regionAndingList + " List : " + newMembers);
}
}
}
}
// rebuild the result set; when returnAll is false only the first qualifying member is kept
members = new HashSet<>();
if (newMembers == null) {
return members;
}
for (DistributedMember newMember : newMembers) {
members.add(newMember);
if (!returnAll) {
return members;
}
}
return members;
}
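A caller that only needs somewhere to route a query can pass returnAll = false, which stops at the first member hosting every listed region. A hypothetical usage sketch (the region names are illustrative):
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.distributed.DistributedMember;
import org.apache.geode.internal.cache.InternalCache;
import org.apache.geode.management.internal.cli.commands.DataCommands;

class QueryRouting {
  static Set<DistributedMember> pickQueryTarget() {
    // regions the query touches; members returned must host all of them
    Set<String> regions = new HashSet<>(Arrays.asList("/customers", "/orders"));
    InternalCache cache = (InternalCache) CacheFactory.getAnyInstance();
    // false = return at most one member, enough to execute the query once
    return DataCommands.getQueryRegionsAssociatedMembers(regions, cache, false);
  }
}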
use of org.apache.geode.LogWriter in project geode by apache.
the class MXMemoryPoolListenerExample method main.
public static void main(String[] args) {
final MemoryMXBean mbean = ManagementFactory.getMemoryMXBean();
final double threshold;
{
double t = 0.8;
if (args.length > 0) {
try {
t = Integer.parseInt(args[0]) / 100.0; // 100.0, not 100: integer division would truncate to 0
} catch (NumberFormatException useDefault) {
// keep the default of 0.8
}
}
if (t < 0.0 || t > 1.0) {
throw new IllegalArgumentException("Theshold must be >= 0 and <= 100");
}
threshold = t;
}
final int percentTenured;
{
int p = 100;
if (args.length > 1) {
try {
p = Integer.parseInt(args[1]);
} catch (NumberFormatException useDefault) {
}
}
if (p > 100 || p < 0) {
throw new IllegalArgumentException("Percent Tenured must be >= 0 and <= 100");
}
percentTenured = p;
}
Properties dsProps = new Properties();
// Loner
dsProps.setProperty(MCAST_PORT, "0");
dsProps.setProperty(ConfigurationProperties.LOG_LEVEL, "info");
dsProps.setProperty(ConfigurationProperties.STATISTIC_SAMPLE_RATE, "200");
dsProps.setProperty(ConfigurationProperties.ENABLE_TIME_STATISTICS, "true");
dsProps.setProperty(ConfigurationProperties.STATISTIC_SAMPLING_ENABLED, "true");
DistributedSystem ds = DistributedSystem.connect(dsProps);
final LogWriter logger = ds.getLogWriter();
logger.info("Usage threshold: " + threshold + "; percent tenured: " + percentTenured + "; Runtime Maximum memory: " + (Runtime.getRuntime().maxMemory() / (1024 * 1024)) + "Mb" + "; Heap Maximum memory: " + (mbean.getHeapMemoryUsage().getMax() / (1024 * 1024)) + "Mb");
MXMemoryPoolListenerExample me = new MXMemoryPoolListenerExample(ds);
// Register this listener to NotificationEmitter
NotificationEmitter emitter = (NotificationEmitter) mbean;
emitter.addNotificationListener(me, null, null);
List<MemoryPoolMXBean> pools = ManagementFactory.getMemoryPoolMXBeans();
for (MemoryPoolMXBean p : pools) {
if (p.isCollectionUsageThresholdSupported()) {
// p.setCollectionUsageThreshold(0);
logger.info("Pool which supports collection usage threshold: " + p.getName() + "; " + p.getCollectionUsage());
}
// On JRockit do not set the usage threshold on the Nursery pool
if (p.getType().equals(MemoryType.HEAP) && p.isUsageThresholdSupported() && !p.getName().startsWith("Nursery")) {
// use long arithmetic: an int threshold overflows for heaps larger than 2 GB
long byteThreshold = (long) Math.ceil(threshold * p.getUsage().getMax());
logger.info("Setting threshold " + (byteThreshold / (1024 * 1024)) + "Mb on: " + p.getName() + "; " + p.getCollectionUsage());
p.setUsageThreshold(byteThreshold);
}
}
final Cache c = CacheFactory.create(ds);
new MemoryHog("hog_1", c, me.critical).consumeMemory(percentTenured).printTenuredSize();
ds.disconnect();
}
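The example registers itself (me) as a NotificationListener on the MemoryMXBean, so usage-threshold crossings arrive as JMX notifications. A minimal stand-alone listener using the standard java.lang.management API (an illustrative class, not the example's own handleNotification) looks like:
import java.lang.management.MemoryNotificationInfo;
import javax.management.Notification;
import javax.management.NotificationListener;
import javax.management.openmbean.CompositeData;

class ExampleMemoryListener implements NotificationListener {
  @Override
  public void handleNotification(Notification n, Object handback) {
    String type = n.getType();
    if (MemoryNotificationInfo.MEMORY_THRESHOLD_EXCEEDED.equals(type)
        || MemoryNotificationInfo.MEMORY_COLLECTION_THRESHOLD_EXCEEDED.equals(type)) {
      // the notification payload is a CompositeData wrapper around MemoryNotificationInfo
      MemoryNotificationInfo info =
          MemoryNotificationInfo.from((CompositeData) n.getUserData());
      System.out.println("pool " + info.getPoolName() + " crossed its threshold: "
          + info.getUsage() + " (crossing count " + info.getCount() + ")");
    }
  }
}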
use of org.apache.geode.LogWriter in project geode by apache.
the class MultiVMRegionTestCase method testTXNonblockingGetInitialImage.
/**
* Tests that distributed ack operations do not block while another cache is doing a
* getInitialImage.
*/
@Test
public void testTXNonblockingGetInitialImage() throws Exception {
assumeTrue(supportsReplication());
assumeTrue(supportsTransactions());
// don't run this test if global scope since it's too difficult to predict
// how many concurrent operations will occur
assumeFalse(getRegionAttributes().getScope().isGlobal());
assumeFalse(getRegionAttributes().getDataPolicy().withPersistence());
final String name = this.getUniqueName();
final byte[][] values = new byte[NB1_NUM_ENTRIES][];
for (int i = 0; i < NB1_NUM_ENTRIES; i++) {
values[i] = new byte[NB1_VALUE_SIZE];
Arrays.fill(values[i], (byte) 0x42);
}
Host host = Host.getHost(0);
VM vm0 = host.getVM(0);
VM vm2 = host.getVM(2);
SerializableRunnable create = new CacheSerializableRunnable("Create Mirrored Region") {
@Override
public void run2() throws CacheException {
beginCacheXml();
{
// root region must be DACK because it's used to sync up async subregions
AttributesFactory factory = new AttributesFactory();
factory.setScope(Scope.DISTRIBUTED_ACK);
factory.setDataPolicy(DataPolicy.NORMAL);
factory.setSubscriptionAttributes(new SubscriptionAttributes(InterestPolicy.ALL));
createRootRegion(factory.create());
}
{
AttributesFactory factory = new AttributesFactory(getRegionAttributes());
if (getRegionAttributes().getDataPolicy() == DataPolicy.NORMAL) {
factory.setDataPolicy(DataPolicy.PRELOADED);
}
factory.setSubscriptionAttributes(new SubscriptionAttributes(InterestPolicy.ALL));
createRegion(name, factory.create());
}
finishCacheXml(name);
// reset slow
org.apache.geode.internal.cache.InitialImageOperation.slowImageProcessing = 0;
}
};
vm0.invoke(new CacheSerializableRunnable("Create Nonmirrored Region") {
@Override
public void run2() throws CacheException {
{
// root region must be DACK because it's used to sync up async subregions
AttributesFactory factory = new AttributesFactory();
factory.setScope(Scope.DISTRIBUTED_ACK);
factory.setDataPolicy(DataPolicy.EMPTY);
createRootRegion(factory.create());
}
{
AttributesFactory factory = new AttributesFactory(getRegionAttributes());
createRegion(name, factory.create());
}
// reset slow
org.apache.geode.internal.cache.InitialImageOperation.slowImageProcessing = 0;
}
});
vm0.invoke(new CacheSerializableRunnable("Put initial data") {
@Override
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(name);
for (int i = 0; i < NB1_NUM_ENTRIES; i++) {
region.put(new Integer(i), values[i]);
}
assertEquals(NB1_NUM_ENTRIES, region.keySet().size());
}
});
// start asynchronous process that does updates to the data
AsyncInvocation async = vm0.invokeAsync(new CacheSerializableRunnable("Do Nonblocking Operations") {
@Override
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(name);
// wait for profile of getInitialImage cache to show up
final org.apache.geode.internal.cache.CacheDistributionAdvisor adv = ((org.apache.geode.internal.cache.DistributedRegion) region).getCacheDistributionAdvisor();
final int expectedProfiles = 1;
WaitCriterion ev = new WaitCriterion() {
@Override
public boolean done() {
DataPolicy currentPolicy = getRegionAttributes().getDataPolicy();
if (currentPolicy == DataPolicy.PRELOADED) {
return (adv.advisePreloadeds().size() + adv.adviseReplicates().size()) >= expectedProfiles;
} else {
return adv.adviseReplicates().size() >= expectedProfiles;
}
}
@Override
public String description() {
return "replicate count never reached " + expectedProfiles;
}
};
Wait.waitForCriterion(ev, 100 * 1000, 200, true);
// operate on every odd entry with different value, alternating between
// updates, invalidates, and destroys. These operations are likely
// to be nonblocking if a sufficient number of updates get through
// before the get initial image is complete.
CacheTransactionManager txMgr = getCache().getCacheTransactionManager();
for (int i = 1; i < NB1_NUM_ENTRIES; i += 2) {
Object key = new Integer(i);
switch(i % 6) {
case 1: // UPDATE
// use the current timestamp so we know when it happened
// we could have used last modification timestamps, but
// this works without enabling statistics
Object value = new Long(System.currentTimeMillis());
txMgr.begin();
region.put(key, value);
txMgr.commit();
break;
case 3: // INVALIDATE
txMgr.begin();
region.invalidate(key);
txMgr.commit();
if (getRegionAttributes().getScope().isDistributedAck()) {
// do a nonblocking netSearch
assertNull(region.get(key));
}
break;
case 5: // DESTROY
txMgr.begin();
region.destroy(key);
txMgr.commit();
if (getRegionAttributes().getScope().isDistributedAck()) {
// do a nonblocking netSearch
assertNull(region.get(key));
}
break;
default:
fail("unexpected modulus result: " + i);
break;
}
}
// add some new keys
for (int i = NB1_NUM_ENTRIES; i < NB1_NUM_ENTRIES + 200; i++) {
txMgr.begin();
region.create(new Integer(i), new Long(System.currentTimeMillis()));
txMgr.commit();
}
// now do a put on our DACK root region; it will not complete until it has been
// processed on the other side, which means everything done before this
// point has been processed
getRootRegion().put("DONE", "FLUSH_OPS");
}
});
// slow down image processing to make it more likely to get async updates
if (!getRegionAttributes().getScope().isGlobal()) {
vm2.invoke(new SerializableRunnable("Set slow image processing") {
@Override
public void run() {
// if this is a no_ack test, then we need to slow down more because of the
// pauses in the nonblocking operations
int pause = 200;
org.apache.geode.internal.cache.InitialImageOperation.slowImageProcessing = pause;
}
});
}
AsyncInvocation asyncGII = vm2.invokeAsync(create);
if (!getRegionAttributes().getScope().isGlobal()) {
// wait for nonblocking operations to complete
ThreadUtils.join(async, 30 * 1000);
vm2.invoke(new SerializableRunnable("Set fast image processing") {
@Override
public void run() {
org.apache.geode.internal.cache.InitialImageOperation.slowImageProcessing = 0;
}
});
org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("after async nonblocking ops complete");
}
// wait for GII to complete
ThreadUtils.join(asyncGII, 30 * 1000);
final long iiComplete = System.currentTimeMillis();
org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("Complete GetInitialImage at: " + System.currentTimeMillis());
if (getRegionAttributes().getScope().isGlobal()) {
// wait for nonblocking operations to complete
ThreadUtils.join(async, 30 * 1000);
}
if (async.exceptionOccurred()) {
fail("async failed", async.getException());
}
if (asyncGII.exceptionOccurred()) {
fail("asyncGII failed", asyncGII.getException());
}
// Locally destroy the region in vm0 so we know that they are not found by
// a netSearch
vm0.invoke(new CacheSerializableRunnable("Locally destroy region") {
@Override
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(name);
region.localDestroyRegion();
}
});
// invoke repeating so noack regions wait for all updates to get processed
vm2.invokeRepeatingIfNecessary(new CacheSerializableRunnable("Verify entryCount") {
boolean entriesDumped = false;
@Override
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(name);
// expected entry count (subtract entries destroyed)
int entryCount = NB1_NUM_ENTRIES + 200 - NB1_NUM_ENTRIES / 6;
int actualCount = region.entrySet(false).size();
if (actualCount == NB1_NUM_ENTRIES + 200) {
// entries not destroyed, dump entries that were supposed to have been destroyed
dumpDestroyedEntries(region);
}
assertEquals(entryCount, actualCount);
}
private void dumpDestroyedEntries(Region region) throws EntryNotFoundException {
if (entriesDumped)
return;
entriesDumped = true;
LogWriter logger = org.apache.geode.test.dunit.LogWriterUtils.getLogWriter();
logger.info("DUMPING Entries with values in VM that should have been destroyed:");
for (int i = 5; i < NB1_NUM_ENTRIES; i += 6) {
logger.info(i + "-->" + ((org.apache.geode.internal.cache.LocalRegion) region).getValueInVM(new Integer(i)));
}
}
}, 5000);
vm2.invoke(new CacheSerializableRunnable("Verify keys/values & Nonblocking") {
@Override
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(name);
// expected entry count (subtract entries destroyed)
int entryCount = NB1_NUM_ENTRIES + 200 - NB1_NUM_ENTRIES / 6;
assertEquals(entryCount, region.entrySet(false).size());
// determine how many entries were updated before getInitialImage
// was complete
int numConcurrent = 0;
for (int i = 0; i < NB1_NUM_ENTRIES + 200; i++) {
Region.Entry entry = region.getEntry(new Integer(i));
Object v = entry == null ? null : entry.getValue();
if (i < NB1_NUM_ENTRIES) {
// old keys
switch(i % 6) {
// even keys are originals
case 0:
case 2:
case 4:
assertNotNull(entry);
assertTrue(Arrays.equals(values[i], (byte[]) v));
break;
case 1: // updated
assertNotNull(v);
assertTrue("Value for key " + i + " is not a Long, is a " + v.getClass().getName(), v instanceof Long);
Long timestamp = (Long) entry.getValue();
if (timestamp.longValue() < iiComplete) {
numConcurrent++;
}
break;
case 3: // invalidated
assertNotNull(entry);
assertNull("Expected value for " + i + " to be null, but was " + v, v);
break;
case 5: // destroyed
assertNull(entry);
break;
default:
fail("unexpected modulus result: " + (i % 6));
break;
}
} else {
// new keys
assertNotNull(v);
assertTrue("Value for key " + i + " is not a Long, is a " + v.getClass().getName(), v instanceof Long);
Long timestamp = (Long) entry.getValue();
if (timestamp.longValue() < iiComplete) {
numConcurrent++;
}
}
}
org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info(name + ": " + numConcurrent + " entries out of " + entryCount + " were updated concurrently with getInitialImage");
// make sure at least some of them were concurrent
{
int min = 30;
assertTrue("Not enough updates concurrent with getInitialImage occurred to my liking. " + numConcurrent + " entries out of " + entryCount + " were updated concurrently with getInitialImage, and I'd expect at least " + min + " or so", numConcurrent >= min);
}
}
});
}
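The Wait.waitForCriterion calls above poll a WaitCriterion until done() returns true or the timeout elapses. Outside the dunit framework the same pattern can be written in a few lines (the names here are illustrative, not the test API):
import java.util.function.BooleanSupplier;

final class PollUntil {
  static void waitFor(BooleanSupplier done, String description, long timeoutMs, long intervalMs)
      throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (!done.getAsBoolean()) {
      if (System.currentTimeMillis() > deadline) {
        throw new AssertionError("timed out waiting for: " + description);
      }
      Thread.sleep(intervalMs); // poll interval, e.g. 200 ms as in the test above
    }
  }
}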