Usage example of org.apache.geode.i18n.LogWriterI18n in the Apache Geode project: the printResults method of the class PdxQueryCQTestBase.
/**
 * Logs every row of the given query results at fine level, prefixed by the
 * supplied message. Struct rows are expanded field-by-field; plain rows are
 * logged via their string form.
 *
 * @param results the query result set to dump
 * @param message a header line logged before the rows
 */
public void printResults(SelectResults results, String message) {
    LogWriterI18n logger = GemFireCacheImpl.getInstance().getLoggerI18n();
    logger.fine(message);
    int row = 0;
    for (Iterator iter = results.iterator(); iter.hasNext(); ) {
        Object r = iter.next();
        row++;
        if (r instanceof Struct) {
            Struct s = (Struct) r;
            String[] fieldNames = s.getStructType().getFieldNames();
            for (int i = 0; i < fieldNames.length; i++) {
                // String.valueOf avoids an NPE when a struct field value is null
                // (the original called .toString() directly on the field value).
                logger.fine("### Row " + row + "\n" + "Field: " + fieldNames[i] + " > "
                        + String.valueOf(s.get(fieldNames[i])));
            }
        } else {
            logger.fine("#### Row " + row + "\n" + r);
        }
    }
}
Usage example of org.apache.geode.i18n.LogWriterI18n in the Apache Geode project: the testTransactionalKeyBasedUpdates method of the class DistributedTransactionDUnitTest.
/**
 * Verifies key-based puts and updates on a partitioned region under
 * distributed transactions. Three server VMs host the PR; all transactional
 * work runs on server1. Each operation is wrapped in its own begin/commit,
 * and the final loop asserts the committed (updated) values are visible.
 */
@Test
public void testTransactionalKeyBasedUpdates() throws Exception {
Host host = Host.getHost(0);
VM server1 = host.getVM(0);
VM server2 = host.getVM(1);
VM server3 = host.getVM(2);
// Create the customer partitioned region on all three servers.
createPR(new VM[] { server1, server2, server3 });
execute(server1, new SerializableCallable() {
@Override
public Object call() throws Exception {
CacheTransactionManager mgr = getGemfireCache().getTxManager();
// Enable distributed (as opposed to colocated-only) transactions.
mgr.setDistributed(true);
// mgr.begin();
LogWriterI18n logger = getGemfireCache().getLoggerI18n();
Region<CustId, Customer> custPR = getCache().getRegion(CUSTOMER_PR);
// Initial inserts: one transaction per key.
for (int i = 1; i <= 2; i++) {
mgr.begin();
logger.fine("TEST:PUT-" + i);
custPR.put(new CustId(i), new Customer("name" + i, "addr" + i));
logger.fine("TEST:COMMIT-" + i);
mgr.commit();
}
// Updates
for (int i = 1; i <= 2; i++) {
CustId custId = new CustId(i);
Customer customer = custPR.get(custId);
// The insert transactions above must have committed.
assertNotNull(customer);
mgr.begin();
logger.fine("TEST:UPDATE-" + i);
// Overwrite with doubled name/addr suffixes inside a new transaction.
custPR.put(custId, new Customer("name" + i * 2, "addr" + i * 2));
logger.fine("TEST:UPDATED-" + i + "=" + custId + "," + custPR.get(custId));
logger.fine("TEST:UPDATE COMMIT-" + i);
mgr.commit();
logger.fine("TEST:POSTCOMMIT-" + i + "=" + custId + "," + custPR.get(custId));
}
// Verify
for (int i = 1; i <= 2; i++) {
CustId custId = new CustId(i);
Customer customer = custPR.get(custId);
assertNotNull(customer);
logger.fine("TEST:VERIFYING-" + i);
// Non-transactional read must observe the committed updated value.
assertEquals(new Customer("name" + i * 2, "addr" + i * 2), customer);
}
return null;
}
});
}
Usage example of org.apache.geode.i18n.LogWriterI18n in the Apache Geode project: the testRecoverRVV method of the class OplogRVVJUnitTest.
/**
 * Tests that a region version vector (RVV) written to an oplog is recovered
 * correctly when the oplog's .drf/.crf files are read back.
 *
 * The test builds an RVV for two members plus the owner, writes it through a
 * mocked disk-store stack, closes the oplog, then re-opens it over the
 * on-disk files and asserts (via strict jMock expectations) that every
 * recorded version holder and GC version is replayed into the recovery store.
 *
 * NOTE(review): the Expectations blocks are registered in phases interleaved
 * with object construction — the ordering is intentional; do not reorder.
 */
@Test
public void testRecoverRVV() throws UnknownHostException {
final DiskInitFile df = context.mock(DiskInitFile.class);
final LogWriterI18n logger = context.mock(LogWriterI18n.class);
final GemFireCacheImpl cache = context.mock(GemFireCacheImpl.class);
// Create a mock disk store impl.
final DiskStoreImpl parent = context.mock(DiskStoreImpl.class);
final StatisticsFactory sf = context.mock(StatisticsFactory.class);
final DiskStoreID ownerId = DiskStoreID.random();
final DiskStoreID m1 = DiskStoreID.random();
final DiskStoreID m2 = DiskStoreID.random();
final DiskRecoveryStore drs = context.mock(DiskRecoveryStore.class);
// Map each member id to a stable canonical integer id, both directions,
// so the oplog can encode/decode member ids compactly.
context.checking(new Expectations() {
{
ignoring(sf);
allowing(df).getOrCreateCanonicalId(m1);
will(returnValue(1));
allowing(df).getOrCreateCanonicalId(m2);
will(returnValue(2));
allowing(df).getOrCreateCanonicalId(ownerId);
will(returnValue(3));
allowing(df).getCanonicalObject(1);
will(returnValue(m1));
allowing(df).getCanonicalObject(2);
will(returnValue(m2));
allowing(df).getCanonicalObject(3);
will(returnValue(ownerId));
ignoring(df);
}
});
DirectoryHolder dirHolder = new DirectoryHolder(sf, testDirectory, 0, 0);
// Stub the cache/disk-store plumbing the oplog consults while writing.
context.checking(new Expectations() {
{
ignoring(logger);
allowing(cache).getLoggerI18n();
will(returnValue(logger));
allowing(cache).cacheTimeMillis();
will(returnValue(System.currentTimeMillis()));
allowing(parent).getCache();
will(returnValue(cache));
allowing(parent).getMaxOplogSizeInBytes();
will(returnValue(10000L));
allowing(parent).getName();
will(returnValue("test"));
allowing(parent).getStats();
will(returnValue(new DiskStoreStats(sf, "stats")));
allowing(parent).getDiskInitFile();
will(returnValue(df));
allowing(parent).getDiskStoreID();
will(returnValue(DiskStoreID.random()));
}
});
// Populate the RVV: note versions are recorded out of order (10 before 7)
// and GC versions trail the recorded versions.
final DiskRegionVersionVector rvv = new DiskRegionVersionVector(ownerId);
rvv.recordVersion(m1, 0);
rvv.recordVersion(m1, 1);
rvv.recordVersion(m1, 2);
rvv.recordVersion(m1, 10);
rvv.recordVersion(m1, 7);
rvv.recordVersion(m2, 0);
rvv.recordVersion(m2, 1);
rvv.recordVersion(m2, 2);
rvv.recordGCVersion(m1, 1);
rvv.recordGCVersion(m2, 0);
// create the oplog
final AbstractDiskRegion diskRegion = context.mock(AbstractDiskRegion.class);
final PersistentOplogSet oplogSet = context.mock(PersistentOplogSet.class);
final Map<Long, AbstractDiskRegion> map = new HashMap<Long, AbstractDiskRegion>();
map.put(5L, diskRegion);
// Region 5 holds the RVV above, is versioned, and its RVV is trusted.
context.checking(new Expectations() {
{
allowing(diskRegion).getRegionVersionVector();
will(returnValue(rvv));
allowing(diskRegion).getRVVTrusted();
will(returnValue(true));
allowing(parent).getAllDiskRegions();
will(returnValue(map));
allowing(oplogSet).getCurrentlyRecovering(5L);
will(returnValue(drs));
allowing(oplogSet).getParent();
will(returnValue(parent));
ignoring(oplogSet);
ignoring(parent);
allowing(diskRegion).getFlags();
will(returnValue(EnumSet.of(DiskRegionFlag.IS_WITH_VERSIONING)));
}
});
Map<Long, AbstractDiskRegion> regions = parent.getAllDiskRegions();
// Creating the oplog writes the RVV records; closing flushes them to disk.
Oplog oplog = new Oplog(1, oplogSet, dirHolder);
oplog.close();
// Strict expectations for recovery: each GC version and version holder
// must be replayed exactly once, and the trusted flag restored.
context.checking(new Expectations() {
{
one(drs).recordRecoveredGCVersion(m1, 1);
one(drs).recordRecoveredGCVersion(m2, 0);
one(drs).recordRecoveredVersonHolder(ownerId, rvv.getMemberToVersion().get(ownerId), true);
one(drs).recordRecoveredVersonHolder(m1, rvv.getMemberToVersion().get(m1), true);
one(drs).recordRecoveredVersonHolder(m2, rvv.getMemberToVersion().get(m2), true);
one(drs).setRVVTrusted(true);
}
});
// Re-open the same oplog id over the files written above and recover.
oplog = new Oplog(1, oplogSet);
Collection<File> drfFiles = FileUtils.listFiles(testDirectory, new String[] { "drf" }, true);
assertEquals(1, drfFiles.size());
Collection<File> crfFiles = FileUtils.listFiles(testDirectory, new String[] { "crf" }, true);
assertEquals(1, crfFiles.size());
oplog.addRecoveredFile(drfFiles.iterator().next(), dirHolder);
oplog.addRecoveredFile(crfFiles.iterator().next(), dirHolder);
OplogEntryIdSet deletedIds = new OplogEntryIdSet();
oplog.recoverDrf(deletedIds, false, true);
oplog.recoverCrf(deletedIds, true, true, false, Collections.singleton(oplog), true);
context.assertIsSatisfied();
}
Usage example of org.apache.geode.i18n.LogWriterI18n in the Apache Geode project: the getLogger method of the class MsgDestreamer.
/**
 * Returns the i18n log writer of the currently connected distributed system,
 * or {@code null} when no system is connected.
 */
private static LogWriterI18n getLogger() {
    InternalDistributedSystem system = InternalDistributedSystem.unsafeGetConnectedInstance();
    if (system == null) {
        return null;
    }
    return system.getLogWriter().convertToLogWriterI18n();
}
Usage example of org.apache.geode.i18n.LogWriterI18n in the Apache Geode project: the delistResource method of the class TransactionImpl.
/**
 * Disassociates the specified resource from the global transaction that is
 * associated with this transaction.
 *
 * @param xaRes the XAResource to delist
 * @param flag one of TMSUCCESS, TMSUSPEND, or TMFAIL
 * @return true if the resource was delisted successfully, false otherwise
 * @throws SystemException if no global transaction exists or the transaction
 *         manager encounters an unexpected error condition
 * @throws IllegalStateException if the transaction in the target object is not active
 *
 * @see javax.transaction.Transaction#delistResource(javax.transaction.xa.XAResource, int)
 */
public boolean delistResource(XAResource xaRes, int flag) throws IllegalStateException, SystemException {
    gtx = tm.getGlobalTransaction();
    if (gtx != null) {
        return gtx.delistResource(xaRes, flag);
    }
    // No global transaction to delist from: log at fine level and fail.
    String exception = LocalizedStrings.TransactionImpl_TRANSACTIONIMPL_DELISTRESOURCE_NO_GLOBAL_TRANSACTION_EXISTS.toLocalizedString();
    LogWriterI18n writer = TransactionUtils.getLogWriterI18n();
    if (writer.fineEnabled()) {
        writer.fine(exception);
    }
    throw new SystemException(exception);
}
Aggregations