Use of org.apache.geode.internal.cache.InternalCache in project geode by apache.
The class InternalDataSerializer, method _register.
public static DataSerializer _register(DataSerializer s, boolean distribute) {
  final int id = s.getId();
  DataSerializer dsForMarkers = s;
  if (id == 0) {
    throw new IllegalArgumentException(LocalizedStrings.InternalDataSerializer_CANNOT_CREATE_A_DATASERIALIZER_WITH_ID_0.toLocalizedString());
  }
  final Class[] classes = s.getSupportedClasses();
  if (classes == null || classes.length == 0) {
    final StringId msg = LocalizedStrings.InternalDataSerializer_THE_DATASERIALIZER_0_HAS_NO_SUPPORTED_CLASSES_ITS_GETSUPPORTEDCLASSES_METHOD_MUST_RETURN_AT_LEAST_ONE_CLASS;
    throw new IllegalArgumentException(msg.toLocalizedString(s.getClass().getName()));
  }
  for (Class aClass : classes) {
    if (aClass == null) {
      final StringId msg = LocalizedStrings.InternalDataSerializer_THE_DATASERIALIZER_GETSUPPORTEDCLASSES_METHOD_FOR_0_RETURNED_AN_ARRAY_THAT_CONTAINED_A_NULL_ELEMENT;
      throw new IllegalArgumentException(msg.toLocalizedString(s.getClass().getName()));
    } else if (aClass.isArray()) {
      final StringId msg = LocalizedStrings.InternalDataSerializer_THE_DATASERIALIZER_GETSUPPORTEDCLASSES_METHOD_FOR_0_RETURNED_AN_ARRAY_THAT_CONTAINED_AN_ARRAY_CLASS_WHICH_IS_NOT_ALLOWED_SINCE_ARRAYS_HAVE_BUILTIN_SUPPORT;
      throw new IllegalArgumentException(msg.toLocalizedString(s.getClass().getName()));
    }
  }
  final Integer idx = id;
  boolean retry;
  Marker oldMarker = null;
  final Marker m = new InitMarker();
  do {
    retry = false;
    Object oldSerializer = idsToSerializers.putIfAbsent(idx, m);
    if (oldSerializer != null) {
      if (oldSerializer instanceof Marker) {
        retry = !idsToSerializers.replace(idx, oldSerializer, m);
        if (!retry) {
          oldMarker = (Marker) oldSerializer;
        }
      } else if (oldSerializer.getClass().equals(s.getClass())) {
        // We've already got one of these registered
        if (distribute) {
          sendRegistrationMessage(s);
        }
        return (DataSerializer) oldSerializer;
      } else {
        DataSerializer other = (DataSerializer) oldSerializer;
        throw new IllegalStateException(LocalizedStrings.InternalDataSerializer_A_DATASERIALIZER_OF_CLASS_0_IS_ALREADY_REGISTERED_WITH_ID_1_SO_THE_DATASERIALIZER_OF_CLASS_2_COULD_NOT_BE_REGISTERED.toLocalizedString(new Object[] { other.getClass().getName(), other.getId() }));
      }
    }
  } while (retry);
  try {
    for (int i = 0; i < classes.length; i++) {
      DataSerializer oldS = classesToSerializers.putIfAbsent(classes[i].getName(), s);
      if (oldS != null) {
        if (!s.equals(oldS)) {
          // clean up the ones we have already added
          for (int j = 0; j < i; j++) {
            classesToSerializers.remove(classes[j].getName(), s);
          }
          dsForMarkers = null;
          String oldMsg;
          if (oldS.getId() == 0) {
            oldMsg = "DataSerializer has built-in support for class ";
          } else {
            oldMsg = "A DataSerializer of class " + oldS.getClass().getName() + " is already registered to support class ";
          }
          String msg = oldMsg + classes[i].getName() + " so the DataSerializer of class " + s.getClass().getName() + " could not be registered.";
          if (oldS.getId() == 0) {
            throw new IllegalArgumentException(msg);
          } else {
            throw new IllegalStateException(msg);
          }
        }
      }
    }
  } finally {
    if (dsForMarkers == null) {
      idsToSerializers.remove(idx, m);
    } else {
      idsToSerializers.replace(idx, m, dsForMarkers);
    }
    if (oldMarker != null) {
      oldMarker.setSerializer(dsForMarkers);
    }
    m.setSerializer(dsForMarkers);
  }
  // If a DataSerializer is being registered for the first time, its EventID
  // will be null, so generate a new event id if the distributed system is connected.
  InternalCache cache = GemFireCacheImpl.getInstance();
  if (cache != null && s.getEventId() == null) {
    s.setEventId(new EventID(cache.getDistributedSystem()));
  }
  if (distribute) {
    // Send a message to other peers telling them about the newly registered
    // DataSerializer; the message also carries the event id of the originator.
    sendRegistrationMessage(s);
    // Send it to the cache servers if this VM is a client.
    sendRegistrationMessageToServers(s);
  }
  // Send it to all cache clients regardless of distribute; bridge servers
  // forward it to all their clients regardless of the originator VM.
  sendRegistrationMessageToClients(s);
  fireNewDataSerializer(s);
  return s;
}
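For reference, here is a minimal sketch of the kind of custom serializer that reaches _register through the public DataSerializer.register(Class) API. The MyDateSerializer class and the id 57 are made-up examples, not part of the Geode sources above.

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Date;

import org.apache.geode.DataSerializer;

// Hypothetical serializer; the class name and the id 57 are arbitrary choices.
public class MyDateSerializer extends DataSerializer {

  public MyDateSerializer() {
    // DataSerializers need a public zero-arg constructor.
  }

  @Override
  public int getId() {
    return 57; // must be non-zero; _register rejects id 0
  }

  @Override
  public Class<?>[] getSupportedClasses() {
    // Must be non-empty and contain no nulls or array classes (see the checks above).
    return new Class<?>[] { Date.class };
  }

  @Override
  public boolean toData(Object o, DataOutput out) throws IOException {
    out.writeLong(((Date) o).getTime());
    return true;
  }

  @Override
  public Object fromData(DataInput in) throws IOException {
    return new Date(in.readLong());
  }
}

Calling DataSerializer.register(MyDateSerializer.class) instantiates the serializer and eventually reaches the _register logic above, which validates the id and supported classes before distributing the registration.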
Use of org.apache.geode.internal.cache.InternalCache in project geode by apache.
The class ClientStatsManager, method publishClientStats.
/**
 * This method publishes the client stats using the admin region.
 *
 * @param pool Connection pool which may be used for admin region.
 */
public static synchronized void publishClientStats(PoolImpl pool) {
  InternalCache currentCache = GemFireCacheImpl.getInstance();
  if (!initializeStatistics(currentCache)) {
    // handles null case too
    return;
  }
  LogWriterI18n logger = currentCache.getLoggerI18n();
  if (logger.fineEnabled()) {
    logger.fine("Entering ClientStatsManager#publishClientStats...");
  }
  ClientHealthStats stats = getClientHealthStats(currentCache, pool);
  try {
    InternalDistributedSystem ds = (InternalDistributedSystem) currentCache.getDistributedSystem();
    ServerRegionProxy regionProxy = new ServerRegionProxy(ClientHealthMonitoringRegion.ADMIN_REGION_NAME, pool);
    EventID eventId = new EventID(ds);
    @Released EntryEventImpl event = new EntryEventImpl((Object) null);
    try {
      event.setEventId(eventId);
      regionProxy.putForMetaRegion(ds.getMemberId(), stats, null, event, null, true);
    } finally {
      event.release();
    }
  } catch (DistributedSystemDisconnectedException e) {
    throw e;
  } catch (CacheWriterException cwx) {
    pool.getCancelCriterion().checkCancelInProgress(cwx);
    currentCache.getCancelCriterion().checkCancelInProgress(cwx);
    // TODO: Need to analyze these exception scenarios.
    logger.warning(LocalizedStrings.ClientStatsManager_FAILED_TO_SEND_CLIENT_HEALTH_STATS_TO_CACHESERVER, cwx);
  } catch (Exception e) {
    pool.getCancelCriterion().checkCancelInProgress(e);
    currentCache.getCancelCriterion().checkCancelInProgress(e);
    logger.info(LocalizedStrings.ClientStatsManager_FAILED_TO_PUBLISH_CLIENT_STATISTICS, e);
  }
  if (logger.fineEnabled()) {
    logger.fine("Exiting ClientStatsManager#publishClientStats.");
  }
}
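publishClientStats takes the internal PoolImpl. The following is only a hedged sketch of invoking it from a client VM, assuming a pool named "clientPool" has been configured; the pool name and the direct call are illustrative, since Geode normally drives this publication internally.

import org.apache.geode.cache.client.Pool;
import org.apache.geode.cache.client.PoolManager;
import org.apache.geode.cache.client.internal.PoolImpl;
// ClientStatsManager is assumed to live in org.apache.geode.internal.admin.
import org.apache.geode.internal.admin.ClientStatsManager;

public class ClientStatsPublisher {
  // Pushes ClientHealthStats to the server-side admin region once.
  static void pushStatsOnce() {
    Pool pool = PoolManager.find("clientPool"); // "clientPool" is an assumed pool name
    if (pool instanceof PoolImpl) {
      // Calling the internal class directly is only for illustration.
      ClientStatsManager.publishClientStats((PoolImpl) pool);
    }
  }
}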
Use of org.apache.geode.internal.cache.InternalCache in project geode by apache.
The class InternalDataSerializer, method readPdxEnum.
/**
 * @throws IOException since 6.6.2
 */
private static Object readPdxEnum(DataInput in) throws IOException {
  int dsId = in.readByte();
  int tmp = readArrayLength(in);
  int enumId = dsId << 24 | tmp & 0xFFFFFF;
  if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
    logger.trace(LogMarker.SERIALIZER, "read PdxEnum id={}", enumId);
  }
  InternalCache internalCache = GemFireCacheImpl.getForPdx("PDX registry is unavailable because the Cache has been closed.");
  TypeRegistry tr = internalCache.getPdxRegistry();
  Object result = tr.getEnumById(enumId);
  if (result instanceof PdxInstance) {
    getDMStats(internalCache).incPdxInstanceCreations();
  }
  return result;
}
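The enum id decoded above packs two values into one int: the originating distributed system id in the high byte and the id the PDX type registry assigned to the enum in the low 24 bits. A small sketch of that bit layout follows; the helper names are illustrative, not Geode API.

public class PdxEnumIdLayout {
  // Mirrors the expression dsId << 24 | tmp & 0xFFFFFF used in readPdxEnum.
  static int pack(int dsId, int registryId) {
    return (dsId << 24) | (registryId & 0xFFFFFF);
  }

  // High byte: the distributed system id that was written as a single byte.
  static int dsIdOf(int enumId) {
    return enumId >> 24;
  }

  // Low 24 bits: the registry-assigned enum id.
  static int registryIdOf(int enumId) {
    return enumId & 0xFFFFFF;
  }
}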
Use of org.apache.geode.internal.cache.InternalCache in project geode by apache.
The class InternalDataSerializer, method autoSerialized.
public static boolean autoSerialized(Object o, DataOutput out) throws IOException {
  AutoSerializableManager asm = TypeRegistry.getAutoSerializableManager();
  if (asm != null) {
    AutoClassInfo aci = asm.getExistingClassInfo(o.getClass());
    if (aci != null) {
      InternalCache internalCache = GemFireCacheImpl.getForPdx("PDX registry is unavailable because the Cache has been closed.");
      TypeRegistry tr = internalCache.getPdxRegistry();
      PdxOutputStream os;
      if (out instanceof HeapDataOutputStream) {
        os = new PdxOutputStream((HeapDataOutputStream) out);
      } else {
        os = new PdxOutputStream();
      }
      PdxWriterImpl writer = new PdxWriterImpl(tr, o, aci, os);
      try {
        if (is662SerializationEnabled()) {
          boolean alreadyInProgress = isPdxSerializationInProgress();
          if (!alreadyInProgress) {
            setPdxSerializationInProgress(true);
            try {
              asm.writeData(writer, o, aci);
            } finally {
              setPdxSerializationInProgress(false);
            }
          } else {
            asm.writeData(writer, o, aci);
          }
        } else {
          asm.writeData(writer, o, aci);
        }
      } catch (ToDataException | CancelException | NonPortableClassException | GemFireRethrowable ex) {
        throw ex;
      } catch (VirtualMachineError err) {
        SystemFailure.initiateFailure(err);
        // If this ever returns, rethrow the error. We're poisoned
        // now, so don't let this thread continue.
        throw err;
      } catch (Throwable t) {
        // Whenever you catch Error or Throwable, you must also
        // catch VirtualMachineError (see above). However, there is
        // _still_ a possibility that you are dealing with a cascading
        // error condition, so you also need to check to see if the JVM
        // is still usable:
        SystemFailure.checkFailure();
        throw new ToDataException("PdxSerializer failed when calling toData on " + o.getClass(), t);
      }
      int bytesWritten = writer.completeByteStreamGeneration();
      getDMStats(internalCache).incPdxSerialization(bytesWritten);
      if (!(out instanceof HeapDataOutputStream)) {
        writer.sendTo(out);
      }
      return true;
    }
  }
  return false;
}
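autoSerialized only does work when an AutoSerializableManager has been installed, which happens when the cache is created with a ReflectionBasedAutoSerializer. A sketch of that setup on a client follows; the package pattern com.example.model..* is an assumed placeholder for real domain classes.

import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.pdx.ReflectionBasedAutoSerializer;

public class AutoSerializationSetup {
  public static void main(String[] args) {
    // "com.example.model..*" is an assumed pattern; replace it with your own classes.
    ClientCache cache = new ClientCacheFactory()
        .setPdxSerializer(new ReflectionBasedAutoSerializer("com.example.model..*"))
        .create();
    // With the auto-serializer installed, TypeRegistry.getAutoSerializableManager()
    // should return a manager, letting autoSerialized(...) handle matching classes.
    cache.close();
  }
}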
Use of org.apache.geode.internal.cache.InternalCache in project geode by apache.
The class InternalInstantiator, method getInstantiators.
/**
 * Returns all of the currently registered instantiators
 */
public static Instantiator[] getInstantiators() {
  Collection coll = new ArrayList();
  if (!classNamesToHolders.isEmpty()) {
    Iterator it = classNamesToHolders.values().iterator();
    while (it.hasNext()) {
      try {
        InstantiatorAttributesHolder holder = (InstantiatorAttributesHolder) it.next();
        Class instantiatorClass = InternalDataSerializer.getCachedClass(holder.getInstantiatorClassName());
        Class instantiatedClass = InternalDataSerializer.getCachedClass(holder.getInstantiatedClassName());
        synchronized (InternalInstantiator.class) {
          if (!idsToInstantiators.containsKey(holder.getId())) {
            register(instantiatorClass, instantiatedClass, holder.getId(), false, holder.getEventId(), holder.getContext());
          }
          classNamesToHolders.remove(holder.getInstantiatedClassName());
          idsToHolders.remove(holder.getId());
        }
      } catch (ClassNotFoundException cnfe) {
        InternalCache cache = GemFireCacheImpl.getInstance();
        if (cache != null && cache.getLoggerI18n() != null && cache.getLoggerI18n().infoEnabled()) {
          cache.getLoggerI18n().info(LocalizedStrings.InternalInstantiator_COULD_NOT_LOAD_INSTANTIATOR_CLASS_0, new Object[] { cnfe.getMessage() });
        }
      }
    }
  }
  // Don't move it before the if block above.
  coll.addAll(dsMap.values());
  return (Instantiator[]) coll.toArray(new Instantiator[coll.size()]);
}
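Instantiators normally enter these registries through the public Instantiator.register API; getInstantiators then lazily converts any holder-only entries into real registrations. A typical registration looks like the following sketch, where the Position class and the class id 87 are hypothetical examples.

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.geode.DataSerializable;
import org.apache.geode.Instantiator;

// Hypothetical domain class; the class id 87 is an arbitrary example.
public class Position implements DataSerializable {
  private double price;

  public Position() {}

  static {
    Instantiator.register(new Instantiator(Position.class, 87) {
      @Override
      public DataSerializable newInstance() {
        return new Position();
      }
    });
  }

  @Override
  public void toData(DataOutput out) throws IOException {
    out.writeDouble(price);
  }

  @Override
  public void fromData(DataInput in) throws IOException {
    price = in.readDouble();
  }
}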