Use of org.apache.geode.InternalGemFireError in project geode by apache.
From the class PeerTypeRegistration, the getExistingIdForType method:
/** Should be called holding the dlock */
private int getExistingIdForType(PdxType newType) {
  int totalPdxTypeIdInDS = 0;
  TXStateProxy currentState = suspendTX();
  try {
    int result = -1;
    for (Map.Entry<Object, Object> entry : getIdToType().entrySet()) {
      Object v = entry.getValue();
      Object k = entry.getKey();
      if (k instanceof EnumId) {
        EnumId id = (EnumId) k;
        EnumInfo info = (EnumInfo) v;
        enumToId.put(info, id);
      } else {
        PdxType foundType = (PdxType) v;
        Integer id = (Integer) k;
        int tmpDsId = PLACE_HOLDER_FOR_DS_ID & id;
        if (tmpDsId == this.dsId) {
          totalPdxTypeIdInDS++;
        }
        typeToId.put(foundType, id);
        if (foundType.equals(newType)) {
          result = foundType.getTypeId();
        }
      }
    }
    if (totalPdxTypeIdInDS == this.maxTypeId) {
      throw new InternalGemFireError(
          "Used up all of the PDX type ids for this distributed system. The maximum number of PDX types is "
              + this.maxTypeId);
    }
    return result;
  } finally {
    resumeTX(currentState);
  }
}
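The check PLACE_HOLDER_FOR_DS_ID & id is what lets a member count how many of the registered type ids were allocated by its own distributed system. The following minimal sketch illustrates that style of id layout; it is not Geode source, and the class name, mask values, and helper method are illustrative assumptions only.

// Illustrative sketch only: assumes the high byte of a type id carries the
// distributed-system id and the low bits carry the per-member counter.
// These masks are hypothetical, not Geode's actual constants.
public class TypeIdLayoutSketch {
  static final int DS_ID_MASK = 0xFF000000;    // hypothetical stand-in for PLACE_HOLDER_FOR_DS_ID
  static final int LOCAL_ID_MASK = 0x00FFFFFF; // hypothetical stand-in for PLACE_HOLDER_FOR_TYPE_ID

  static boolean belongsToThisDs(int typeId, int thisDsIdBits) {
    // Mirrors the "PLACE_HOLDER_FOR_DS_ID & id == this.dsId" comparison above.
    return (typeId & DS_ID_MASK) == thisDsIdBits;
  }

  public static void main(String[] args) {
    int thisDsIdBits = 2 << 24;          // DS id 2 shifted into the high byte
    int someTypeId = thisDsIdBits | 17;  // 17th type id allocated by this DS
    System.out.println(belongsToThisDs(someTypeId, thisDsIdBits)); // true
    System.out.println(someTypeId & LOCAL_ID_MASK);                // 17
  }
}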
Use of org.apache.geode.InternalGemFireError in project geode by apache.
From the class PeerTypeRegistration, the allocateEnumId method:
private EnumId allocateEnumId(EnumInfo ei) {
  TXStateProxy currentState = suspendTX();
  Region<Object, Object> r = getIdToType();
  int id = ei.hashCode() & PLACE_HOLDER_FOR_TYPE_ID;
  int newEnumId = id | this.dsId;
  try {
    int maxTry = this.maxTypeId;
    while (r.get(new EnumId(newEnumId)) != null) {
      maxTry--;
      if (maxTry == 0) {
        throw new InternalGemFireError(
            "Used up all of the PDX type ids for this distributed system. The maximum number of PDX types is "
                + this.maxTypeId);
      }
      // Find the next available type id.
      id++;
      if (id > this.maxTypeId) {
        id = 1;
      }
      newEnumId = id | this.dsId;
    }
    return new EnumId(newEnumId);
  } finally {
    resumeTX(currentState);
  }
}
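The allocation strategy is a linear probe: start from a hash-derived candidate, increment with wrap-around to 1, and give up with InternalGemFireError once maxTypeId attempts are exhausted. The sketch below replays that probing logic in isolation; it is not Geode source, and the Set stands in for the id-to-type region, with the mask and names being illustrative assumptions.

// Illustrative sketch of the probing strategy used by allocateEnumId.
// A HashSet stands in for the id-to-type region; constants are hypothetical.
import java.util.HashSet;
import java.util.Set;

public class EnumIdProbeSketch {
  static final int LOCAL_ID_MASK = 0x00FFFFFF; // hypothetical counterpart of PLACE_HOLDER_FOR_TYPE_ID

  static int allocate(Object enumInfo, Set<Integer> usedIds, int dsIdBits, int maxTypeId) {
    int id = enumInfo.hashCode() & LOCAL_ID_MASK;
    int candidate = id | dsIdBits;
    int maxTry = maxTypeId;
    while (usedIds.contains(candidate)) {
      if (--maxTry == 0) {
        throw new IllegalStateException("Used up all ids; maximum is " + maxTypeId);
      }
      id++;              // try the next local id
      if (id > maxTypeId) {
        id = 1;          // wrap around, skipping 0
      }
      candidate = id | dsIdBits;
    }
    usedIds.add(candidate);
    return candidate;
  }

  public static void main(String[] args) {
    Set<Integer> used = new HashSet<>();
    int dsIdBits = 1 << 24;
    System.out.println(allocate("PENDING", used, dsIdBits, 0x00FFFFFF));
    System.out.println(allocate("PENDING", used, dsIdBits, 0x00FFFFFF)); // collides, probes to the next id
  }
}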
Use of org.apache.geode.InternalGemFireError in project geode by apache.
From the class LuceneEventListener, the process method:
protected boolean process(final List<AsyncEvent> events) {
  // Try to get a PDX instance if possible, rather than a deserialized object
  DefaultQuery.setPdxReadSerialized(true);
  Set<IndexRepository> affectedRepos = new HashSet<IndexRepository>();
  try {
    for (AsyncEvent event : events) {
      Region region = event.getRegion();
      Object key = event.getKey();
      Object callbackArgument = event.getCallbackArgument();
      IndexRepository repository = repositoryManager.getRepository(region, key, callbackArgument);
      Object value = getValue(region.getEntry(key));
      if (value != null) {
        repository.update(key, value);
      } else {
        repository.delete(key);
      }
      affectedRepos.add(repository);
    }
    for (IndexRepository repo : affectedRepos) {
      repo.commit();
    }
    return true;
  } catch (BucketNotFoundException | RegionDestroyedException | PrimaryBucketException e) {
    logger.debug("Bucket not found while saving to lucene index: " + e.getMessage(), e);
    return false;
  } catch (CacheClosedException e) {
    logger.debug("Unable to save to lucene index, cache has been closed", e);
    return false;
  } catch (AlreadyClosedException e) {
    logger.debug("Unable to commit, the lucene index is already closed", e);
    return false;
  } catch (IOException e) {
    throw new InternalGemFireError("Unable to save to lucene index", e);
  } finally {
    DefaultQuery.setPdxReadSerialized(false);
  }
}
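The boolean return distinguishes transient failures (bucket moved, cache or index closed) from a genuine I/O fault, which escapes as InternalGemFireError; returning false follows the async-event-listener convention of leaving the batch queued so it can be redelivered. The sketch below shows that calling pattern with a hypothetical listener interface and retry driver; it is not Geode's dispatch code.

// Illustrative sketch: a caller retrying a batch while process(...) reports a
// transient failure. The interface, driver, and backoff are hypothetical.
import java.util.Arrays;
import java.util.List;

interface BatchListener<E> {
  boolean process(List<E> events); // false = transient failure, retry the batch
}

public class RetrySketch {
  static <E> void deliver(BatchListener<E> listener, List<E> batch, int maxAttempts)
      throws InterruptedException {
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
      if (listener.process(batch)) {
        return; // batch applied and committed
      }
      Thread.sleep(100L * attempt); // back off before redelivering the same batch
    }
    throw new IllegalStateException("Batch not processed after " + maxAttempts + " attempts");
  }

  public static void main(String[] args) throws InterruptedException {
    BatchListener<String> flaky = new BatchListener<String>() {
      private int calls;
      @Override
      public boolean process(List<String> events) {
        return ++calls >= 3; // fail twice (e.g. bucket not found yet), then succeed
      }
    };
    deliver(flaky, Arrays.asList("event-1", "event-2"), 5);
    System.out.println("batch delivered");
  }
}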