Usage of org.apache.geode.cache.DiskAccessException in the Apache Geode project:
class DiskInitFile, method openRAF2.
/**
 * Opens {@code this.ifFile} as a {@link RandomAccessFile} and positions the file
 * pointer for appending. A non-empty file is resumed at {@code nextSeekPosition}
 * (one byte earlier when {@code gotEOF} is set, so the EOF marker is overwritten);
 * an empty file is pre-allocated first and then positioned at the start.
 *
 * @throws DiskAccessException if the file cannot be opened or written
 */
private void openRAF2() {
  try {
    this.ifRAF = new RandomAccessFile(this.ifFile, getFileMode());
    long existingLength = this.ifRAF.length();
    if (existingLength == 0) {
      // Fresh file: pre-allocate using a fraction of the max oplog size,
      // clamped to the range [1 MB, 10 MB].
      long megabytesToAllocate =
          Math.min(Math.max(this.parent.getMaxOplogSize() / 200L, 1L), 10L);
      byte[] oneMegabyte = new byte[1024 * 1024];
      for (long written = 0; written < megabytesToAllocate; written++) {
        this.ifRAF.write(oneMegabyte);
      }
      this.ifRAF.seek(0L);
    } else if (this.gotEOF) {
      // Back up one byte so the previously written EOF marker gets overwritten.
      this.ifRAF.seek(this.nextSeekPosition - 1);
    } else {
      this.ifRAF.seek(this.nextSeekPosition);
    }
  } catch (IOException ex) {
    throw new DiskAccessException(
        LocalizedStrings.DiskRegion_COULD_NOT_OPEN_0.toLocalizedString(this.ifFile.getPath()),
        ex, this.parent);
  }
}
Usage of org.apache.geode.cache.DiskAccessException in the Apache Geode project:
class DiskInitFile, method writeCanonicalId.
/**
 * Writes an IFREC_ADD_CANONICAL_MEMBER_ID record (record id, canonical id,
 * serialized member object, end-of-record marker) to the init file.
 *
 * @param id the canonical id being recorded
 * @param object the member object to serialize into the record
 * @throws DiskAccessException if the record cannot be written; the underlying
 *         {@link IOException} is chained as the cause
 */
private void writeCanonicalId(int id, Object object) {
  try {
    HeapDataOutputStream hdos = new HeapDataOutputStream(32, Version.CURRENT);
    hdos.write(IFREC_ADD_CANONICAL_MEMBER_ID);
    hdos.writeInt(id);
    DataSerializer.writeObject(object, hdos);
    hdos.write(END_OF_RECORD_ID);
    // 'true' asks writeIFRecord to do compaction bookkeeping for this record.
    writeIFRecord(hdos, true);
  } catch (IOException ex) {
    // Chain 'ex' as the cause (was previously dropped, losing the original
    // stack trace) using the same 3-arg constructor used by openRAF2.
    DiskAccessException dae = new DiskAccessException(
        LocalizedStrings.DiskInitFile_FAILED_INIT_FILE_WRITE_BECAUSE_0.toLocalizedString(ex),
        ex, this.parent);
    if (!this.compactInProgress) {
      this.parent.handleDiskAccessException(dae);
    }
    throw dae;
  }
}
Usage of org.apache.geode.cache.DiskAccessException in the Apache Geode project:
class DiskInitFile, method writePRDestroy.
/**
 * Writes an IFREC_PR_DESTROY record (record id, PR name, end-of-record marker)
 * to the init file, marking the named partitioned region as destroyed.
 *
 * @param name the partitioned region name to record as destroyed
 * @throws DiskAccessException if the record cannot be written; the underlying
 *         {@link IOException} is chained as the cause
 */
private void writePRDestroy(String name) {
  try {
    int nameLength = estimateByteSize(name);
    // Capacity: 1 (record id) + nameLength + 4 (slack) + 1 (end marker).
    HeapDataOutputStream hdos =
        new HeapDataOutputStream(1 + nameLength + 4 + 1, Version.CURRENT);
    hdos.write(IFREC_PR_DESTROY);
    hdos.writeUTF(name);
    hdos.write(END_OF_RECORD_ID);
    writeIFRecord(hdos, false);
  } catch (IOException ex) {
    // Chain 'ex' as the cause (was previously dropped, losing the original
    // stack trace) using the same 3-arg constructor used by openRAF2.
    DiskAccessException dae = new DiskAccessException(
        LocalizedStrings.DiskInitFile_FAILED_INIT_FILE_WRITE_BECAUSE_0.toLocalizedString(ex),
        ex, this.parent);
    if (!this.compactInProgress) {
      this.parent.handleDiskAccessException(dae);
    }
    throw dae;
  }
}
Usage of org.apache.geode.cache.DiskAccessException in the Apache Geode project:
class DiskInitFile, method saveDataSerializer.
/**
 * Writes the specified DataSerializer's class name to the init file as an
 * IFREC_DATA_SERIALIZER_ID record, unless it has already been persisted.
 * Serialized under {@code lock} so concurrent writers cannot interleave records.
 *
 * @param ds the DataSerializer whose registration should be persisted
 * @throws DiskAccessException if the record cannot be written; the underlying
 *         {@link IOException} is chained as the cause
 */
private void saveDataSerializer(DataSerializer ds) {
  lock.lock();
  try {
    if (!this.compactInProgress && this.dsIds.contains(ds.getId())) {
      // Already written to disk; nothing to do. (During compaction every
      // live record must be rewritten, so the shortcut is skipped then.)
      return;
    }
    final byte[] classNameBytes = classToBytes(ds.getClass());
    // Capacity: 1 (record id) + 4 (name length) + name bytes + 1 (end marker).
    ByteBuffer bb = getIFWriteBuffer(1 + 4 + classNameBytes.length + 1);
    bb.put(IFREC_DATA_SERIALIZER_ID);
    bb.putInt(classNameBytes.length);
    bb.put(classNameBytes);
    bb.put(END_OF_RECORD_ID);
    writeIFRecord(bb);
  } catch (IOException ex) {
    // Chain 'ex' as the cause (was previously dropped, losing the original
    // stack trace) using the same 3-arg constructor used by openRAF2.
    throw new DiskAccessException(
        LocalizedStrings.DiskInitFile_FAILED_SAVING_DATA_SERIALIZER_TO_DISK_BECAUSE_0
            .toLocalizedString(ex),
        ex, this.parent);
  } finally {
    lock.unlock();
  }
}
Usage of org.apache.geode.cache.DiskAccessException in the Apache Geode project:
class DiskInitFile, method writePRCreate.
/**
 * Writes an IFREC_PR_CREATE record (record id, PR name, total bucket count,
 * colocated-with name, end-of-record marker) to the init file.
 *
 * @param name the partitioned region name
 * @param config the persistent configuration; a null colocatedWith is recorded
 *        as the empty string since writeUTF cannot encode null
 * @throws DiskAccessException if the record cannot be written; the underlying
 *         {@link IOException} is chained as the cause
 */
private void writePRCreate(String name, PRPersistentConfig config) {
  try {
    int nameLength = estimateByteSize(name);
    String colocatedWith = config.getColocatedWith();
    // writeUTF rejects null, so normalize to the empty string.
    colocatedWith = colocatedWith == null ? "" : colocatedWith;
    int colocatedLength = estimateByteSize(colocatedWith);
    // Capacity: 1 (record id) + name + 4 (bucket count) + colocated name
    // + 1 (end marker).
    HeapDataOutputStream hdos = new HeapDataOutputStream(
        1 + nameLength + 4 + colocatedLength + 1, Version.CURRENT);
    hdos.write(IFREC_PR_CREATE);
    hdos.writeUTF(name);
    hdos.writeInt(config.getTotalNumBuckets());
    hdos.writeUTF(colocatedWith);
    hdos.write(END_OF_RECORD_ID);
    writeIFRecord(hdos, false);
  } catch (IOException ex) {
    // Chain 'ex' as the cause (was previously dropped, losing the original
    // stack trace) using the same 3-arg constructor used by openRAF2.
    DiskAccessException dae = new DiskAccessException(
        LocalizedStrings.DiskInitFile_FAILED_INIT_FILE_WRITE_BECAUSE_0.toLocalizedString(ex),
        ex, this.parent);
    if (!this.compactInProgress) {
      this.parent.handleDiskAccessException(dae);
    }
    throw dae;
  }
}
Aggregations