use of org.fusesource.hawtbuf.Buffer in project hawtjournal by fusesource.
From the class DataFileAccessor, method readLocation.
/**
 * Reads the payload stored at the given journal location.
 * <p>
 * If the record is still in the in-flight write queue, its buffer is returned
 * directly without touching disk. Otherwise the record is read from the data
 * file under both the shared accessor lock and the per-thread file lock.
 *
 * @param location the journal location to read; its size/type fields are
 *                 populated from the record header if not already set
 * @return the record payload
 * @throws IOException if the location is invalid or refers to a deleted record
 */
Buffer readLocation(Location location) throws IOException {
    // Fast path: serve the read from a write that has not been flushed yet.
    WriteCommand asyncWrite = journal.getInflightWrites().get(location);
    Buffer result = null;
    if (asyncWrite != null) {
        result = asyncWrite.getData();
    } else {
        // Per-thread file handle and lock for the target data file.
        RandomAccessFile raf = getOrCreateRaf(Thread.currentThread(), location.getDataFileId());
        Lock threadLock = getOrCreateLock(Thread.currentThread(), location.getDataFileId());
        accessorLock.lock();
        threadLock.lock();
        try {
            if (location.getSize() == Location.NOT_SET) {
                // Size/type unknown yet: read the record header in place,
                // which also advances the file pointer to the payload.
                raf.seek(location.getOffset());
                location.setSize(raf.readInt());
                location.setType(raf.readByte());
            } else {
                // Header already known: seek straight past it to the payload.
                raf.seek(Journal.HEADER_SIZE + location.getOffset());
            }
            if (location.isBatchControlRecord()) {
                // Batch control records carry an explicit payload length.
                byte[] data = new byte[raf.readInt()];
                raf.readFully(data);
                result = new Buffer(data, 0, data.length);
            } else {
                byte[] data = new byte[location.getSize() - Journal.HEADER_SIZE];
                raf.readFully(data);
                result = new Buffer(data, 0, data.length);
            }
        } catch (RuntimeException e) {
            // Preserve the original exception as the cause instead of
            // flattening it into the message, so callers keep the stack trace.
            throw new IOException("Invalid location: " + location, e);
        } finally {
            threadLock.unlock();
            accessorLock.unlock();
        }
    }
    if (!location.isDeletedRecord()) {
        return result;
    } else {
        throw new IOException("Deleted location: " + location);
    }
}
use of org.fusesource.hawtbuf.Buffer in project hawtjournal by fusesource.
From the class Journal, method compactDataFile.
// Compacts a journal data file: copies every surviving user record into a
// temporary ".tmp" file, then swaps the temporary file in for the original.
// NOTE(review): the original file is deleted BEFORE the rename; if the rename
// then fails, the data survives only in the .tmp file — confirm a recovery
// path exists for that window.
private void compactDataFile(DataFile currentFile, Location firstUserLocation) throws IOException {
// Temporary file alongside the original: "<prefix><id>.tmp<suffix>",
// reusing the same data file id.
DataFile tmpFile = new DataFile(new File(currentFile.getFile().getParent(), filePrefix + currentFile.getDataFileId() + ".tmp" + fileSuffix), currentFile.getDataFileId());
RandomAccessFile raf = tmpFile.openRandomAccessFile();
try {
Location currentUserLocation = firstUserLocation;
// Single batch starting at offset 0 of the new file; all surviving
// records are appended to it and flushed in one perform() call below.
WriteBatch batch = new WriteBatch(tmpFile, 0);
batch.prepareBatch();
// Walk the chain of USER_RECORD_TYPE locations, copying each payload.
while (currentUserLocation != null) {
Buffer data = accessor.readLocation(currentUserLocation);
WriteCommand write = new WriteCommand(new Location(currentUserLocation), data, true);
batch.appendBatch(write);
currentUserLocation = goToNextLocation(currentUserLocation, Location.USER_RECORD_TYPE, false);
}
batch.perform(raf, null, true);
} finally {
if (raf != null) {
raf.close();
}
}
if (currentFile.getFile().delete()) {
accessor.dispose(currentFile);
// Keep the journal's running length consistent: subtract the old file's
// size, add the compacted file's size.
totalLength.addAndGet(-currentFile.getLength());
totalLength.addAndGet(tmpFile.getLength());
if (tmpFile.getFile().renameTo(currentFile.getFile())) {
currentFile.setLength(tmpFile.getLength());
} else {
throw new IOException("Cannot rename file: " + tmpFile.getFile());
}
} else {
throw new IOException("Cannot remove file: " + currentFile.getFile());
}
}
use of org.fusesource.hawtbuf.Buffer in project camel by apache.
From the class LevelDBAggregateNotLostTest, method testLevelDBAggregateNotLost.
@Test
public void testLevelDBAggregateNotLost() throws Exception {
    // The aggregate should complete with all five bodies concatenated, and
    // nothing should have reached the result endpoint.
    getMockEndpoint("mock:aggregated").expectedBodiesReceived("ABCDE");
    getMockEndpoint("mock:result").expectedMessageCount(0);

    // Send the five correlated messages that make up the aggregate.
    for (String body : new String[] {"A", "B", "C", "D", "E"}) {
        template.sendBodyAndHeader("direct:start", body, "id", 123);
    }

    assertMockEndpointsSatisfied(30, TimeUnit.SECONDS);
    Thread.sleep(1000);

    String exchangeId = getMockEndpoint("mock:aggregated").getReceivedExchanges().get(0).getExchangeId();

    // The exchange should now be in the completed repository, retrievable by id.
    final LevelDBFile levelDBFile = repo.getLevelDBFile();
    final LevelDBCamelCodec codec = new LevelDBCamelCodec();
    byte[] stored = levelDBFile.getDb().get(keyBuilder("repo1-completed", exchangeId));

    // The exchange was not lost: unmarshal it and verify all its details.
    assertNotNull(stored);
    Exchange completed = codec.unmarshallExchange(context, new Buffer(stored));
    assertNotNull(completed);

    // The exchange id must be retained across the round trip.
    assertEquals(exchangeId, completed.getExchangeId());
    assertEquals("ABCDE", completed.getIn().getBody());
    assertEquals(123, completed.getIn().getHeader("id"));
    assertEquals("size", completed.getProperty(Exchange.AGGREGATED_COMPLETED_BY));
    assertEquals(5, completed.getProperty(Exchange.AGGREGATED_SIZE));
    // Correlation keys are stored as String.
    assertEquals("123", completed.getProperty(Exchange.AGGREGATED_CORRELATION_KEY));
}
use of org.fusesource.hawtbuf.Buffer in project camel by apache.
From the class LevelDBAggregationRepository, method confirm.
/**
 * Removes the completed-exchange entry for the given exchange id from the
 * LevelDB store, confirming that the exchange has been fully processed.
 *
 * @param camelContext the Camel context (unused here, part of the SPI contract)
 * @param exchangeId   id of the exchange to confirm/remove
 */
public void confirm(final CamelContext camelContext, final String exchangeId) {
    LOG.debug("Confirming exchangeId [{}]", exchangeId);
    byte[] confirmedLDBKey = keyBuilder(getRepositoryNameCompleted(), exchangeId);
    byte[] rc = levelDBFile.getDb().get(confirmedLDBKey);
    if (rc != null) {
        levelDBFile.getDb().delete(confirmedLDBKey);
        // Guard the trace so the Buffer wrapper is only allocated when trace
        // logging is enabled — SLF4J parameterization defers formatting, not
        // argument construction.
        if (LOG.isTraceEnabled()) {
            LOG.trace("Removed confirm index {} -> {}", exchangeId, new Buffer(rc));
        }
    }
}
use of org.fusesource.hawtbuf.Buffer in project camel by apache.
From the class LevelDBAggregationRepository, method recover.
/**
 * Recovers a completed exchange from the LevelDB store by its exchange id.
 *
 * @param camelContext context used to unmarshal the stored exchange
 * @param exchangeId   id of the exchange to recover
 * @return the recovered exchange, or {@code null} if none is stored
 */
public Exchange recover(CamelContext camelContext, final String exchangeId) {
    Exchange answer = null;
    try {
        final byte[] completedLDBKey = keyBuilder(getRepositoryNameCompleted(), exchangeId);
        final byte[] stored = levelDBFile.getDb().get(completedLDBKey);
        if (stored != null) {
            answer = codec.unmarshallExchange(camelContext, new Buffer(stored));
        }
    } catch (IOException e) {
        throw new RuntimeException("Error recovering exchangeId " + exchangeId + " from repository " + repositoryName, e);
    }
    LOG.debug("Recovering exchangeId [{}] -> {}", exchangeId, answer);
    return answer;
}
Aggregations