Use of org.apache.activemq.artemis.core.io.SequentialFile in project activemq-artemis by apache.
The class JDBCSequentialFileFactoryTest, method setup.
@Before
public void setup() throws Exception {
   executor = Executors.newSingleThreadExecutor(ActiveMQThreadFactory.defaultThreadFactory());
   String connectionUrl = "jdbc:derby:target/data;create=true";
   String tableName = "FILES";
   factory = new JDBCSequentialFileFactory(connectionUrl, className, JDBCUtils.getSQLProvider(className, tableName, SQLProvider.DatabaseStoreType.PAGE), executor, new IOCriticalErrorListener() {
      @Override
      public void onIOException(Throwable code, String message, SequentialFile file) {
      }
   });
   factory.start();
}
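A teardown counterpart is implied but not shown here. A minimal sketch, assuming only what the snippet itself uses: stop() declared on the factory (SequentialFileFactory declares start() and stop()) and a plain ExecutorService; the real test's cleanup may differ.

@After
public void tearDown() throws Exception {
   factory.stop();         // releases the JDBC-backed factory started in setup()
   executor.shutdown();    // stops the single-thread executor
}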
Use of org.apache.activemq.artemis.core.io.SequentialFile in project activemq-artemis by apache.
The class JDBCSequentialFileFactoryTest, method testCreateFiles.
@Test
public void testCreateFiles() throws Exception {
   int noFiles = 100;
   List<SequentialFile> files = new LinkedList<>();
   Set<String> fileNames = new HashSet<>();
   for (int i = 0; i < noFiles; i++) {
      String fileName = UUID.randomUUID().toString() + ".txt";
      fileNames.add(fileName);
      SequentialFile file = factory.createSequentialFile(fileName);
      // We create files on Open
      file.open();
      files.add(file);
   }
   List<String> queryFileNames = factory.listFiles("txt");
   assertTrue(queryFileNames.containsAll(fileNames));
   for (SequentialFile file : files) {
      file.close();
   }
   Assert.assertEquals(0, factory.getNumberOfOpenFiles());
}
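The explicit close() loop is what makes the final assertion pass: the factory counts open handles. A hedged companion sketch (not from the project) exercising delete(), which the readControlFile example below also relies on:

@Test
public void testDeletedFileIsNotListed() throws Exception {
   SequentialFile file = factory.createSequentialFile("delete-me.txt");
   file.open();     // the backing entry is created on open
   file.close();
   file.delete();   // should remove the entry from the store
   Assert.assertFalse(factory.listFiles("txt").contains("delete-me.txt"));
}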
Use of org.apache.activemq.artemis.core.io.SequentialFile in project activemq-artemis by apache.
The class AbstractJournalUpdateTask, method readControlFile.
public static SequentialFile readControlFile(final SequentialFileFactory fileFactory,
                                             final List<String> dataFiles,
                                             final List<String> newFiles,
                                             final List<Pair<String, String>> renameFile) throws Exception {
   SequentialFile controlFile = fileFactory.createSequentialFile(AbstractJournalUpdateTask.FILE_COMPACT_CONTROL);
   if (controlFile.exists()) {
      JournalFile file = new JournalFileImpl(controlFile, 0, JournalImpl.FORMAT_VERSION);
      final ArrayList<RecordInfo> records = new ArrayList<>();
      JournalImpl.readJournalFile(fileFactory, file, new JournalReaderCallbackAbstract() {
         @Override
         public void onReadAddRecord(final RecordInfo info) throws Exception {
            records.add(info);
         }
      });
      if (records.size() == 0) {
         // the record is damaged
         controlFile.delete();
         return null;
      } else {
         ActiveMQBuffer input = ActiveMQBuffers.wrappedBuffer(records.get(0).data);
         int numberDataFiles = input.readInt();
         for (int i = 0; i < numberDataFiles; i++) {
            dataFiles.add(input.readUTF());
         }
         int numberNewFiles = input.readInt();
         for (int i = 0; i < numberNewFiles; i++) {
            newFiles.add(input.readUTF());
         }
         int numberRenames = input.readInt();
         for (int i = 0; i < numberRenames; i++) {
            String from = input.readUTF();
            String to = input.readUTF();
            renameFile.add(new Pair<>(from, to));
         }
      }
      return controlFile;
   } else {
      return null;
   }
}
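A hypothetical caller of readControlFile, illustrating how the three out-parameter lists are filled and how the null return is interpreted (every name other than the method itself is an assumption for illustration):

List<String> dataFiles = new ArrayList<>();
List<String> newFiles = new ArrayList<>();
List<Pair<String, String>> renames = new ArrayList<>();
SequentialFile control = AbstractJournalUpdateTask.readControlFile(fileFactory, dataFiles, newFiles, renames);
if (control == null) {
   // No control file, or its record was damaged: there is no interrupted compact to finish
} else {
   // A compact was interrupted: the lists now say which files to keep, add, and rename
}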
Use of org.apache.activemq.artemis.core.io.SequentialFile in project activemq-artemis by apache.
The class AbstractJournalUpdateTask, method writeControlFile.
// Public --------------------------------------------------------

public static SequentialFile writeControlFile(final SequentialFileFactory fileFactory,
                                              final List<JournalFile> files,
                                              final List<JournalFile> newFiles,
                                              final List<Pair<String, String>> renames) throws Exception {
   SequentialFile controlFile = fileFactory.createSequentialFile(AbstractJournalUpdateTask.FILE_COMPACT_CONTROL);
   try {
      controlFile.open(1, false);
      JournalImpl.initFileHeader(fileFactory, controlFile, 0, 0);
      ActiveMQBuffer filesToRename = ActiveMQBuffers.dynamicBuffer(1);
      if (files == null) {
         filesToRename.writeInt(0);
      } else {
         filesToRename.writeInt(files.size());
         for (JournalFile file : files) {
            filesToRename.writeUTF(file.getFile().getFileName());
         }
      }
      if (newFiles == null) {
         filesToRename.writeInt(0);
      } else {
         filesToRename.writeInt(newFiles.size());
         for (JournalFile file : newFiles) {
            filesToRename.writeUTF(file.getFile().getFileName());
         }
      }
      // Renames from clean up third
      if (renames == null) {
         filesToRename.writeInt(0);
      } else {
         filesToRename.writeInt(renames.size());
         for (Pair<String, String> rename : renames) {
            filesToRename.writeUTF(rename.getA());
            filesToRename.writeUTF(rename.getB());
         }
      }
      JournalInternalRecord controlRecord = new JournalAddRecord(true, 1, (byte) 0, EncoderPersister.getInstance(), new ByteArrayEncoding(filesToRename.toByteBuffer().array()));
      ActiveMQBuffer renameBuffer = ActiveMQBuffers.dynamicBuffer(filesToRename.writerIndex());
      controlRecord.setFileID(0);
      controlRecord.encode(renameBuffer);
      ByteBuffer writeBuffer = fileFactory.newBuffer(renameBuffer.writerIndex());
      writeBuffer.put(renameBuffer.toByteBuffer().array(), 0, renameBuffer.writerIndex());
      writeBuffer.rewind();
      controlFile.writeDirect(writeBuffer, true);
      return controlFile;
   } finally {
      controlFile.close();
   }
}
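writeControlFile is the mirror image of readControlFile above: both walk the same three sections in the same order (data files, then new files, then renames), so a file written by one is decodable by the other. A hedged round-trip sketch, assuming dataFiles, newDataFiles and renames are already in scope:

// Persist the compact plan before touching any journal file...
SequentialFile control = AbstractJournalUpdateTask.writeControlFile(fileFactory, dataFiles, newDataFiles, renames);
// ...so that after a crash the same plan can be recovered:
List<String> recoveredData = new ArrayList<>();
List<String> recoveredNew = new ArrayList<>();
List<Pair<String, String>> recoveredRenames = new ArrayList<>();
AbstractJournalUpdateTask.readControlFile(fileFactory, recoveredData, recoveredNew, recoveredRenames);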
Use of org.apache.activemq.artemis.core.io.SequentialFile in project activemq-artemis by apache.
The class JournalImpl, method compact.
/**
 * Note: This method can't be called from the main executor, as it will invoke other methods
 * depending on it.
 *
 * Note: the only synchronized methods on the journal are the ones responsible for the life-cycle,
 * such as stop and start; records will still come in while this is being executed.
 */
public synchronized void compact() throws Exception {
   if (compactor != null) {
      throw new IllegalStateException("There is a pending compacting operation");
   }
   if (ActiveMQJournalLogger.LOGGER.isDebugEnabled()) {
      ActiveMQJournalLogger.LOGGER.debug("JournalImpl::compact compacting journal " + (++compactCount));
   }
   compactorLock.writeLock().lock();
   try {
      ArrayList<JournalFile> dataFilesToProcess = new ArrayList<>(filesRepository.getDataFilesCount());
      boolean previousReclaimValue = isAutoReclaim();
      try {
         ActiveMQJournalLogger.LOGGER.debug("Starting compacting operation on journal");
         onCompactStart();
         // We need to guarantee that the journal is frozen for this short time
         // We don't freeze the journal as we compact, only for the short time where we replace records
         journalLock.writeLock().lock();
         try {
            if (state != JournalState.LOADED) {
               return;
            }
            onCompactLockingTheJournal();
            setAutoReclaim(false);
            // We need to move to the next file, as we need a clear start for negatives and positives counts
            moveNextFile(false);
            // Take the snapshots and replace the structures
            dataFilesToProcess.addAll(filesRepository.getDataFiles());
            filesRepository.clearDataFiles();
            if (dataFilesToProcess.size() == 0) {
               logger.trace("Finishing compacting, nothing to process");
               return;
            }
            compactor = new JournalCompactor(fileFactory, this, filesRepository, records.keysLongHashSet(), dataFilesToProcess.get(0).getFileID());
            transactions.forEach((id, pendingTransaction) -> {
               compactor.addPendingTransaction(id, pendingTransaction.getPositiveArray());
               pendingTransaction.setCompacting();
            });
            // We will calculate the new records during compacting; this determines the position
            // the records will take after compacting
            records.clear();
         } finally {
            journalLock.writeLock().unlock();
         }
         Collections.sort(dataFilesToProcess, new JournalFileComparator());
         for (final JournalFile file : dataFilesToProcess) {
            try {
               JournalImpl.readJournalFile(fileFactory, file, compactor);
            } catch (Throwable e) {
               ActiveMQJournalLogger.LOGGER.compactReadError(file);
               throw new Exception("Error on reading compacting for " + file, e);
            }
         }
         compactor.flush();
         // pointcut for tests
         // We need to test concurrent updates on the journal, as the compacting is being performed.
         // Usually tests will use this to hold the compacting while other structures are being updated.
         onCompactDone();
         List<JournalFile> newDatafiles = null;
         JournalCompactor localCompactor = compactor;
         SequentialFile controlFile = createControlFile(dataFilesToProcess, compactor.getNewDataFiles(), null);
         journalLock.writeLock().lock();
         try {
            // Need to clear the compactor here, or the replay commands will send commands back (infinite loop)
            compactor = null;
            onCompactLockingTheJournal();
            newDatafiles = localCompactor.getNewDataFiles();
            // Restore newRecords created during compacting
            localCompactor.getNewRecords().forEach((id, newRecord) -> {
               records.put(id, newRecord);
            });
            // Restore compacted dataFiles
            for (int i = newDatafiles.size() - 1; i >= 0; i--) {
               JournalFile fileToAdd = newDatafiles.get(i);
               if (logger.isTraceEnabled()) {
                  logger.trace("Adding file " + fileToAdd + " back as datafile");
               }
               filesRepository.addDataFileOnTop(fileToAdd);
            }
            if (logger.isTraceEnabled()) {
               logger.trace("There are " + filesRepository.getDataFilesCount() + " datafiles now");
            }
            // Replay pending commands (including updates, deletes and commits)
            localCompactor.getNewTransactions().forEach((id, newTransaction) -> newTransaction.replaceRecordProvider(this));
            localCompactor.replayPendingCommands();
            // Merge transactions back after compacting.
            // This has to be done after the replay pending commands, as we need to delete commits
            // that happened during the compacting
            localCompactor.getNewTransactions().forEach((id, newTransaction) -> {
               if (logger.isTraceEnabled()) {
                  logger.trace("Merging pending transaction " + newTransaction + " after compacting the journal");
               }
               JournalTransaction liveTransaction = transactions.get(newTransaction.getId());
               if (liveTransaction != null) {
                  liveTransaction.merge(newTransaction);
               } else {
                  ActiveMQJournalLogger.LOGGER.compactMergeError(newTransaction.getId());
               }
            });
         } finally {
            journalLock.writeLock().unlock();
         }
         // At this point the journal is unlocked. We keep renaming files while the journal is already operational
         renameFiles(dataFilesToProcess, newDatafiles);
         deleteControlFile(controlFile);
         ActiveMQJournalLogger.LOGGER.debug("Finished compacting on journal");
      } finally {
         // An Exception was probably thrown, and the compactor was not cleared
         if (compactor != null) {
            try {
               compactor.flush();
            } catch (Throwable ignored) {
            }
            compactor = null;
         }
         setAutoReclaim(previousReclaimValue);
      }
   } finally {
      compactorLock.writeLock().unlock();
      if (ActiveMQJournalLogger.LOGGER.isDebugEnabled()) {
         ActiveMQJournalLogger.LOGGER.debug("JournalImpl::compact finishing");
      }
   }
}
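As the javadoc warns, compact() must not run on the journal's own executor, and it refuses to start while a previous compactor is still pending. A hedged sketch of triggering it from a separate thread (journal is an assumed JournalImpl reference):

// Trigger compaction off the journal's executor; compact() is synchronized,
// so concurrent callers queue up rather than interleave
Thread compactThread = new Thread(() -> {
   try {
      journal.compact();
   } catch (Exception e) {
      // a still-pending compactor surfaces as IllegalStateException
      e.printStackTrace();
   }
}, "journal-compact");
compactThread.start();
compactThread.join();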