use of alluxio.proto.journal.Journal in project alluxio by Alluxio.
the class AccessTimeUpdaterTest method updateAccessTimeAsync.
@Test
public void updateAccessTimeAsync() throws Exception {
  mAccessTimeUpdater = new AccessTimeUpdater(mFileSystemMaster, mInodeTree,
      mContext.getJournalSystem(), 10 * Constants.SECOND_MS, 0, 0);
  mAccessTimeUpdater.start(mScheduler);
  String path = "/foo";
  createInode(path, CreateFileContext.defaults());
  JournalContext journalContext = mock(JournalContext.class);
  when(journalContext.get()).thenReturn(journalContext);
  when(mFileSystemMaster.createJournalContext()).thenReturn(journalContext);
  long accessTime = CommonUtils.getCurrentMs() + 100L;
  long inodeId;
  try (LockedInodePath lockedInodes =
      mInodeTree.lockFullInodePath(new AlluxioURI(path), InodeTree.LockPattern.READ)) {
    mAccessTimeUpdater.updateAccessTime(journalContext, lockedInodes.getInode(), accessTime);
    inodeId = lockedInodes.getInode().getId();
  }
  // verify inode attribute is updated
  assertEquals(accessTime, mInodeStore.get(inodeId).get().getLastAccessTimeMs());
  mScheduler.jumpAndExecute(1, TimeUnit.SECONDS);
  // verify journal entry is NOT logged yet
  verify(journalContext, never()).append(any(Journal.JournalEntry.class));
  // wait for the flush to complete
  mScheduler.jumpAndExecute(11, TimeUnit.SECONDS);
  // verify journal entry is logged after the flush interval
  ArgumentCaptor<Journal.JournalEntry> captor =
      ArgumentCaptor.forClass(Journal.JournalEntry.class);
  verify(journalContext).append(captor.capture());
  assertTrue(captor.getValue().hasUpdateInode());
  assertEquals(inodeId, captor.getValue().getUpdateInode().getId());
  assertEquals(accessTime, captor.getValue().getUpdateInode().getLastAccessTimeMs());
}
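The captured entry is inspected through its generated protobuf accessors. As a rough sketch, the UpdateInode entry the updater is expected to append could also be built by hand inside the test body, reusing its local variables; the alluxio.proto.journal.File.UpdateInodeEntry builder shape is inferred from the getters used above and should be treated as an assumption.
// Sketch only: hand-building the journal entry the test expects the updater to append.
// The UpdateInodeEntry builder shape is inferred from the getters used above (assumption).
Journal.JournalEntry expected = Journal.JournalEntry.newBuilder()
    .setUpdateInode(alluxio.proto.journal.File.UpdateInodeEntry.newBuilder()
        .setId(inodeId)
        .setLastAccessTimeMs(accessTime)
        .build())
    .build();
assertEquals(expected.getUpdateInode().getId(), captor.getValue().getUpdateInode().getId());
assertEquals(expected.getUpdateInode().getLastAccessTimeMs(),
    captor.getValue().getUpdateInode().getLastAccessTimeMs());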
use of alluxio.proto.journal.Journal in project alluxio by Alluxio.
the class InodeTreeTest method streamToJournalCheckpoint.
/**
* Tests that streaming to a journal checkpoint works.
*/
@Test
public void streamToJournalCheckpoint() throws Exception {
  verifyJournal(mTree, Arrays.asList(getInodeByPath("/")));
  // test nested URI
  createPath(mTree, NESTED_FILE_URI, sNestedFileContext);
  verifyJournal(mTree, StreamUtils.map(path -> getInodeByPath(path),
      Arrays.asList("/", "/nested", "/nested/test", "/nested/test/file")));
  // add a sibling of test and verify journaling is in correct order (breadth first)
  createPath(mTree, new AlluxioURI("/nested/test1/file1"), sNestedFileContext);
  verifyJournal(mTree, StreamUtils.map(path -> getInodeByPath(path),
      Arrays.asList("/", "/nested", "/nested/test", "/nested/test1",
          "/nested/test/file", "/nested/test1/file1")));
}
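The breadth-first ordering implies a weaker, easy-to-check invariant: every journaled path must be preceded by its parent. A minimal, hypothetical check of that invariant over the expected order, using only string manipulation rather than the actual verifyJournal helper:
// Hypothetical check (not the actual verifyJournal helper): every journaled path
// must appear after its parent in the expected breadth-first order.
List<String> journaledOrder = Arrays.asList("/", "/nested", "/nested/test", "/nested/test1",
    "/nested/test/file", "/nested/test1/file1");
for (int i = 0; i < journaledOrder.size(); i++) {
  String child = journaledOrder.get(i);
  if (!child.equals("/")) {
    String parent = child.substring(0, Math.max(child.lastIndexOf('/'), 1));
    assertTrue("parent " + parent + " must be journaled before " + child,
        journaledOrder.indexOf(parent) < i);
  }
}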
use of alluxio.proto.journal.Journal in project alluxio by Alluxio.
the class Database method sync.
/**
* Syncs the metadata from the under db. To avoid concurrent sync operations, this requires
* external synchronization.
*
* @param context journal context
* @return the resulting sync status
*/
public SyncStatus sync(JournalContext context) throws IOException {
  // Keep track of the status of each syncing table.
  // Synchronization is necessary if accessed concurrently from multiple threads
  SyncStatus.Builder builder = SyncStatus.newBuilder();
  if (!mConfigPath.equals(CatalogProperty.DB_CONFIG_FILE.getDefaultValue())) {
    if (!Files.exists(Paths.get(mConfigPath))) {
      throw new FileNotFoundException(mConfigPath);
    }
    ObjectMapper mapper = new ObjectMapper();
    try {
      mDbConfig = mapper.readValue(new File(mConfigPath), DbConfig.class);
    } catch (JsonProcessingException e) {
      LOG.error("Failed to deserialize UDB config file {}; the database remains unsynced",
          mConfigPath, e);
      throw e;
    }
  }
  DatabaseInfo newDbInfo = mUdb.getDatabaseInfo();
  if (!newDbInfo.equals(mDatabaseInfo)) {
    applyAndJournal(context, Journal.JournalEntry.newBuilder()
        .setUpdateDatabaseInfo(toJournalProto(newDbInfo, mName)).build());
  }
  Set<String> udbTableNames = new HashSet<>(mUdb.getTableNames());
  // keeps track of how many tables have been synced
  final AtomicInteger tablesSynced = new AtomicInteger();
  // # of synced tables, after which a log message is printed for progress
  final int progressBatch =
      (udbTableNames.size() < 100) ? udbTableNames.size() : udbTableNames.size() / 10;
  // sync each table in parallel, with the executor service
  List<Callable<Void>> tasks = new ArrayList<>(udbTableNames.size());
  final Database thisDb = this;
  for (String tableName : udbTableNames) {
    if (mIgnoreTables.contains(tableName)) {
      // this table should be ignored.
      builder.addTablesIgnored(tableName);
      tablesSynced.incrementAndGet();
      continue;
    }
    tasks.add(() -> {
      // Save all exceptions
      try {
        Table previousTable = mTables.get(tableName);
        UdbTable udbTable = mUdb.getTable(tableName, mDbConfig.getUdbBypassSpec());
        Table newTable = Table.create(thisDb, udbTable, previousTable);
        if (newTable != null) {
          // table was created or was updated
          alluxio.proto.journal.Table.AddTableEntry addTableEntry =
              newTable.getTableJournalProto();
          Journal.JournalEntry entry =
              Journal.JournalEntry.newBuilder().setAddTable(addTableEntry).build();
          applyAndJournal(context, entry);
          // split the potentially large table entry into multiple smaller table-partition entries
          newTable.getTablePartitionsJournalProto().forEach((partitionsEntry) -> {
            applyAndJournal(context,
                Journal.JournalEntry.newBuilder().setAddTablePartitions(partitionsEntry).build());
          });
          synchronized (builder) {
            builder.addTablesUpdated(tableName);
          }
        } else {
          synchronized (builder) {
            builder.addTablesUnchanged(tableName);
          }
        }
      } catch (Exception e) {
        LOG.error(String.format("Sync thread failed for %s.%s", thisDb.mName, tableName), e);
        synchronized (builder) {
          builder.putTablesErrors(tableName, e.toString());
        }
      } finally {
        int syncedTables = tablesSynced.incrementAndGet();
        int percentage = -1;
        // Only log at regular intervals, or when complete
        if (syncedTables % progressBatch == 0) {
          // compute percentage, cap at 99%
          percentage = Math.min(Math.round(100.0f * syncedTables / udbTableNames.size()), 99);
        }
        if (syncedTables == udbTableNames.size()) {
          percentage = 100;
        }
        if (percentage != -1) {
          LOG.info("Syncing db {} progress: completed {} of {} tables ({}%)", mName, syncedTables,
              udbTableNames.size(), percentage);
        }
      }
      return null;
    });
  }
  // create a thread pool to parallelize the sync
  int threads;
  try {
    threads = Integer.parseInt(mConfig.get(CatalogProperty.DB_SYNC_THREADS));
  } catch (NumberFormatException e) {
    LOG.warn("Catalog property {} with value {} cannot be parsed as an int",
        CatalogProperty.DB_SYNC_THREADS.getName(), mConfig.get(CatalogProperty.DB_SYNC_THREADS));
    threads = CatalogProperty.DEFAULT_DB_SYNC_THREADS;
  }
  if (threads < 1) {
    // if invalid, set to the default
    threads = CatalogProperty.DEFAULT_DB_SYNC_THREADS;
  }
  ExecutorService service = ExecutorServiceFactories
      .fixedThreadPool(String.format("Catalog-Sync-%s", mName), threads).create();
  try {
    CommonUtils.invokeAll(service, tasks, mUdbSyncTimeoutMs);
  } catch (Exception e) {
    throw new IOException("Failed to sync database " + mName + ". error: " + e.toString(), e);
  } finally {
    // shutdown the thread pool
    service.shutdownNow();
    String errorMessage =
        String.format("waiting for db-sync thread pool to shut down. db: %s", mName);
    try {
      if (!service.awaitTermination(5, TimeUnit.SECONDS)) {
        LOG.warn("Timed out " + errorMessage);
      }
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      LOG.warn("Interrupted while " + errorMessage);
    }
  }
  for (Table existingTable : mTables.values()) {
    if (!udbTableNames.contains(existingTable.getName())) {
      // this table no longer exists in udb
      alluxio.proto.journal.Table.RemoveTableEntry removeTableEntry =
          alluxio.proto.journal.Table.RemoveTableEntry.newBuilder()
              .setDbName(mName)
              .setTableName(existingTable.getName())
              .setVersion(existingTable.getVersion())
              .build();
      Journal.JournalEntry entry =
          Journal.JournalEntry.newBuilder().setRemoveTable(removeTableEntry).build();
      applyAndJournal(context, entry);
      builder.addTablesRemoved(existingTable.getName());
    }
  }
  return builder.build();
}
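The returned SyncStatus is a protobuf message whose generated getters mirror the builder calls above. A hedged usage sketch for a caller inspecting the result; how the Database instance and JournalContext are obtained is assumed to happen elsewhere.
// Hedged usage sketch: inspecting the SyncStatus returned by Database#sync.
// The getters are generated protobuf accessors matching the builder calls above;
// obtaining the Database instance and the JournalContext is assumed for illustration.
SyncStatus status = database.sync(journalContext);
LOG.info("Sync finished: {} updated, {} unchanged, {} ignored, {} removed, {} errors",
    status.getTablesUpdatedCount(), status.getTablesUnchangedCount(),
    status.getTablesIgnoredCount(), status.getTablesRemovedCount(),
    status.getTablesErrorsCount());
status.getTablesErrorsMap().forEach((table, error) ->
    LOG.warn("Table {} failed to sync: {}", table, error));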
use of alluxio.proto.journal.Journal in project alluxio by Alluxio.
the class MountTable method streamToJournalCheckpoint.
@Override
public void streamToJournalCheckpoint(JournalOutputStream outputStream) throws IOException {
  for (Map.Entry<String, MountInfo> entry : mMountTable.entrySet()) {
    String alluxioPath = entry.getKey();
    MountInfo info = entry.getValue();
    // do not journal the root mount point
    if (alluxioPath.equals(ROOT)) {
      continue;
    }
    Map<String, String> properties = info.getOptions().getProperties();
    List<File.StringPairEntry> protoProperties = new ArrayList<>(properties.size());
    for (Map.Entry<String, String> property : properties.entrySet()) {
      protoProperties.add(File.StringPairEntry.newBuilder()
          .setKey(property.getKey())
          .setValue(property.getValue())
          .build());
    }
    AddMountPointEntry addMountPoint = AddMountPointEntry.newBuilder()
        .setAlluxioPath(alluxioPath)
        .setUfsPath(info.getUfsUri().toString())
        .setReadOnly(info.getOptions().isReadOnly())
        .addAllProperties(protoProperties)
        .setShared(info.getOptions().isShared())
        .build();
    Journal.JournalEntry journalEntry =
        Journal.JournalEntry.newBuilder().setAddMountPoint(addMountPoint).build();
    outputStream.writeEntry(journalEntry);
  }
}
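On journal replay the same entry can be read back through the generated protobuf accessors, whose names mirror the builder calls above. A hedged sketch of decoding an AddMountPointEntry; this is illustrative only and not the MountTable replay path itself.
// Hedged sketch: decoding an AddMountPointEntry read back from a journal entry.
// Accessor names mirror the builder calls used above (generated protobuf code).
if (journalEntry.hasAddMountPoint()) {
  AddMountPointEntry mountPoint = journalEntry.getAddMountPoint();
  String alluxioPath = mountPoint.getAlluxioPath();
  String ufsPath = mountPoint.getUfsPath();
  boolean readOnly = mountPoint.getReadOnly();
  boolean shared = mountPoint.getShared();
  Map<String, String> properties = new HashMap<>();
  for (File.StringPairEntry pair : mountPoint.getPropertiesList()) {
    properties.put(pair.getKey(), pair.getValue());
  }
  // ... apply the decoded mount point to the in-memory mount table
}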
use of alluxio.proto.journal.Journal in project alluxio by Alluxio.
the class RaftJournalTest method createNewJournalSystem.
/**
* Creates a new raft journal system that joins the cluster of the given seed journal system.
*/
private RaftJournalSystem createNewJournalSystem(RaftJournalSystem seed) throws Exception {
  List<InetSocketAddress> clusterAddresses = seed.getQuorumServerInfoList().stream()
      .map(QuorumServerInfo::getServerAddress)
      .map(address -> InetSocketAddress.createUnresolved(address.getHost(), address.getRpcPort()))
      .collect(Collectors.toList());
  List<Integer> freePorts = getFreePorts(1);
  InetSocketAddress joinAddr = InetSocketAddress.createUnresolved("localhost", freePorts.get(0));
  clusterAddresses.add(joinAddr);
  return RaftJournalSystem.create(RaftJournalConfiguration
      .defaults(NetworkAddressUtils.ServiceType.MASTER_RAFT)
      .setPath(mFolder.newFolder())
      .setClusterAddresses(clusterAddresses)
      .setLocalAddress(joinAddr));
}
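The method relies on a getFreePorts helper that is not shown here. A hypothetical sketch of such a helper using plain java.net.ServerSocket; the real test utility may allocate ports differently.
// Hypothetical sketch of the getFreePorts helper used above: bind ephemeral
// server sockets to discover currently free ports, then release them.
private List<Integer> getFreePorts(int count) throws IOException {
  List<ServerSocket> sockets = new ArrayList<>(count);
  List<Integer> ports = new ArrayList<>(count);
  try {
    for (int i = 0; i < count; i++) {
      ServerSocket socket = new ServerSocket(0); // port 0 asks the OS for any free port
      sockets.add(socket);
      ports.add(socket.getLocalPort());
    }
  } finally {
    for (ServerSocket socket : sockets) {
      socket.close();
    }
  }
  return ports;
}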