Use of java.util.Optional in project neo4j by neo4j.
The class StoreMigrator, method moveMigratedFiles:
@Override
public void moveMigratedFiles(File migrationDir, File storeDir, String versionToUpgradeFrom,
        String versionToUpgradeTo) throws IOException {
    // Move the migrated ones into the store directory
    StoreFile.fileOperation(MOVE, fileSystem, migrationDir, storeDir, StoreFile.currentStoreFiles(),
            true, // allow to skip non-existent source files
            ExistingTargetStrategy.OVERWRITE, // allow to overwrite target files
            StoreFileType.values());
    // Move the files with the page cache.
    try {
        Iterable<FileHandle> fileHandles = pageCache.streamFilesRecursive(migrationDir)::iterator;
        for (FileHandle fh : fileHandles) {
            Predicate<StoreFile> predicate =
                    storeFile -> storeFile.fileName(StoreFileType.STORE).equals(fh.getFile().getName());
            if (StreamSupport.stream(StoreFile.currentStoreFiles().spliterator(), false).anyMatch(predicate)) {
                final Optional<PagedFile> optionalPagedFile = pageCache.getExistingMapping(fh.getFile());
                if (optionalPagedFile.isPresent()) {
                    optionalPagedFile.get().close();
                }
                fh.rename(new File(storeDir, fh.getFile().getName()), StandardCopyOption.REPLACE_EXISTING);
            }
        }
    } catch (NoSuchFileException e) {
        // This means there were no files present only in the page cache, which is fine.
    }
    RecordFormats oldFormat = selectForVersion(versionToUpgradeFrom);
    RecordFormats newFormat = selectForVersion(versionToUpgradeTo);
    boolean movingAwayFromVersionTrailers =
            oldFormat.hasCapability(VERSION_TRAILERS) && !newFormat.hasCapability(VERSION_TRAILERS);
    if (movingAwayFromVersionTrailers) {
        StoreFile.removeTrailers(versionToUpgradeFrom, fileSystem, storeDir, pageCache.pageSize());
    }
    File neoStore = new File(storeDir, MetaDataStore.DEFAULT_NAME);
    long logVersion = MetaDataStore.getRecord(pageCache, neoStore, Position.LOG_VERSION);
    long lastCommittedTx = MetaDataStore.getRecord(pageCache, neoStore, Position.LAST_TRANSACTION_ID);
    // Update or add upgrade id, upgrade time, and other necessary neostore records
    updateOrAddNeoStoreFieldsAsPartOfMigration(migrationDir, storeDir, versionToUpgradeTo);
    // Delete old logs
    legacyLogs.deleteUnusedLogFiles(storeDir);
    if (movingAwayFromVersionTrailers) {
        // Write a check point in the log in order to make recovery work in the newer version
        new StoreMigratorCheckPointer(storeDir, fileSystem).checkPoint(logVersion, lastCommittedTx);
    }
}
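The Optional<PagedFile> returned by getExistingMapping above is unwrapped with an explicit isPresent()/get() pair before closing. As a side note, that pair can collapse into a single ifPresent call; here is a minimal, self-contained sketch of the same unwrap-and-close pattern, where Handle and existingMapping are hypothetical stand-ins for PagedFile and the page cache lookup:

import java.util.Optional;

public class OptionalCloseSketch {
    interface Handle {
        void close();
    }

    // Hypothetical stand-in for pageCache.getExistingMapping(file).
    static Optional<Handle> existingMapping(boolean mapped) {
        return mapped ? Optional.of(() -> System.out.println("closed")) : Optional.empty();
    }

    public static void main(String[] args) {
        existingMapping(true).ifPresent(Handle::close);  // closes the handle
        existingMapping(false).ifPresent(Handle::close); // no-op when empty
    }
}

One caveat: ifPresent takes a Consumer, which cannot throw checked exceptions, so if PagedFile.close() declares IOException (as it does in some Neo4j versions) the explicit isPresent()/get() form inside the try block is the form that compiles.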
Use of java.util.Optional in project ninja by ninjaframework.
The class TemplateEngineFreemarkerTest, method before:
@Before
public void before() throws Exception {
    // Setup that allows us to execute invoke(...) in a very minimal configuration.
    when(ninjaProperties.getWithDefault(FREEMARKER_CONFIGURATION_FILE_SUFFIX, ".ftl.html")).thenReturn(".ftl.html");
    templateEngineFreemarker = new TemplateEngineFreemarker(messages, lang, logger, templateEngineHelper,
            templateEngineManager, templateEngineFreemarkerReverseRouteMethod,
            templateEngineFreemarkerAssetsAtMethod, templateEngineFreemarkerWebJarsAtMethod, ninjaProperties);
    when(lang.getLanguage(any(Context.class), any(Optional.class))).thenReturn(Optional.<String>empty());
    Session session = Mockito.mock(Session.class);
    when(session.isEmpty()).thenReturn(true);
    when(context.getSession()).thenReturn(session);
    when(context.getRoute()).thenReturn(route);
    when(lang.getLocaleFromStringOrDefault(any(Optional.class))).thenReturn(Locale.ENGLISH);
    FlashScope flashScope = Mockito.mock(FlashScope.class);
    Map<String, String> flashScopeData = new HashMap<>();
    when(flashScope.getCurrentFlashCookieData()).thenReturn(flashScopeData);
    when(context.getFlashScope()).thenReturn(flashScope);
    when(templateEngineHelper.getTemplateForResult(any(Route.class), any(Result.class), Mockito.anyString()))
            .thenReturn("views/template.ftl.html");
    writer = new StringWriter();
    ResponseStreams responseStreams = mock(ResponseStreams.class);
    when(context.finalizeHeaders(any(Result.class))).thenReturn(responseStreams);
    when(responseStreams.getWriter()).thenReturn(writer);
}
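The stub for lang.getLanguage above returns Optional.<String>empty(), so the code under test sees an absent language and must supply its own fallback. A small self-contained sketch of how such an empty Optional is typically consumed (the names are illustrative, not taken from the Ninja codebase):

import java.util.Locale;
import java.util.Optional;

public class OptionalFallbackSketch {
    public static void main(String[] args) {
        // What the mocked lang.getLanguage(...) hands back in the test above:
        Optional<String> language = Optional.empty();
        // Fall back to a default instead of calling get() on an empty Optional,
        // which would throw NoSuchElementException:
        String effective = language.orElse(Locale.ENGLISH.getLanguage());
        System.out.println(effective); // prints "en"
    }
}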
Use of java.util.Optional in project neo4j by neo4j.
The class ServerPoliciesPlugin, method readEndpoints:
private List<Endpoint> readEndpoints(CoreTopology coreTopology, ReadReplicaTopology rrTopology, Policy policy) {
    Set<ServerInfo> possibleReaders = rrTopology.members().entrySet().stream()
            .map(entry -> new ServerInfo(entry.getValue().connectors().boltAddress(), entry.getKey(),
                    entry.getValue().groups()))
            .collect(Collectors.toSet());
    if (allowReadsOnFollowers || possibleReaders.size() == 0) {
        Set<MemberId> validCores = coreTopology.members().keySet();
        try {
            MemberId leader = leaderLocator.getLeader();
            validCores = validCores.stream()
                    .filter(memberId -> !memberId.equals(leader))
                    .collect(Collectors.toSet());
        } catch (NoLeaderFoundException ignored) {
            // We might end up using the leader for reading during this TTL; that should be fine in general.
        }
        for (MemberId validCore : validCores) {
            Optional<CoreServerInfo> coreServerInfo = coreTopology.find(validCore);
            if (coreServerInfo.isPresent()) {
                CoreServerInfo serverInfo = coreServerInfo.get();
                possibleReaders.add(new ServerInfo(serverInfo.connectors().boltAddress(), validCore,
                        serverInfo.groups()));
            }
        }
    }
    Set<ServerInfo> readers = policy.apply(possibleReaders);
    return readers.stream().map(r -> Endpoint.read(r.boltAddress())).collect(Collectors.toList());
}
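The for-loop over validCores unwraps each coreTopology.find result with an isPresent() check followed by get(). On Java 8 the same filtering can also be expressed as a stream pipeline; a generic, self-contained sketch where the String elements stand in for CoreServerInfo:

import java.util.Arrays;
import java.util.List;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;

public class OptionalStreamSketch {
    public static void main(String[] args) {
        List<Optional<String>> lookups = Arrays.asList(
                Optional.of("core-1"), Optional.<String>empty(), Optional.of("core-3"));
        // Keep only the lookups that found something, then unwrap them:
        Set<String> present = lookups.stream()
                .filter(Optional::isPresent)
                .map(Optional::get)
                .collect(Collectors.toSet());
        System.out.println(present); // [core-1, core-3] in some order
    }
}

From Java 9 onward the filter/map pair collapses further into .flatMap(Optional::stream).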
Use of java.util.Optional in project presto by prestodb.
The class HiveMetadata, method buildTableObject:
private static Table buildTableObject(String queryId, String schemaName, String tableName, String tableOwner,
        List<HiveColumnHandle> columnHandles, HiveStorageFormat hiveStorageFormat, List<String> partitionedBy,
        Optional<HiveBucketProperty> bucketProperty, Map<String, String> additionalTableParameters,
        Path targetPath, boolean external, String prestoVersion) {
    Map<String, HiveColumnHandle> columnHandlesByName = Maps.uniqueIndex(columnHandles, HiveColumnHandle::getName);
    List<Column> partitionColumns = partitionedBy.stream()
            .map(columnHandlesByName::get)
            .map(column -> new Column(column.getName(), column.getHiveType(), column.getComment()))
            .collect(toList());
    Set<String> partitionColumnNames = ImmutableSet.copyOf(partitionedBy);
    ImmutableList.Builder<Column> columns = ImmutableList.builder();
    for (HiveColumnHandle columnHandle : columnHandles) {
        String name = columnHandle.getName();
        HiveType type = columnHandle.getHiveType();
        if (!partitionColumnNames.contains(name)) {
            verify(!columnHandle.isPartitionKey(), "Column handles are not consistent with partitioned by property");
            columns.add(new Column(name, type, columnHandle.getComment()));
        } else {
            verify(columnHandle.isPartitionKey(), "Column handles are not consistent with partitioned by property");
        }
    }
    ImmutableMap.Builder<String, String> tableParameters = ImmutableMap.<String, String>builder()
            .put("comment", "Created by Presto")
            .put(PRESTO_VERSION_NAME, prestoVersion)
            .put(PRESTO_QUERY_ID_NAME, queryId)
            .putAll(additionalTableParameters);
    if (external) {
        tableParameters.put("EXTERNAL", "TRUE");
    }
    Table.Builder tableBuilder = Table.builder()
            .setDatabaseName(schemaName)
            .setTableName(tableName)
            .setOwner(tableOwner)
            .setTableType((external ? EXTERNAL_TABLE : MANAGED_TABLE).name())
            .setDataColumns(columns.build())
            .setPartitionColumns(partitionColumns)
            .setParameters(tableParameters.build());
    tableBuilder.getStorageBuilder()
            .setStorageFormat(fromHiveStorageFormat(hiveStorageFormat))
            .setBucketProperty(bucketProperty)
            .setLocation(targetPath.toString());
    return tableBuilder.build();
}
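Note how the Optional<HiveBucketProperty> parameter flows straight into setBucketProperty without being unwrapped: the builder stores the Optional itself, so an absent bucket property needs no null sentinel. A minimal sketch of that shape, where Storage is a hypothetical simplification rather than Presto's real Storage.Builder:

import java.util.Optional;

public class OptionalBuilderSketch {
    static final class Storage {
        private Optional<String> bucketProperty = Optional.empty();

        Storage setBucketProperty(Optional<String> bucketProperty) {
            this.bucketProperty = bucketProperty; // stored as-is, present or absent
            return this;
        }

        @Override
        public String toString() {
            return "Storage{bucketProperty=" + bucketProperty.orElse("<none>") + "}";
        }
    }

    public static void main(String[] args) {
        System.out.println(new Storage().setBucketProperty(Optional.of("bucketed by id")));
        System.out.println(new Storage().setBucketProperty(Optional.empty()));
    }
}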
Use of java.util.Optional in project presto by prestodb.
The class HiveMetadata, method beginInsert:
@Override
public HiveInsertTableHandle beginInsert(ConnectorSession session, ConnectorTableHandle tableHandle) {
    verifyJvmTimeZone();
    SchemaTableName tableName = schemaTableName(tableHandle);
    Optional<Table> table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName());
    if (!table.isPresent()) {
        throw new TableNotFoundException(tableName);
    }
    checkTableIsWritable(table.get());
    for (Column column : table.get().getDataColumns()) {
        if (!isWritableType(column.getType())) {
            throw new PrestoException(NOT_SUPPORTED,
                    format("Inserting into Hive table %s.%s with column type %s not supported",
                            table.get().getDatabaseName(), table.get().getTableName(), column.getType()));
        }
    }
    List<HiveColumnHandle> handles = hiveColumnHandles(connectorId, table.get()).stream()
            .filter(columnHandle -> !columnHandle.isHidden())
            .collect(toList());
    HiveStorageFormat tableStorageFormat = extractHiveStorageFormat(table.get());
    LocationHandle locationHandle =
            locationService.forExistingTable(metastore, session.getUser(), session.getQueryId(), table.get());
    HiveInsertTableHandle result = new HiveInsertTableHandle(connectorId, tableName.getSchemaName(),
            tableName.getTableName(), handles, session.getQueryId(),
            metastore.generatePageSinkMetadata(tableName), locationHandle,
            table.get().getStorage().getBucketProperty(), tableStorageFormat,
            respectTableFormat ? tableStorageFormat : defaultStorageFormat);
    Optional<Path> writePathRoot = locationService.writePathRoot(locationHandle);
    Path targetPathRoot = locationService.targetPathRoot(locationHandle);
    if (writePathRoot.isPresent()) {
        WriteMode mode = writePathRoot.get().equals(targetPathRoot)
                ? DIRECT_TO_TARGET_NEW_DIRECTORY
                : STAGE_AND_MOVE_TO_TARGET_DIRECTORY;
        metastore.declareIntentionToWrite(session, mode, writePathRoot.get(), result.getFilePrefix(), tableName);
    } else {
        metastore.declareIntentionToWrite(session, DIRECT_TO_TARGET_EXISTING_DIRECTORY, targetPathRoot,
                result.getFilePrefix(), tableName);
    }
    return result;
}
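The lookup-then-throw sequence at the top of beginInsert (getTable, isPresent() check, TableNotFoundException) is the textbook case for Optional.orElseThrow. A self-contained sketch under simplified, assumed types, where a plain Map and IllegalArgumentException stand in for the metastore and TableNotFoundException:

import java.util.Collections;
import java.util.Map;
import java.util.Optional;

public class OrElseThrowSketch {
    static final Map<String, String> TABLES =
            Collections.singletonMap("orders", "hive.default.orders");

    static String requireTable(String name) {
        // One expression instead of an isPresent() check plus an explicit throw:
        return Optional.ofNullable(TABLES.get(name))
                .orElseThrow(() -> new IllegalArgumentException("Table not found: " + name));
    }

    public static void main(String[] args) {
        System.out.println(requireTable("orders")); // hive.default.orders
        // requireTable("missing") would throw IllegalArgumentException.
    }
}

A related readability point: after the presence check, the method above calls table.get() repeatedly; binding it once to a local variable would shorten the remaining lines without changing behavior.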