Use of org.apache.geode.GemFireIOException in project geode by apache.
The class CacheServerImpl, method getAttribFactoryForClientMessagesRegion.
public static AttributesFactory getAttribFactoryForClientMessagesRegion(InternalCache cache,
    String ePolicy, int capacity, String overflowDir, boolean isDiskStore)
    throws InvalidValueException, GemFireIOException {
  AttributesFactory factory = new AttributesFactory();
  factory.setScope(Scope.LOCAL);
  if (isDiskStore) {
    // overflowDir parameter is actually the disk store name
    factory.setDiskStoreName(overflowDir);
    // client subscription queue is always overflowed to disk, so do async
    // see feature request #41479
    factory.setDiskSynchronous(true);
  } else if (overflowDir == null
      || overflowDir.equals(ClientSubscriptionConfig.DEFAULT_OVERFLOW_DIRECTORY)) {
    factory.setDiskStoreName(null);
    // client subscription queue is always overflowed to disk, so do async
    // see feature request #41479
    factory.setDiskSynchronous(true);
  } else {
    File dir = new File(overflowDir + File.separatorChar
        + generateNameForClientMsgsRegion(OSProcess.getId()));
    // This will delete the overflow directory when the virtual machine terminates.
    dir.deleteOnExit();
    if (!dir.mkdirs() && !dir.isDirectory()) {
      throw new GemFireIOException(
          "Could not create client subscription overflow directory: " + dir.getAbsolutePath());
    }
    File[] dirs = { dir };
    DiskStoreFactory dsf = cache.createDiskStoreFactory();
    dsf.setAutoCompact(true).setDiskDirsAndSizes(dirs, new int[] { MAX_VALUE }).create("bsi");
    factory.setDiskStoreName("bsi");
    // backward compatibility, it was sync
    factory.setDiskSynchronous(true);
  }
  factory.setDataPolicy(DataPolicy.NORMAL);
  // enable statistics
  factory.setStatisticsEnabled(true);
  /* setting LIFO related eviction attributes */
  if (HARegionQueue.HA_EVICTION_POLICY_ENTRY.equals(ePolicy)) {
    factory.setEvictionAttributes(
        EvictionAttributes.createLIFOEntryAttributes(capacity, EvictionAction.OVERFLOW_TO_DISK));
  } else if (HARegionQueue.HA_EVICTION_POLICY_MEMORY.equals(ePolicy)) {
    // condition refinement
    factory.setEvictionAttributes(
        EvictionAttributes.createLIFOMemoryAttributes(capacity, EvictionAction.OVERFLOW_TO_DISK));
  } else {
    // throw invalid eviction policy exception
    throw new InvalidValueException(
        LocalizedStrings.CacheServerImpl__0_INVALID_EVICTION_POLICY.toLocalizedString(ePolicy));
  }
  return factory;
}
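A minimal sketch of how a caller might invoke this factory and react to the two failure modes. This is not taken from the geode sources; the cache reference, capacity, and overflow directory are placeholders for illustration, and both exceptions are unchecked GemFireException subclasses.

import org.apache.geode.GemFireIOException;
import org.apache.geode.InvalidValueException;
import org.apache.geode.cache.AttributesFactory;
import org.apache.geode.cache.RegionAttributes;

// Illustrative caller only; cache, capacity (1000) and the overflow directory are placeholders.
try {
  AttributesFactory factory = CacheServerImpl.getAttribFactoryForClientMessagesRegion(
      cache, HARegionQueue.HA_EVICTION_POLICY_ENTRY, 1000, "/var/tmp/client-queue-overflow", false);
  RegionAttributes attrs = factory.create();
  // ... create the client messages region from attrs ...
} catch (GemFireIOException e) {
  // the overflow directory could not be created
  throw new IllegalStateException("Unable to prepare client subscription overflow storage", e);
} catch (InvalidValueException e) {
  // the eviction policy string was not recognized
  throw new IllegalStateException("Bad eviction policy for client subscription queue", e);
}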
Use of org.apache.geode.GemFireIOException in project geode by apache.
The class TestLogWriterFactory, method createLogWriter.
public static LogWriter createLogWriter(final boolean appendToFile, final boolean isLoner,
    final boolean isSecurityLog, final DistributionConfig config, final boolean logConfig,
    final FileOutputStream[] FOSHolder) {
  assertFalse(isSecurityLog);
  LogWriter logger = null;
  File logFile = config.getLogFile();
  assertNotNull(logFile);
  PrintStream out;
  String firstMsg = null;
  boolean firstMsgWarning = false;
  LogWriter mlw = null;
  if (logFile == null || logFile.equals(new File(""))) {
    out = System.out;
  } else {
    if (logFile.exists()) {
      boolean useChildLogging = config.getLogFile() != null
          && !config.getLogFile().equals(new File("")) && config.getLogFileSizeLimit() != 0;
      boolean statArchivesRolling = config.getStatisticArchiveFile() != null
          && !config.getStatisticArchiveFile().equals(new File(""))
          && config.getArchiveFileSizeLimit() != 0 && config.getStatisticSamplingEnabled();
      if (!appendToFile || useChildLogging || statArchivesRolling) {
        // check useChildLogging for bug 50659
        File oldMain = ManagerLogWriter.getLogNameForOldMainLog(logFile,
            isSecurityLog || useChildLogging || statArchivesRolling);
        boolean succeeded = LogFileUtils.renameAggressively(logFile, oldMain);
        if (succeeded) {
          firstMsg = LocalizedStrings.InternalDistributedSystem_RENAMED_OLD_LOG_FILE_TO_0
              .toLocalizedString(oldMain);
        } else {
          firstMsgWarning = true;
          firstMsg = LocalizedStrings.InternalDistributedSystem_COULD_NOT_RENAME_0_TO_1
              .toLocalizedString(new Object[] { logFile, oldMain });
        }
      }
    }
    FileOutputStream fos;
    try {
      fos = new FileOutputStream(logFile, true);
    } catch (FileNotFoundException ex) {
      String s = LocalizedStrings.InternalDistributedSystem_COULD_NOT_OPEN_LOG_FILE_0
          .toLocalizedString(logFile);
      throw new GemFireIOException(s, ex);
    }
    out = new PrintStream(fos);
    if (FOSHolder != null) {
      FOSHolder[0] = fos;
    }
    if (isSecurityLog) {
      mlw = new SecurityManagerLogWriter(config.getSecurityLogLevel(), out, config.getName());
    } else {
      mlw = new ManagerLogWriter(config.getLogLevel(), out, config.getName());
    }
    ((ManagerLogWriter) mlw).setConfig(config);
  }
  if (mlw.infoEnabled()) {
    if (!isLoner || /* do this on a loner to fix bug 35602 */
        !Boolean.getBoolean(InternalLocator.INHIBIT_DM_BANNER)) {
      mlw.info(Banner.getString(null));
    }
  }
  logger = mlw;
  if (firstMsg != null) {
    if (firstMsgWarning) {
      logger.warning(firstMsg);
    } else {
      logger.info(firstMsg);
    }
  }
  if (logConfig && logger.configEnabled()) {
    logger.convertToLogWriterI18n().config(
        LocalizedStrings.InternalDistributedSystem_STARTUP_CONFIGURATIONN_0,
        config.toLoggerString());
  }
  // fix #46493 by moving redirectOutput invocation here
  if (ProcessLauncherContext.isRedirectingOutput()) {
    try {
      OSProcess.redirectOutput(config.getLogFile());
    } catch (IOException e) {
      logger.error(e);
      // throw new GemFireIOException("Unable to redirect output to " + config.getLogFile(), e);
    }
  }
  return logger;
}
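The only GemFireIOException in this helper comes from opening the log file. A stripped-down sketch of that wrap-and-rethrow pattern, with the log file path as a placeholder and imports shown for context, might look like:

import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import org.apache.geode.GemFireIOException;

// Sketch of the pattern used above; logFile is a placeholder value.
static FileOutputStream openLogStream(File logFile) {
  try {
    return new FileOutputStream(logFile, true); // append mode, as the factory does
  } catch (FileNotFoundException ex) {
    // GemFireIOException is unchecked, so callers are not forced to declare the failure
    throw new GemFireIOException("Could not open log file " + logFile, ex);
  }
}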
Use of org.apache.geode.GemFireIOException in project geode by apache.
The class InternalDistributedSystem, method initialize.
/**
 * Initializes this connection to a distributed system with the current configuration state.
 */
private void initialize() {
  if (this.originalConfig.getLocators().equals("")) {
    if (this.originalConfig.getMcastPort() != 0) {
      throw new GemFireConfigException("The " + LOCATORS + " attribute can not be empty when the "
          + MCAST_PORT + " attribute is non-zero.");
    } else {
      // no distribution
      this.isLoner = true;
    }
  }
  this.config = new RuntimeDistributionConfigImpl(this);
  if (!this.isLoner) {
    this.attemptingToReconnect = (reconnectAttemptCounter > 0);
  }
  try {
    SocketCreatorFactory.setDistributionConfig(config);
    AlertAppender.getInstance().onConnect(this);
    // LOG: create LogWriterAppender(s) if log-file or security-log-file is specified
    final boolean hasLogFile =
        this.config.getLogFile() != null && !this.config.getLogFile().equals(new File(""));
    final boolean hasSecurityLogFile = this.config.getSecurityLogFile() != null
        && !this.config.getSecurityLogFile().equals(new File(""));
    LogService.configureLoggers(hasLogFile, hasSecurityLogFile);
    if (hasLogFile || hasSecurityLogFile) {
      // main log file
      if (hasLogFile) {
        // if log-file then create logWriterAppender
        this.logWriterAppender = LogWriterAppenders.getOrCreateAppender(
            LogWriterAppenders.Identifier.MAIN, this.isLoner, this.config, true);
      }
      // security log file
      if (hasSecurityLogFile) {
        // if security-log-file then create securityLogWriterAppender
        this.securityLogWriterAppender = LogWriterAppenders.getOrCreateAppender(
            LogWriterAppenders.Identifier.SECURITY, this.isLoner, this.config, false);
      } else {
        // let security route to regular log-file or stdout
      }
    }
    // getSecurityLogWriter
    if (this.logWriter == null) {
      this.logWriter =
          LogWriterFactory.createLogWriterLogger(this.isLoner, false, this.config, true);
      this.logWriter.fine("LogWriter is created.");
    }
    if (this.securityLogWriter == null) {
      // LOG: whole new LogWriterLogger instance for security
      this.securityLogWriter =
          LogWriterFactory.createLogWriterLogger(this.isLoner, true, this.config, false);
      this.securityLogWriter.fine("SecurityLogWriter is created.");
    }
    Services.setLogWriter(this.logWriter);
    Services.setSecurityLogWriter(this.securityLogWriter);
    this.clock = new DSClock(this.isLoner);
    if (this.attemptingToReconnect && logger.isDebugEnabled()) {
      logger.debug(
          "This thread is initializing a new DistributedSystem in order to reconnect to other members");
    }
    // bridge server and will need to enforce the member limit
    if (Boolean.getBoolean(InternalLocator.FORCE_LOCATOR_DM_TYPE)) {
      this.locatorDMTypeForced = true;
    }
    // Initialize the Diffie-Hellman and public/private keys
    try {
      HandShake.initCertsMap(this.config.getSecurityProps());
      HandShake.initPrivateKey(this.config.getSecurityProps());
      HandShake.initDHKeys(this.config);
    } catch (Exception ex) {
      throw new GemFireSecurityException(
          LocalizedStrings.InternalDistributedSystem_PROBLEM_IN_INITIALIZING_KEYS_FOR_CLIENT_AUTHENTICATION
              .toLocalizedString(),
          ex);
    }
    final long offHeapMemorySize =
        OffHeapStorage.parseOffHeapMemorySize(getConfig().getOffHeapMemorySize());
    this.offHeapStore = OffHeapStorage.createOffHeapStorage(this, offHeapMemorySize, this);
    // Note: this can only happen on a linux system
    if (getConfig().getLockMemory()) {
      // This calculation is not exact, but seems fairly close. So far we have
      // not loaded much into the heap and the current RSS usage is already
      // included in the available memory calculation.
      long avail = LinuxProcFsStatistics.getAvailableMemory(logger);
      long size = offHeapMemorySize + Runtime.getRuntime().totalMemory();
      if (avail < size) {
        if (ALLOW_MEMORY_LOCK_WHEN_OVERCOMMITTED) {
          logger.warn(LocalizedMessage.create(
              LocalizedStrings.InternalDistributedSystem_MEMORY_OVERCOMMIT_WARN, size - avail));
        } else {
          throw new IllegalStateException(
              LocalizedStrings.InternalDistributedSystem_MEMORY_OVERCOMMIT.toLocalizedString(avail,
                  size));
        }
      }
      logger.info("Locking memory. This may take a while...");
      GemFireCacheImpl.lockMemory();
      logger.info("Finished locking memory.");
    }
    try {
      startInitLocator();
    } catch (InterruptedException e) {
      throw new SystemConnectException("Startup has been interrupted", e);
    }
    synchronized (this.isConnectedMutex) {
      this.isConnected = true;
    }
    if (!this.isLoner) {
      try {
        if (this.quorumChecker != null) {
          this.quorumChecker.suspend();
        }
        this.dm = DistributionManager.create(this);
        // fix bug #46324
        if (InternalLocator.hasLocator()) {
          InternalLocator locator = InternalLocator.getLocator();
          getDistributionManager().addHostedLocators(getDistributedMember(),
              InternalLocator.getLocatorStrings(), locator.isSharedConfigurationEnabled());
        }
      } finally {
        if (this.dm == null && this.quorumChecker != null) {
          this.quorumChecker.resume();
        }
        setDisconnected();
      }
    } else {
      this.dm = new LonerDistributionManager(this, this.logWriter);
    }
    Assert.assertTrue(this.dm != null);
    Assert.assertTrue(this.dm.getSystem() == this);
    try {
      this.id = this.dm.getChannelId();
    } catch (DistributedSystemDisconnectedException e) {
      // but during startup we should instead throw a SystemConnectException
      throw new SystemConnectException(
          LocalizedStrings.InternalDistributedSystem_DISTRIBUTED_SYSTEM_HAS_DISCONNECTED
              .toLocalizedString(),
          e);
    }
    synchronized (this.isConnectedMutex) {
      this.isConnected = true;
    }
    if (attemptingToReconnect && (this.startedLocator == null)) {
      try {
        startInitLocator();
      } catch (InterruptedException e) {
        throw new SystemConnectException("Startup has been interrupted", e);
      }
    }
    try {
      endInitLocator();
    } catch (IOException e) {
      throw new GemFireIOException("Problem finishing a locator service start", e);
    }
    if (!statsDisabled) {
      // to fix bug 42527 we need a sampler
      // even if sampling is not enabled.
      this.sampler = new GemFireStatSampler(this);
      this.sampler.start();
    }
    if (this.logWriterAppender != null) {
      LogWriterAppenders.startupComplete(LogWriterAppenders.Identifier.MAIN);
    }
    if (this.securityLogWriterAppender != null) {
      LogWriterAppenders.startupComplete(LogWriterAppenders.Identifier.SECURITY);
    }
    // this.logger.info("ds created", new RuntimeException("DEBUG: STACK"));
    // Log any instantiators that were registered before the log writer was created
    InternalInstantiator.logInstantiators();
  } catch (RuntimeException ex) {
    this.config.close();
    throw ex;
  }
  resourceListeners = new CopyOnWriteArrayList<ResourceEventsListener>();
  this.reconnected = this.attemptingToReconnect;
  this.attemptingToReconnect = false;
}
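In this method GemFireIOException surfaces when endInitLocator() fails with an IOException, and because it is unchecked it propagates to whoever is connecting the distributed system. A hedged sketch of that caller-side view, using the public connect API with placeholder property values:

import java.util.Properties;
import org.apache.geode.GemFireIOException;
import org.apache.geode.distributed.DistributedSystem;

// Illustrative only; the property values below are placeholders.
Properties props = new Properties();
props.setProperty("mcast-port", "0");
props.setProperty("locators", "localhost[10334]");
try {
  DistributedSystem ds = DistributedSystem.connect(props);
} catch (GemFireIOException e) {
  // e.getCause() carries the underlying IOException from the locator start
  System.err.println("Locator service did not finish starting: " + e.getCause());
}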
Use of org.apache.geode.GemFireIOException in project geode by apache.
The class CacheCreation, method startCacheServers.
/**
 * Starts declarative cache servers if a server is not already running on the port. Also adds a
 * default server to the declarativeCacheServers param if a serverPort is specified.
 */
void startCacheServers(List<CacheServer> declarativeCacheServers, Cache cache, Integer serverPort,
    String serverBindAdd, Boolean disableDefaultServer) {
  if (declarativeCacheServers.size() > 1 && (serverPort != null || serverBindAdd != null)) {
    throw new RuntimeException(
        LocalizedStrings.CacheServerLauncher_SERVER_PORT_MORE_THAN_ONE_CACHE_SERVER
            .toLocalizedString());
  }
  CacheServerCreation defaultServer = null;
  boolean hasServerPortOrBindAddress = serverPort != null || serverBindAdd != null;
  boolean isDefaultServerEnabled = disableDefaultServer == null || !disableDefaultServer;
  if (declarativeCacheServers.isEmpty() && hasServerPortOrBindAddress && isDefaultServerEnabled) {
    boolean existingCacheServer = false;
    List<CacheServer> cacheServers = cache.getCacheServers();
    if (cacheServers != null) {
      for (CacheServer cacheServer : cacheServers) {
        if (serverPort == cacheServer.getPort()) {
          existingCacheServer = true;
        }
      }
    }
    if (!existingCacheServer) {
      defaultServer = new CacheServerCreation((InternalCache) cache, false);
      declarativeCacheServers.add(defaultServer);
    }
  }
  for (CacheServer declarativeCacheServer : declarativeCacheServers) {
    CacheServerCreation declaredCacheServer = (CacheServerCreation) declarativeCacheServer;
    boolean startServer = true;
    List<CacheServer> cacheServers = cache.getCacheServers();
    if (cacheServers != null) {
      for (CacheServer cacheServer : cacheServers) {
        if (declaredCacheServer.getPort() == cacheServer.getPort()) {
          startServer = false;
        }
      }
    }
    if (!startServer) {
      continue;
    }
    CacheServerImpl impl = (CacheServerImpl) cache.addCacheServer();
    impl.configureFrom(declaredCacheServer);
    if (declaredCacheServer == defaultServer) {
      impl.setIsDefaultServer();
    }
    if (serverPort != null && serverPort != CacheServer.DEFAULT_PORT) {
      impl.setPort(serverPort);
    }
    if (serverBindAdd != null) {
      impl.setBindAddress(serverBindAdd.trim());
    }
    try {
      if (!impl.isRunning()) {
        impl.start();
      }
    } catch (IOException ex) {
      throw new GemFireIOException(
          LocalizedStrings.CacheCreation_WHILE_STARTING_CACHE_SERVER_0.toLocalizedString(impl), ex);
    }
  }
}
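The GemFireIOException here wraps the IOException from CacheServer.start(). A minimal sketch of the same wrapping pattern using only the public CacheServer API; the cache reference and port number are placeholders:

import java.io.IOException;
import org.apache.geode.GemFireIOException;
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.server.CacheServer;

// Sketch only; cache and the port 40404 are placeholder values.
static void startServer(Cache cache) {
  CacheServer server = cache.addCacheServer();
  server.setPort(40404);
  try {
    server.start();
  } catch (IOException ex) {
    throw new GemFireIOException("While starting cache server " + server, ex);
  }
}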
Use of org.apache.geode.GemFireIOException in project geode by apache.
The class DiskStoreCompacter, method main.
public static void main(String[] args) {
  String errorString = null;
  String stackTraceString = null;
  String diskStoreName = null;
  String diskDirsStr = null;
  String[] diskDirs = null;
  String maxOpLogSize = null;
  long maxOplogSize = -1;
  try {
    if (args.length < 3) {
      throw new IllegalArgumentException(
          "Requires 3 arguments : <diskStoreName> <diskDirs> <maxOplogSize>");
    }
    Properties prop = new Properties();
    try {
      prop.load(new StringReader(args[0] + GfshParser.LINE_SEPARATOR + args[1]
          + GfshParser.LINE_SEPARATOR + args[2]));
    } catch (IOException e) {
      throw new IllegalArgumentException(
          "Requires 3 arguments : <diskStoreName> <diskDirs> <maxOplogSize>");
    }
    diskStoreName = prop.getProperty(CliStrings.COMPACT_OFFLINE_DISK_STORE__NAME);
    diskDirsStr = prop.getProperty(CliStrings.COMPACT_OFFLINE_DISK_STORE__DISKDIRS);
    diskDirs = diskDirsStr.split(",");
    maxOpLogSize = prop.getProperty(CliStrings.COMPACT_OFFLINE_DISK_STORE__MAXOPLOGSIZE);
    maxOplogSize = Long.valueOf(maxOpLogSize);
    compact(diskStoreName, diskDirs, maxOplogSize);
  } catch (GemFireIOException e) {
    Throwable cause = e.getCause();
    if (cause instanceof IllegalStateException) {
      String message = cause.getMessage();
      if (stringMatches(
          LocalizedStrings.DiskInitFile_THE_INIT_FILE_0_DOES_NOT_EXIST.toLocalizedString("(.*)"),
          message)) {
        errorString = CliStrings.format(
            CliStrings.COMPACT_OFFLINE_DISK_STORE__MSG__VERIFY_WHETHER_DISKSTORE_EXISTS_IN_0,
            CliUtil.arrayToString(diskDirs));
      } else {
        errorString = message;
      }
    } else if (cause instanceof DiskAccessException) {
      boolean isKnownCause = false;
      Throwable nestedCause = cause.getCause();
      if (nestedCause instanceof IOException) {
        String message = nestedCause.getMessage();
        if (stringMatches(LocalizedStrings.Oplog_THE_FILE_0_IS_BEING_USED_BY_ANOTHER_PROCESS
            .toLocalizedString("(.*)"), message)) {
          errorString =
              CliStrings.COMPACT_OFFLINE_DISK_STORE__MSG__DISKSTORE_IN_USE_COMPACT_DISKSTORE_CAN_BE_USED;
          isKnownCause = true;
        }
      }
      if (!isKnownCause) {
        errorString = CliStrings.format(
            CliStrings.COMPACT_OFFLINE_DISK_STORE__MSG__CANNOT_ACCESS_DISKSTORE_0_FROM_1_CHECK_GFSH_LOGS,
            new Object[] { diskStoreName, CliUtil.arrayToString(diskDirs) });
      }
    } else {
      // which are other known exceptions?
      errorString = e.getMessage();
    }
    stackTraceString = CliUtil.stackTraceAsString(e);
  } catch (IllegalArgumentException e) {
    errorString = e.getMessage();
    stackTraceString = CliUtil.stackTraceAsString(e);
  } finally {
    if (errorString != null) {
      System.err.println(errorString);
    }
    if (stackTraceString != null) {
      System.err.println(STACKTRACE_START);
      System.err.println(stackTraceString);
    }
  }
}
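This example consumes rather than throws GemFireIOException: it discriminates on the exception's cause chain to produce a user-facing message. A condensed sketch of that cause-chain inspection; runCompaction() stands in for the compact(...) call and is a placeholder, and the printed messages are illustrative only:

import org.apache.geode.GemFireIOException;
import org.apache.geode.cache.DiskAccessException;

// Illustrative cause-chain inspection mirroring the handling in main().
try {
  runCompaction(); // placeholder for compact(diskStoreName, diskDirs, maxOplogSize)
} catch (GemFireIOException e) {
  Throwable cause = e.getCause();
  if (cause instanceof IllegalStateException) {
    System.err.println("Disk store metadata problem: " + cause.getMessage());
  } else if (cause instanceof DiskAccessException) {
    System.err.println("Disk store inaccessible, possibly in use: " + cause.getMessage());
  } else {
    System.err.println(e.getMessage());
  }
}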