Use of org.apache.ignite.internal.processors.igfs.IgfsHandshakeResponse in project ignite by apache.
From the class IgfsControlResponse, method readExternal:
/**
 * Reads object from data input.
 *
 * @param in Data input.
 * @throws IOException If read failed.
 * @throws ClassNotFoundException If could not find class.
 */
public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
    byte[] hdr = new byte[RES_HEADER_SIZE];

    in.readFully(hdr);

    // The first 4 header bytes carry the response type; byte 4 is the error flag.
    resType = U.bytesToInt(hdr, 0);

    boolean hasErr = hdr[4] != 0;

    if (hasErr) {
        err = in.readUTF();
        errCode = in.readInt();

        if (resType == RES_TYPE_ERR_STREAM_ID)
            res = in.readLong();

        return;
    }

    switch (resType) {
        case RES_TYPE_BOOLEAN:
            res = in.readBoolean();
            break;

        case RES_TYPE_LONG:
            res = in.readLong();
            break;

        case RES_TYPE_IGFS_PATH: {
            boolean hasVal = in.readBoolean();

            if (hasVal)
                res = IgfsUtils.readPath(in);

            break;
        }

        case RES_TYPE_IGFS_PATH_SUMMARY: {
            boolean hasVal = in.readBoolean();

            if (hasVal) {
                IgfsPathSummary sum = new IgfsPathSummary();

                sum.readExternal(in);

                res = sum;
            }

            break;
        }

        case RES_TYPE_IGFS_FILE: {
            boolean hasVal = in.readBoolean();

            if (hasVal) {
                IgfsFileImpl file = new IgfsFileImpl();

                file.readExternal(in);

                res = file;
            }

            break;
        }

        case RES_TYPE_IGFS_STREAM_DESCRIPTOR: {
            boolean hasVal = in.readBoolean();

            if (hasVal) {
                IgfsInputStreamDescriptor desc = new IgfsInputStreamDescriptor();

                desc.readExternal(in);

                res = desc;
            }

            break;
        }

        case RES_TYPE_HANDSHAKE: {
            boolean hasVal = in.readBoolean();

            if (hasVal) {
                IgfsHandshakeResponse msg = new IgfsHandshakeResponse();

                msg.readExternal(in);

                res = msg;
            }

            break;
        }

        case RES_TYPE_STATUS: {
            boolean hasVal = in.readBoolean();

            if (hasVal) {
                IgfsStatus msg = new IgfsStatus();

                msg.readExternal(in);

                res = msg;
            }

            break;
        }

        case RES_TYPE_COL_IGFS_FILE: {
            // Collections are length-prefixed; a negative size means null.
            Collection<IgfsFile> files = null;

            int size = in.readInt();

            if (size >= 0) {
                files = new ArrayList<>(size);

                for (int i = 0; i < size; i++) {
                    IgfsFileImpl file = new IgfsFileImpl();

                    file.readExternal(in);

                    files.add(file);
                }
            }

            res = files;

            break;
        }

        case RES_TYPE_COL_IGFS_PATH: {
            Collection<IgfsPath> paths = null;

            int size = in.readInt();

            if (size >= 0) {
                paths = new ArrayList<>(size);

                for (int i = 0; i < size; i++)
                    paths.add(IgfsUtils.readPath(in));
            }

            res = paths;

            break;
        }

        case RES_TYPE_COL_IGFS_BLOCK_LOCATION: {
            Collection<IgfsBlockLocation> locations = null;

            int size = in.readInt();

            if (size >= 0) {
                locations = new ArrayList<>(size);

                for (int i = 0; i < size; i++) {
                    IgfsBlockLocationImpl location = new IgfsBlockLocationImpl();

                    location.readExternal(in);

                    locations.add(location);
                }
            }

            res = locations;

            break;
        }

        case RES_TYPE_MODE_RESOLVER: {
            boolean hasVal = in.readBoolean();

            if (hasVal) {
                IgfsModeResolver msg = new IgfsModeResolver();

                msg.readExternal(in);

                res = msg;
            }

            break;
        }

        case RES_TYPE_BYTE_ARRAY:
            assert false : "Response type of byte array should never be processed by marshaller.";
    }
}
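The read path above implies a simple wire layout: a fixed-size header whose first 4 bytes hold the response type and whose fifth byte flags an error, followed by a type-specific payload. The sketch below illustrates the write side of that layout for a long-typed success response. It is only an illustration, not the actual IgfsControlResponse.writeExternal; the 9-byte header size and the big-endian encoding expected by U.bytesToInt are assumptions.

import java.io.IOException;
import java.io.ObjectOutput;

/** Illustrative sketch of the response wire layout implied by readExternal (not the real writer). */
final class ResponseWireSketch {
    /** Assumed header size: 4 bytes for the response type, 1 byte for the error flag, remainder reserved. */
    private static final int RES_HEADER_SIZE = 9;

    /** Writes a successful long-typed response, mirroring the RES_TYPE_LONG branch of the reader. */
    static void writeLongResponse(ObjectOutput out, int resType, long val) throws IOException {
        byte[] hdr = new byte[RES_HEADER_SIZE];

        // Big-endian int in the first 4 bytes, matching what U.bytesToInt(hdr, 0) is assumed to read.
        hdr[0] = (byte)(resType >>> 24);
        hdr[1] = (byte)(resType >>> 16);
        hdr[2] = (byte)(resType >>> 8);
        hdr[3] = (byte)resType;

        hdr[4] = 0; // Error flag cleared: a payload follows.

        out.write(hdr);
        out.writeLong(val);
    }
}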
Use of org.apache.ignite.internal.processors.igfs.IgfsHandshakeResponse in project ignite by apache.
From the class IgniteHadoopFileSystem, the private initialize method:
/**
 * @param name URI passed to constructor.
 * @param cfg Configuration passed to constructor.
 * @throws IOException If initialization failed.
 */
@SuppressWarnings("ConstantConditions")
private void initialize(URI name, Configuration cfg) throws IOException {
    enterBusy();

    try {
        if (rmtClient != null)
            throw new IOException("File system is already initialized: " + rmtClient);

        A.notNull(name, "name");
        A.notNull(cfg, "cfg");

        if (!IGFS_SCHEME.equals(name.getScheme()))
            throw new IOException("Illegal file system URI [expected=" + IGFS_SCHEME + "://[name]/[optional_path], actual=" + name + ']');

        uriAuthority = name.getAuthority();

        // Override sequential reads before prefetch if needed.
        seqReadsBeforePrefetch = parameter(cfg, PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH, uriAuthority, 0);

        if (seqReadsBeforePrefetch > 0)
            seqReadsBeforePrefetchOverride = true;

        // In Ignite replication factor is controlled by data cache affinity.
        // We use replication factor to force the whole file to be stored on local node.
        dfltReplication = (short) cfg.getInt("dfs.replication", 3);

        // Get file colocation control flag.
        colocateFileWrites = parameter(cfg, PARAM_IGFS_COLOCATED_WRITES, uriAuthority, false);
        preferLocFileWrites = cfg.getBoolean(PARAM_IGFS_PREFER_LOCAL_WRITES, false);

        // Get log directory.
        String logDirCfg = parameter(cfg, PARAM_IGFS_LOG_DIR, uriAuthority, DFLT_IGFS_LOG_DIR);

        File logDirFile = U.resolveIgnitePath(logDirCfg);

        String logDir = logDirFile != null ? logDirFile.getAbsolutePath() : null;

        rmtClient = new HadoopIgfsWrapper(uriAuthority, logDir, cfg, LOG, user);

        // Handshake.
        IgfsHandshakeResponse handshake = rmtClient.handshake(logDir);

        grpBlockSize = handshake.blockSize();

        Boolean logEnabled = parameter(cfg, PARAM_IGFS_LOG_ENABLED, uriAuthority, false);

        if (handshake.sampling() != null ? handshake.sampling() : logEnabled) {
            // Initiate client logger.
            if (logDir == null)
                throw new IOException("Failed to resolve log directory: " + logDirCfg);

            Integer batchSize = parameter(cfg, PARAM_IGFS_LOG_BATCH_SIZE, uriAuthority, DFLT_IGFS_LOG_BATCH_SIZE);

            clientLog = IgfsLogger.logger(uriAuthority, handshake.igfsName(), logDir, batchSize);
        } else
            clientLog = IgfsLogger.disabledLogger();
    } finally {
        leaveBusy();
    }
}
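Most of the values above come from parameter(cfg, PARAM_..., uriAuthority, dflt), which resolves a setting scoped to the file system's URI authority with a fallback default. The helper below is a hypothetical sketch of that lookup pattern, not the actual Ignite implementation; the "fs.igfs.<authority>.<suffix>" key layout is an assumption made purely for illustration.

import org.apache.hadoop.conf.Configuration;

/** Hypothetical sketch of a per-authority configuration lookup (not the real Ignite helper). */
final class IgfsParamSketch {
    /** Resolves an int setting scoped to the given URI authority, falling back to the default. */
    static int intParameter(Configuration cfg, String suffix, String authority, int dflt) {
        return cfg.getInt(scopedKey(suffix, authority), dflt);
    }

    /** Resolves a boolean setting scoped to the given URI authority, falling back to the default. */
    static boolean boolParameter(Configuration cfg, String suffix, String authority, boolean dflt) {
        return cfg.getBoolean(scopedKey(suffix, authority), dflt);
    }

    /** Assumed key layout: "fs.igfs.<authority>.<suffix>". */
    private static String scopedKey(String suffix, String authority) {
        return "fs.igfs." + (authority != null ? authority : "") + "." + suffix;
    }
}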
Use of org.apache.ignite.internal.processors.igfs.IgfsHandshakeResponse in project ignite by apache.
From the class IgniteHadoopFileSystem, the public initialize override:
/** {@inheritDoc} */
@SuppressWarnings("ConstantConditions")
@Override public void initialize(URI name, Configuration cfg) throws IOException {
    enterBusy();

    try {
        if (rmtClient != null)
            throw new IOException("File system is already initialized: " + rmtClient);

        A.notNull(name, "name");
        A.notNull(cfg, "cfg");

        super.initialize(name, cfg);

        setConf(cfg);

        if (!IGFS_SCHEME.equals(name.getScheme()))
            throw new IOException("Illegal file system URI [expected=" + IGFS_SCHEME + "://[name]/[optional_path], actual=" + name + ']');

        uri = name;

        uriAuthority = uri.getAuthority();

        user = getFsHadoopUser();

        // Override sequential reads before prefetch if needed.
        seqReadsBeforePrefetch = parameter(cfg, PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH, uriAuthority, 0);

        if (seqReadsBeforePrefetch > 0)
            seqReadsBeforePrefetchOverride = true;

        // In Ignite replication factor is controlled by data cache affinity.
        // We use replication factor to force the whole file to be stored on local node.
        dfltReplication = (short) cfg.getInt("dfs.replication", 3);

        // Get file colocation control flag.
        colocateFileWrites = parameter(cfg, PARAM_IGFS_COLOCATED_WRITES, uriAuthority, false);
        preferLocFileWrites = cfg.getBoolean(PARAM_IGFS_PREFER_LOCAL_WRITES, false);

        // Get log directory.
        String logDirCfg = parameter(cfg, PARAM_IGFS_LOG_DIR, uriAuthority, DFLT_IGFS_LOG_DIR);

        File logDirFile = U.resolveIgnitePath(logDirCfg);

        String logDir = logDirFile != null ? logDirFile.getAbsolutePath() : null;

        rmtClient = new HadoopIgfsWrapper(uriAuthority, logDir, cfg, LOG, user);

        // Handshake.
        IgfsHandshakeResponse handshake = rmtClient.handshake(logDir);

        igfsGrpBlockSize = handshake.blockSize();

        // Initialize client logger.
        Boolean logEnabled = parameter(cfg, PARAM_IGFS_LOG_ENABLED, uriAuthority, false);

        if (handshake.sampling() != null ? handshake.sampling() : logEnabled) {
            // Initiate client logger.
            if (logDir == null)
                throw new IOException("Failed to resolve log directory: " + logDirCfg);

            Integer batchSize = parameter(cfg, PARAM_IGFS_LOG_BATCH_SIZE, uriAuthority, DFLT_IGFS_LOG_BATCH_SIZE);

            clientLog = IgfsLogger.logger(uriAuthority, handshake.igfsName(), logDir, batchSize);
        } else
            clientLog = IgfsLogger.disabledLogger();

        // Set working directory to the home directory of the current Fs user:
        setWorkingDirectory(null);
    } finally {
        leaveBusy();
    }
}
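Because this override runs when Hadoop resolves the igfs:// scheme, the handshake (and the block size carried by IgfsHandshakeResponse) happens transparently the first time a client obtains the file system. The sketch below shows that usage under assumptions: the fs.igfs.impl mapping and the endpoint igfs://igfs@localhost:10500/ are illustrative placeholders that depend on the actual cluster and core-site.xml configuration.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/** Usage sketch: obtaining the IGFS-backed file system through the standard Hadoop API. */
public class IgfsClientSketch {
    public static void main(String[] args) throws Exception {
        Configuration cfg = new Configuration();

        // Assumed scheme-to-implementation mapping; normally set in core-site.xml.
        cfg.set("fs.igfs.impl", "org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem");

        // FileSystem.get(...) invokes initialize(name, cfg), which performs the handshake.
        try (FileSystem fs = FileSystem.get(URI.create("igfs://igfs@localhost:10500/"), cfg)) {
            // The block size reported by the handshake backs values such as this one.
            System.out.println("Default block size: " + fs.getDefaultBlockSize(new Path("/")));
        }
    }
}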