Use of org.apache.gobblin.config.store.deploy.ConfigStream in the project incubator-gobblin by apache.
The class SimpleHadoopFilesystemConfigStore, method deploy.
/**
* Deploy configs provided by {@link FsDeploymentConfig#getDeployableConfigSource()} to HDFS.
 * For each {@link ConfigStream} returned by {@link DeployableConfigSource#getConfigStreams()}, a resource is created on HDFS.
* <br>
 * Does the following:
 * <ul>
* <li> Read {@link ConfigStream}s and write them to HDFS
* <li> Create parent directories of {@link ConfigStream#getConfigPath()} if required
 * <li> Set {@link FsDeploymentConfig#getStorePermissions()} on all resources created on HDFS
* <li> Update current active version in the store metadata file.
* </ul>
*
* <p>
 * For example: If "test-root" is a resource on the classpath and all resources under it need to be deployed,
* <br>
* <br>
* <b>In Classpath:</b><br>
* <blockquote> <code>
* test-root<br>
*  /data<br>
*   /set1<br>
*    /main.conf<br>
*  /tag<br>
*   /tag1<br>
*    /main.conf<br>
* </code> </blockquote>
* </p>
*
* <p>
 * A new version 2.0.0 ({@link FsDeploymentConfig#getNewVersion()}) is created on HDFS under <code>this.physicalStoreRoot/_CONFIG_STORE</code>
* <br>
* <br>
* <b>On HDFS after deploy:</b><br>
* <blockquote> <code>
* /_CONFIG_STORE<br>
*  /2.0.0<br>
*   /data<br>
*    /set1<br>
*     /main.conf<br>
*   /tag<br>
*    /tag1<br>
*     /main.conf<br>
* </code> </blockquote>
* </p>
*/
@Override
public void deploy(FsDeploymentConfig deploymentConfig) throws IOException {
  log.info("Deploying with config : " + deploymentConfig);

  Path hdfsconfigStoreRoot = new Path(this.physicalStoreRoot.getPath(), CONFIG_STORE_NAME);
  if (!this.fs.exists(hdfsconfigStoreRoot)) {
    throw new IOException("Config store root not present at " + this.physicalStoreRoot.getPath());
  }

  Path hdfsNewVersionPath = new Path(hdfsconfigStoreRoot, deploymentConfig.getNewVersion());

  if (!this.fs.exists(hdfsNewVersionPath)) {
    this.fs.mkdirs(hdfsNewVersionPath, deploymentConfig.getStorePermissions());

    Set<ConfigStream> confStreams = deploymentConfig.getDeployableConfigSource().getConfigStreams();

    for (ConfigStream confStream : confStreams) {
      String confAtPath = confStream.getConfigPath();
      log.info("Copying resource at : " + confAtPath);

      Path hdsfConfPath = new Path(hdfsNewVersionPath, confAtPath);
      if (!this.fs.exists(hdsfConfPath.getParent())) {
        this.fs.mkdirs(hdsfConfPath.getParent());
      }

      // If an empty directory needs to be created, it may not have a stream.
      if (confStream.getInputStream().isPresent()) {
        // Read the resource as a stream from the classpath and write it to HDFS
        try (SeekableFSInputStream inputStream = new SeekableFSInputStream(confStream.getInputStream().get());
            FSDataOutputStream os = this.fs.create(hdsfConfPath, false)) {
          StreamUtils.copy(inputStream, os);
        }
      }
    }

    // Set permission for newly copied files
    for (FileStatus fileStatus : FileListUtils.listPathsRecursively(this.fs, hdfsNewVersionPath,
        FileListUtils.NO_OP_PATH_FILTER)) {
      this.fs.setPermission(fileStatus.getPath(), deploymentConfig.getStorePermissions());
    }
  } else {
    log.warn(String.format(
        "STORE WITH VERSION %s ALREADY EXISTS. NEW RESOURCES WILL NOT BE COPIED. ONLY STORE METADATA FILE WILL BE UPDATED TO %s",
        deploymentConfig.getNewVersion(), deploymentConfig.getNewVersion()));
  }

  this.storeMetadata.setCurrentVersion(deploymentConfig.getNewVersion());

  log.info(String.format("New version %s of config store deployed at %s", deploymentConfig.getNewVersion(),
      hdfsconfigStoreRoot));
}
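
As a usage illustration, the sketch below drives the deploy method above. It is a minimal example and not taken from the project: it assumes a SimpleHadoopFilesystemConfigStore and a DeployableConfigSource instance already exist, and it assumes an FsDeploymentConfig constructor of the shape (config source, new version, store permissions); the actual constructor signatures and package locations in incubator-gobblin may differ.

import java.io.IOException;

import org.apache.hadoop.fs.permission.FsPermission;

import org.apache.gobblin.config.store.deploy.DeployableConfigSource;
import org.apache.gobblin.config.store.deploy.FsDeploymentConfig;
import org.apache.gobblin.config.store.hdfs.SimpleHadoopFilesystemConfigStore;

public class ConfigStoreDeployExample {

  // Hypothetical helper: deploys the resources exposed by "configSource" as version 2.0.0,
  // matching the example layout in the Javadoc above.
  public static void deployVersion(SimpleHadoopFilesystemConfigStore store, DeployableConfigSource configSource)
      throws IOException {
    // Assumed constructor shape: (DeployableConfigSource, new version string, FsPermission).
    FsDeploymentConfig deploymentConfig = new FsDeploymentConfig(configSource, "2.0.0", new FsPermission("750"));

    // Copies each ConfigStream under <physicalStoreRoot>/_CONFIG_STORE/2.0.0 and then
    // updates the store metadata file so 2.0.0 becomes the current version.
    store.deploy(deploymentConfig);
  }
}

Note that if the target version directory already exists on HDFS, the method copies nothing and only updates the store metadata file, as the warning branch in the code shows.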