Use of org.pentaho.platform.api.repository2.unified.data.node.NodeRepositoryFileData in project pentaho-kettle by pentaho.
The class PurRepository, method loadTransformation.
@Override
public TransMeta loadTransformation(final String transName, final RepositoryDirectoryInterface parentDir,
    final ProgressMonitorListener monitor, final boolean setInternalVariables, final String versionId)
    throws KettleException {
  String absPath = null;
  try {
    // a blank name means no transformation name was supplied by the user
    if (StringUtils.isBlank(transName)) {
      throw new KettleFileException(BaseMessages.getString(PKG, "PurRepository.ERROR_0007_TRANSFORMATION_NAME_MISSING"));
    }
    try {
      absPath = getPath(transName, parentDir, RepositoryObjectType.TRANSFORMATION);
    } catch (Exception e) {
      // ignore and handle the null value below
    }
    // a blank path usually means the name or directory was defined through a
    // variable that is not available at runtime
    if (StringUtils.isBlank(absPath)) {
      // couldn't resolve the path, throw an exception
      throw new KettleFileException(BaseMessages.getString(PKG, "PurRepository.ERROR_0008_TRANSFORMATION_PATH_INVALID", transName));
    }
    RepositoryFile file = pur.getFile(absPath);
    if (versionId != null) {
      // need to go back to the server to get the versioned info
      file = pur.getFileAtVersion(file.getId(), versionId);
    }
    // the path must resolve to a valid file
    if (file == null) {
      throw new KettleException(BaseMessages.getString(PKG, "PurRepository.ERROR_0008_TRANSFORMATION_PATH_INVALID", absPath));
    }
    // read the node data and the object revision for the requested version
    NodeRepositoryFileData data = pur.getDataAtVersionForRead(file.getId(), versionId, NodeRepositoryFileData.class);
    ObjectRevision revision = getObjectRevision(new StringObjectId(file.getId().toString()), versionId);
    TransMeta transMeta = buildTransMeta(file, parentDir, data, revision);
    ExtensionPointHandler.callExtensionPoint(log, KettleExtensionPoint.TransformationMetaLoaded.id, transMeta);
    return transMeta;
  } catch (final KettleException ke) {
    // if we have a KettleException, simply re-throw it
    throw ke;
  } catch (Exception e) {
    throw new KettleException("Unable to load transformation from path [" + absPath + "]", e);
  }
}
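For context, a minimal usage sketch follows. The repository instance, directory path, and transformation name are illustrative assumptions, not part of the snippet above; as the null check on versionId shows, passing null for the version id loads the latest revision.

// Hypothetical usage sketch, assuming "repository" is an already-connected PurRepository
RepositoryDirectoryInterface dir = repository.findDirectory("/public"); // assumed existing directory
// null monitor: no progress reporting; null versionId: latest revision
TransMeta transMeta = repository.loadTransformation("my_transform", dir, null, true, null);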
Use of org.pentaho.platform.api.repository2.unified.data.node.NodeRepositoryFileData in project pentaho-kettle by pentaho.
The class PurRepository, method readSharedObjects.
/**
 * Read shared objects of the types provided from the repository. Every {@link SharedObjectInterface} that is read
 * will be fully loaded as if it had been loaded through {@link #loadDatabaseMeta(ObjectId, String)},
 * {@link #loadClusterSchema(ObjectId, List, String)}, etc.
 * <p>
 * This method was introduced to reduce the number of server calls for loading shared objects to a fixed number:
 * {@code 2 + n}, where {@code n} is the number of types requested.
 * </p>
 *
 * @param sharedObjectsByType
 *          Map of type to shared objects. Each map entry will contain a non-null {@link List} of
 *          {@link SharedObjectInterface}s for every type provided. Only entries for types provided will be altered.
 * @param types
 *          Types of repository objects to read from the repository
 * @throws KettleException
 */
protected void readSharedObjects(Map<RepositoryObjectType, List<? extends SharedObjectInterface>> sharedObjectsByType,
    RepositoryObjectType... types) throws KettleException {
  // Overview:
  // 1) Fetch the RepositoryFile, NodeRepositoryFileData, and VersionSummary for all types provided.
  // 2) Assume that, unless an exception is thrown, every RepositoryFile returned by getFilesByType(..) has a
  //    matching NodeRepositoryFileData and VersionSummary.
  // 3) With all files, node data, and versions in hand, iterate over them, merging them back into usable
  //    shared objects.
  List<RepositoryFile> allFiles = new ArrayList<RepositoryFile>();
  // Since the type is not preserved in the RepositoryFile, fetch files by type so we don't have to rely on
  // parsing the name to determine the type afterward.
  // The map must be ordered or we can't match up files with data and version summaries.
  LinkedHashMap<RepositoryObjectType, List<RepositoryFile>> filesByType = getFilesByType(allFiles, types);
  try {
    List<NodeRepositoryFileData> data = pur.getDataForReadInBatch(allFiles, NodeRepositoryFileData.class);
    List<VersionSummary> versions = pur.getVersionSummaryInBatch(allFiles);
    // Only one iterator is needed over all data and versions; we work through them as we process the files by
    // type, in order.
    Iterator<NodeRepositoryFileData> dataIter = data.iterator();
    Iterator<VersionSummary> versionsIter = versions.iterator();
    // Assemble into completely loaded SharedObjectInterfaces by type
    for (Entry<RepositoryObjectType, List<RepositoryFile>> entry : filesByType.entrySet()) {
      SharedObjectAssembler<?> assembler = sharedObjectAssemblerMap.get(entry.getKey());
      if (assembler == null) {
        throw new UnsupportedOperationException(
            String.format("Cannot assemble shared object of type [%s]", entry.getKey())); // $NON-NLS-1$
      }
      // For all files of this type, assemble them from the pieces of data pulled from the repository
      Iterator<RepositoryFile> filesIter = entry.getValue().iterator();
      List<SharedObjectInterface> sharedObjects = new ArrayList<SharedObjectInterface>(entry.getValue().size());
      // The file, data, and version lists line up, so there is no need to check hasNext() on every iterator.
      while (filesIter.hasNext()) {
        RepositoryFile file = filesIter.next();
        NodeRepositoryFileData repoData = dataIter.next();
        VersionSummary version = versionsIter.next();
        // TODO: nonexistent db types can cause exceptions while assembling; prevent total failure
        try {
          sharedObjects.add(assembler.assemble(file, repoData, version));
        } catch (Exception ex) {
          // TODO i18n
          getLog().logError("Unable to load shared objects", ex);
        }
      }
      sharedObjectsByType.put(entry.getKey(), sharedObjects);
    }
  } catch (Exception ex) {
    throw new KettleException("Unable to load shared objects", ex); // $NON-NLS-1$
  }
}
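A sketch of how this protected method might be invoked from inside the class or a subclass; the requested types and the EnumMap choice are illustrative assumptions. The two batch reads above plus the per-type file lookups in getFilesByType(..) account for the 2 + n server calls mentioned in the javadoc.

// Hypothetical invocation sketch (java.util.EnumMap assumed to be imported)
Map<RepositoryObjectType, List<? extends SharedObjectInterface>> sharedObjectsByType =
    new EnumMap<RepositoryObjectType, List<? extends SharedObjectInterface>>(RepositoryObjectType.class);
readSharedObjects(sharedObjectsByType, RepositoryObjectType.DATABASE, RepositoryObjectType.SLAVE_SERVER);
// only the entries for the requested types are populated
List<? extends SharedObjectInterface> databases = sharedObjectsByType.get(RepositoryObjectType.DATABASE);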
Use of org.pentaho.platform.api.repository2.unified.data.node.NodeRepositoryFileData in project pentaho-kettle by pentaho.
The class PurRepository, method loadSlaveServer.
@Override
public SlaveServer loadSlaveServer(ObjectId idSlaveServer, String versionId) throws KettleException {
  try {
    NodeRepositoryFileData data = pur.getDataAtVersionForRead(idSlaveServer.getId(), versionId, NodeRepositoryFileData.class);
    RepositoryFile file = null;
    if (versionId != null) {
      file = pur.getFileAtVersion(idSlaveServer.getId(), versionId);
    } else {
      file = pur.getFileById(idSlaveServer.getId());
    }
    return slaveTransformer.assemble(file, data, pur.getVersionSummary(idSlaveServer.getId(), versionId));
  } catch (Exception e) {
    throw new KettleException("Unable to load slave server with id [" + idSlaveServer + "]", e);
  }
}
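A hypothetical call, with an illustrative object id; as in the other loaders, a null versionId reads the latest revision:

// "repository" and the id value are illustrative assumptions
SlaveServer slaveServer = repository.loadSlaveServer(new StringObjectId("a1b2c3"), null);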
Use of org.pentaho.platform.api.repository2.unified.data.node.NodeRepositoryFileData in project pentaho-kettle by pentaho.
The class PurRepository, method loadDatabaseMeta.
@Override
public DatabaseMeta loadDatabaseMeta(final ObjectId databaseId, final String versionId) throws KettleException {
  try {
    NodeRepositoryFileData data = pur.getDataAtVersionForRead(databaseId.getId(), versionId, NodeRepositoryFileData.class);
    RepositoryFile file = null;
    if (versionId != null) {
      file = pur.getFileAtVersion(databaseId.getId(), versionId);
    } else {
      file = pur.getFileById(databaseId.getId());
    }
    return databaseMetaTransformer.assemble(file, data, pur.getVersionSummary(databaseId.getId(), versionId));
  } catch (Exception e) {
    throw new KettleException("Unable to load database with id [" + databaseId + "]", e);
  }
}
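loadDatabaseMeta follows the same three-call pattern as loadSlaveServer (node data, then file, then version summary). A sketch with illustrative id and revision values:

ObjectId databaseId = new StringObjectId("4f5e6d"); // illustrative id
DatabaseMeta databaseMeta = repository.loadDatabaseMeta(databaseId, "1.1"); // "1.1" is an assumed revision label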
Use of org.pentaho.platform.api.repository2.unified.data.node.NodeRepositoryFileData in project pentaho-kettle by pentaho.
The class PurRepository, method loadJobs.
/**
 * Load all jobs referenced by {@code files}.
 *
 * @param monitor
 * @param log
 * @param files
 *          Job files to load.
 * @param setInternalVariables
 *          Should internal variables be set when loading? (Note: THIS IS IGNORED, they are always set)
 * @return Loaded jobs
 * @throws KettleException
 *           Error loading data for jobs from repository
 */
protected List<JobMeta> loadJobs(final ProgressMonitorListener monitor, final LogChannelInterface log,
    final List<RepositoryFile> files, final boolean setInternalVariables) throws KettleException {
  List<JobMeta> jobs = new ArrayList<JobMeta>(files.size());
  List<NodeRepositoryFileData> filesData = pur.getDataForReadInBatch(files, NodeRepositoryFileData.class);
  List<VersionSummary> versions = pur.getVersionSummaryInBatch(files);
  Iterator<RepositoryFile> filesIter = files.iterator();
  Iterator<NodeRepositoryFileData> filesDataIter = filesData.iterator();
  Iterator<VersionSummary> versionsIter = versions.iterator();
  while ((monitor == null || !monitor.isCanceled()) && filesIter.hasNext()) {
    RepositoryFile file = filesIter.next();
    NodeRepositoryFileData fileData = filesDataIter.next();
    VersionSummary version = versionsIter.next();
    try {
      String dirPath = file.getPath().substring(0, file.getPath().lastIndexOf(RepositoryDirectory.DIRECTORY_SEPARATOR));
      log.logDetailed("Loading/Exporting job [{0} : {1}] ({2})", dirPath, file.getTitle(), file.getPath()); // $NON-NLS-1$
      if (monitor != null) {
        monitor.subTask("Exporting job [" + file.getPath() + "]"); // $NON-NLS-1$ //$NON-NLS-2$
      }
      JobMeta jobMeta = buildJobMeta(file, findDirectory(dirPath), fileData, createObjectRevision(version));
      ExtensionPointHandler.callExtensionPoint(log, KettleExtensionPoint.JobMetaLoaded.id, jobMeta);
      jobs.add(jobMeta);
    } catch (Exception ex) {
      log.logError("Unable to load job [" + file.getPath() + "]", ex); // $NON-NLS-1$ //$NON-NLS-2$
    }
  }
  return jobs;
}
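A hypothetical driver for this helper, callable from inside the class or a subclass. Collecting jobFiles is only sketched here; in the class it would typically come from a files-by-type lookup such as the one used by readSharedObjects.

// Hypothetical sketch: jobFiles is assumed to be populated elsewhere
List<RepositoryFile> jobFiles = new ArrayList<RepositoryFile>();
// a null monitor disables the cancel check in the loop above; internal variables are always set regardless
List<JobMeta> jobs = loadJobs(null, log, jobFiles, true);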