Use of org.apache.samza.SamzaException in project samza by apache.
Class ClusterBasedJobCoordinator, method main.
/**
 * The entry point for the {@link ClusterBasedJobCoordinator}.
 * Reads the serialized coordinator system config from the environment,
 * then constructs and runs the coordinator.
 * @param args args (unused; configuration comes from the environment)
 */
public static void main(String[] args) {
  Config coordinatorSystemConfig = null;
  final String coordinatorSystemEnv = System.getenv(ShellCommandConfig.ENV_COORDINATOR_SYSTEM_CONFIG());
  try {
    // Read and parse the coordinator system config.
    log.info("Parsing coordinator system config {}", coordinatorSystemEnv);
    coordinatorSystemConfig = new MapConfig(SamzaObjectMapper.getObjectMapper().readValue(coordinatorSystemEnv, Config.class));
  } catch (IOException e) {
    // No "{}" placeholder here: SLF4J treats a trailing Throwable as the stack-trace
    // argument, so a placeholder would never be filled and would print literally.
    log.error("Exception while reading coordinator stream config", e);
    throw new SamzaException(e);
  }
  ClusterBasedJobCoordinator jc = new ClusterBasedJobCoordinator(coordinatorSystemConfig);
  jc.run();
}
Use of org.apache.samza.SamzaException in project samza by apache.
Class TestSamzaContainerExceptionHandler, method testExceptionHandler.
@Test
public void testExceptionHandler() {
  // The handler must invoke the supplied exit callback when it receives
  // an uncaught exception from any thread.
  final AtomicBoolean exitInvoked = new AtomicBoolean(false);
  final Thread.UncaughtExceptionHandler handler =
      new SamzaContainerExceptionHandler(() -> exitInvoked.getAndSet(true));
  handler.uncaughtException(Thread.currentThread(), new SamzaException());
  assertTrue(exitInvoked.get());
}
Use of org.apache.samza.SamzaException in project samza by apache.
Class DirectoryPartitioner, method validateAndGetOriginalFilteredFiles.
/*
 * This class holds the assumption that the directory remains immutable.
 * If the directory does change:
 * ignore new files showing up in the directory based on an old version of partition descriptor;
 * throw {@link org.apache.samza.SamzaException} if at least one old file doesn't exist anymore
 */
private List<FileMetadata> validateAndGetOriginalFilteredFiles(List<FileMetadata> newFileList, Map<Partition, List<String>> existingPartitionDescriptor) {
  assert newFileList != null;
  assert existingPartitionDescriptor != null;
  // All file paths recorded in the existing (old) partition descriptor.
  Set<String> oldFileSet = new HashSet<>();
  existingPartitionDescriptor.values().forEach(oldFileSet::addAll);
  Set<String> newFileSet = new HashSet<>();
  newFileList.forEach(file -> newFileSet.add(file.getPath()));
  if (!newFileSet.containsAll(oldFileSet)) {
    // Compute the actual missing files for the message. The previous code embedded
    // oldFileSet.removeAll(newFileSet), but Set.removeAll returns a boolean (whether
    // the set changed), so the exception printed "diff = true" instead of the paths.
    Set<String> missingFiles = new HashSet<>(oldFileSet);
    missingFiles.removeAll(newFileSet);
    throw new SamzaException("The list of new files is not a super set of the old files. diff = " + missingFiles);
  }
  // Drop files that are not in the old descriptor: new arrivals are ignored so the
  // partitioning stays consistent with the previously persisted descriptor.
  Iterator<FileMetadata> iterator = newFileList.iterator();
  while (iterator.hasNext()) {
    FileMetadata file = iterator.next();
    if (!oldFileSet.contains(file.getPath())) {
      iterator.remove();
    }
  }
  return newFileList;
}
Use of org.apache.samza.SamzaException in project samza by apache.
Class RemoteApplicationRunner, method status.
/**
 * Aggregates the per-job statuses of the application's execution plan into a single
 * {@link ApplicationStatus}: New if any job has not started, else Running if any job
 * is running, else the first unsuccessful-finish status, else SuccessfulFinish.
 */
@Override
public ApplicationStatus status(StreamApplication app) {
  try {
    boolean hasNewJobs = false;
    boolean hasRunningJobs = false;
    ApplicationStatus unsuccessfulFinishStatus = null;
    ExecutionPlan plan = getExecutionPlan(app);
    for (JobConfig jobConfig : plan.getJobConfigs()) {
      JobRunner runner = new JobRunner(jobConfig);
      ApplicationStatus status = runner.status();
      // SLF4J provides a two-argument overload; no need to wrap args in an Object[].
      log.debug("Status is {} for job {}", status, jobConfig.getName());
      switch (status.getStatusCode()) {
        case New:
          hasNewJobs = true;
          break;
        case Running:
          hasRunningJobs = true;
          break;
        case UnsuccessfulFinish:
          // Keep the first failure status so its cause can be reported to the caller.
          unsuccessfulFinishStatus = status;
          break;
        case SuccessfulFinish:
          break;
        default:
      }
    }
    if (hasNewJobs) {
      // There are jobs not started, report as New
      return ApplicationStatus.New;
    } else if (hasRunningJobs) {
      // All jobs are started, some are running
      return ApplicationStatus.Running;
    } else if (unsuccessfulFinishStatus != null) {
      // All jobs are finished, some are not successful
      return unsuccessfulFinishStatus;
    } else {
      // All jobs are finished successfully
      return ApplicationStatus.SuccessfulFinish;
    }
  } catch (Throwable t) {
    throw new SamzaException("Failed to get status for application", t);
  }
}
Use of org.apache.samza.SamzaException in project samza by apache.
Class StorageRecovery, method getSystemFactoriesAndAdmins.
/**
 * Gets the SystemFactories and SystemAdmins specified in the config file and
 * puts them into the maps.
 * @throws SamzaException if a referenced system has no factory class configured
 */
private void getSystemFactoriesAndAdmins() {
  JavaSystemConfig systemConfig = new JavaSystemConfig(jobConfig);
  List<String> systems = systemConfig.getSystemNames();
  for (String system : systems) {
    String systemFactoryClassName = systemConfig.getSystemFactory(system);
    if (systemFactoryClassName == null) {
      throw new SamzaException("A stream uses system " + system + " which is missing from the configuration.");
    }
    // Instantiate the factory once and reuse it for both maps; the previous code
    // called Util.getObj twice, constructing two separate factory instances.
    SystemFactory factory = Util.<SystemFactory>getObj(systemFactoryClassName);
    systemFactories.put(system, factory);
    systemAdmins.put(system, factory.getAdmin(system, jobConfig));
  }
  // Parameterized logging avoids eager string concatenation when INFO is disabled.
  log.info("Got system factories: {}", systemFactories.keySet());
  log.info("Got system admins: {}", systemAdmins.keySet());
}
Aggregations