Use of java.util.Collection in project hadoop by Apache.
Class TestResourceLocalizationService, method testRecovery:
@Test
// mocked generics
@SuppressWarnings("unchecked")
public void testRecovery() throws Exception {
final String user1 = "user1";
final String user2 = "user2";
final ApplicationId appId1 = ApplicationId.newInstance(1, 1);
final ApplicationId appId2 = ApplicationId.newInstance(1, 2);
List<Path> localDirs = new ArrayList<Path>();
String[] sDirs = new String[4];
for (int i = 0; i < 4; ++i) {
localDirs.add(lfs.makeQualified(new Path(basedir, i + "")));
sDirs[i] = localDirs.get(i).toString();
}
conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS, sDirs);
conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, true);
NMMemoryStateStoreService stateStore = new NMMemoryStateStoreService();
stateStore.init(conf);
stateStore.start();
DrainDispatcher dispatcher = new DrainDispatcher();
dispatcher.init(conf);
dispatcher.start();
EventHandler<ApplicationEvent> applicationBus = mock(EventHandler.class);
dispatcher.register(ApplicationEventType.class, applicationBus);
EventHandler<ContainerEvent> containerBus = mock(EventHandler.class);
dispatcher.register(ContainerEventType.class, containerBus);
//Ignore actual localization
EventHandler<LocalizerEvent> localizerBus = mock(EventHandler.class);
dispatcher.register(LocalizerEventType.class, localizerBus);
LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService();
dirsHandler.init(conf);
ResourceLocalizationService spyService = createSpyService(dispatcher, dirsHandler, stateStore);
try {
spyService.init(conf);
spyService.start();
final Application app1 = mock(Application.class);
when(app1.getUser()).thenReturn(user1);
when(app1.getAppId()).thenReturn(appId1);
final Application app2 = mock(Application.class);
when(app2.getUser()).thenReturn(user2);
when(app2.getAppId()).thenReturn(appId2);
spyService.handle(new ApplicationLocalizationEvent(LocalizationEventType.INIT_APPLICATION_RESOURCES, app1));
spyService.handle(new ApplicationLocalizationEvent(LocalizationEventType.INIT_APPLICATION_RESOURCES, app2));
dispatcher.await();
// Get a handle on the trackers after they're set up with INIT_APP_RESOURCES
LocalResourcesTracker appTracker1 = spyService.getLocalResourcesTracker(LocalResourceVisibility.APPLICATION, user1, appId1);
LocalResourcesTracker privTracker1 = spyService.getLocalResourcesTracker(LocalResourceVisibility.PRIVATE, user1, null);
LocalResourcesTracker appTracker2 = spyService.getLocalResourcesTracker(LocalResourceVisibility.APPLICATION, user2, appId2);
LocalResourcesTracker pubTracker = spyService.getLocalResourcesTracker(LocalResourceVisibility.PUBLIC, null, null);
// init containers
final Container c1 = getMockContainer(appId1, 1, user1);
final Container c2 = getMockContainer(appId2, 2, user2);
// init resources
Random r = new Random();
long seed = r.nextLong();
System.out.println("SEED: " + seed);
r.setSeed(seed);
// Send localization requests of each type.
final LocalResource privResource1 = getPrivateMockedResource(r);
final LocalResourceRequest privReq1 = new LocalResourceRequest(privResource1);
final LocalResource privResource2 = getPrivateMockedResource(r);
final LocalResourceRequest privReq2 = new LocalResourceRequest(privResource2);
final LocalResource pubResource1 = getPublicMockedResource(r);
final LocalResourceRequest pubReq1 = new LocalResourceRequest(pubResource1);
final LocalResource pubResource2 = getPublicMockedResource(r);
final LocalResourceRequest pubReq2 = new LocalResourceRequest(pubResource2);
final LocalResource appResource1 = getAppMockedResource(r);
final LocalResourceRequest appReq1 = new LocalResourceRequest(appResource1);
final LocalResource appResource2 = getAppMockedResource(r);
final LocalResourceRequest appReq2 = new LocalResourceRequest(appResource2);
final LocalResource appResource3 = getAppMockedResource(r);
final LocalResourceRequest appReq3 = new LocalResourceRequest(appResource3);
Map<LocalResourceVisibility, Collection<LocalResourceRequest>> req1 = new HashMap<LocalResourceVisibility, Collection<LocalResourceRequest>>();
req1.put(LocalResourceVisibility.PRIVATE, Arrays.asList(new LocalResourceRequest[] { privReq1, privReq2 }));
req1.put(LocalResourceVisibility.PUBLIC, Collections.singletonList(pubReq1));
req1.put(LocalResourceVisibility.APPLICATION, Collections.singletonList(appReq1));
Map<LocalResourceVisibility, Collection<LocalResourceRequest>> req2 = new HashMap<LocalResourceVisibility, Collection<LocalResourceRequest>>();
req2.put(LocalResourceVisibility.APPLICATION, Arrays.asList(new LocalResourceRequest[] { appReq2, appReq3 }));
req2.put(LocalResourceVisibility.PUBLIC, Collections.singletonList(pubReq2));
// Send Request event
spyService.handle(new ContainerLocalizationRequestEvent(c1, req1));
spyService.handle(new ContainerLocalizationRequestEvent(c2, req2));
dispatcher.await();
// Simulate start of localization for all resources
privTracker1.getPathForLocalization(privReq1, dirsHandler.getLocalPathForWrite(ContainerLocalizer.USERCACHE + user1), null);
privTracker1.getPathForLocalization(privReq2, dirsHandler.getLocalPathForWrite(ContainerLocalizer.USERCACHE + user1), null);
LocalizedResource privLr1 = privTracker1.getLocalizedResource(privReq1);
LocalizedResource privLr2 = privTracker1.getLocalizedResource(privReq2);
appTracker1.getPathForLocalization(appReq1, dirsHandler.getLocalPathForWrite(ContainerLocalizer.APPCACHE + appId1), null);
LocalizedResource appLr1 = appTracker1.getLocalizedResource(appReq1);
appTracker2.getPathForLocalization(appReq2, dirsHandler.getLocalPathForWrite(ContainerLocalizer.APPCACHE + appId2), null);
LocalizedResource appLr2 = appTracker2.getLocalizedResource(appReq2);
appTracker2.getPathForLocalization(appReq3, dirsHandler.getLocalPathForWrite(ContainerLocalizer.APPCACHE + appId2), null);
LocalizedResource appLr3 = appTracker2.getLocalizedResource(appReq3);
pubTracker.getPathForLocalization(pubReq1, dirsHandler.getLocalPathForWrite(ContainerLocalizer.FILECACHE), null);
LocalizedResource pubLr1 = pubTracker.getLocalizedResource(pubReq1);
pubTracker.getPathForLocalization(pubReq2, dirsHandler.getLocalPathForWrite(ContainerLocalizer.FILECACHE), null);
LocalizedResource pubLr2 = pubTracker.getLocalizedResource(pubReq2);
// Simulate completion of localization for most resources with
// possibly different sizes than in the request
assertNotNull("Localization not started", privLr1.getLocalPath());
privTracker1.handle(new ResourceLocalizedEvent(privReq1, privLr1.getLocalPath(), privLr1.getSize() + 5));
assertNotNull("Localization not started", privLr2.getLocalPath());
privTracker1.handle(new ResourceLocalizedEvent(privReq2, privLr2.getLocalPath(), privLr2.getSize() + 10));
assertNotNull("Localization not started", appLr1.getLocalPath());
appTracker1.handle(new ResourceLocalizedEvent(appReq1, appLr1.getLocalPath(), appLr1.getSize()));
assertNotNull("Localization not started", appLr3.getLocalPath());
appTracker2.handle(new ResourceLocalizedEvent(appReq3, appLr3.getLocalPath(), appLr3.getSize() + 7));
assertNotNull("Localization not started", pubLr1.getLocalPath());
pubTracker.handle(new ResourceLocalizedEvent(pubReq1, pubLr1.getLocalPath(), pubLr1.getSize() + 1000));
assertNotNull("Localization not started", pubLr2.getLocalPath());
pubTracker.handle(new ResourceLocalizedEvent(pubReq2, pubLr2.getLocalPath(), pubLr2.getSize() + 99999));
dispatcher.await();
assertEquals(ResourceState.LOCALIZED, privLr1.getState());
assertEquals(ResourceState.LOCALIZED, privLr2.getState());
assertEquals(ResourceState.LOCALIZED, appLr1.getState());
assertEquals(ResourceState.DOWNLOADING, appLr2.getState());
assertEquals(ResourceState.LOCALIZED, appLr3.getState());
assertEquals(ResourceState.LOCALIZED, pubLr1.getState());
assertEquals(ResourceState.LOCALIZED, pubLr2.getState());
// restart and recover
spyService = createSpyService(dispatcher, dirsHandler, stateStore);
spyService.init(conf);
spyService.recoverLocalizedResources(stateStore.loadLocalizationState());
dispatcher.await();
appTracker1 = spyService.getLocalResourcesTracker(LocalResourceVisibility.APPLICATION, user1, appId1);
privTracker1 = spyService.getLocalResourcesTracker(LocalResourceVisibility.PRIVATE, user1, null);
appTracker2 = spyService.getLocalResourcesTracker(LocalResourceVisibility.APPLICATION, user2, appId2);
pubTracker = spyService.getLocalResourcesTracker(LocalResourceVisibility.PUBLIC, null, null);
LocalizedResource recoveredRsrc = privTracker1.getLocalizedResource(privReq1);
assertEquals(privReq1, recoveredRsrc.getRequest());
assertEquals(privLr1.getLocalPath(), recoveredRsrc.getLocalPath());
assertEquals(privLr1.getSize(), recoveredRsrc.getSize());
assertEquals(ResourceState.LOCALIZED, recoveredRsrc.getState());
recoveredRsrc = privTracker1.getLocalizedResource(privReq2);
assertEquals(privReq2, recoveredRsrc.getRequest());
assertEquals(privLr2.getLocalPath(), recoveredRsrc.getLocalPath());
assertEquals(privLr2.getSize(), recoveredRsrc.getSize());
assertEquals(ResourceState.LOCALIZED, recoveredRsrc.getState());
recoveredRsrc = appTracker1.getLocalizedResource(appReq1);
assertEquals(appReq1, recoveredRsrc.getRequest());
assertEquals(appLr1.getLocalPath(), recoveredRsrc.getLocalPath());
assertEquals(appLr1.getSize(), recoveredRsrc.getSize());
assertEquals(ResourceState.LOCALIZED, recoveredRsrc.getState());
recoveredRsrc = appTracker2.getLocalizedResource(appReq2);
assertNull("in-progress resource should not be present", recoveredRsrc);
recoveredRsrc = appTracker2.getLocalizedResource(appReq3);
assertEquals(appReq3, recoveredRsrc.getRequest());
assertEquals(appLr3.getLocalPath(), recoveredRsrc.getLocalPath());
assertEquals(appLr3.getSize(), recoveredRsrc.getSize());
assertEquals(ResourceState.LOCALIZED, recoveredRsrc.getState());
} finally {
dispatcher.stop();
stateStore.close();
}
}
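Collection enters this test through the request maps req1 and req2: each LocalResourceVisibility keys a Collection<LocalResourceRequest>, and the service only ever reads the values through the Collection interface. Below is a minimal, self-contained sketch of that pattern using plain JDK types; the YARN classes are replaced with strings purely for illustration, so this is the shape of the idiom, not the project's code.

import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

public class RequestMapSketch {
    enum Visibility { PUBLIC, PRIVATE, APPLICATION }

    public static void main(String[] args) {
        // Each visibility keys a Collection of requests, mirroring
        // Map<LocalResourceVisibility, Collection<LocalResourceRequest>> in the test.
        Map<Visibility, Collection<String>> req = new HashMap<>();
        req.put(Visibility.PRIVATE, Arrays.asList("privReq1", "privReq2"));
        req.put(Visibility.PUBLIC, Collections.singletonList("pubReq1"));
        req.put(Visibility.APPLICATION, Collections.singletonList("appReq1"));

        // Iterating the map hands back Collection views regardless of the
        // concrete List type stored for each key.
        for (Map.Entry<Visibility, Collection<String>> e : req.entrySet()) {
            System.out.println(e.getKey() + " -> " + e.getValue().size() + " request(s)");
        }
    }
}

Declaring the value type as Collection rather than List is what lets the test mix Arrays.asList and Collections.singletonList freely in the same map.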
Use of java.util.Collection in project hbase by Apache.
Class HBaseFsck, method generatePuts:
/**
* Generate the set of puts to add to a new meta table. This expects the tables to be
* clean, with no overlaps or holes. If there are any problems it returns null.
*
* @return An array list of puts to do in bulk, or null if the tables have problems
*/
private ArrayList<Put> generatePuts(SortedMap<TableName, TableInfo> tablesInfo) throws IOException {
ArrayList<Put> puts = new ArrayList<>();
boolean hasProblems = false;
for (Entry<TableName, TableInfo> e : tablesInfo.entrySet()) {
TableName name = e.getKey();
// skip "hbase:meta"
if (name.compareTo(TableName.META_TABLE_NAME) == 0) {
continue;
}
TableInfo ti = e.getValue();
puts.add(MetaTableAccessor.makePutFromTableState(new TableState(ti.tableName, TableState.State.ENABLED)));
for (Entry<byte[], Collection<HbckInfo>> spl : ti.sc.getStarts().asMap().entrySet()) {
Collection<HbckInfo> his = spl.getValue();
int sz = his.size();
if (sz != 1) {
// problem
LOG.error("Split starting at " + Bytes.toStringBinary(spl.getKey()) + " had " + sz + " regions instead of exactly 1.");
hasProblems = true;
continue;
}
// add the row directly to meta.
HbckInfo hi = his.iterator().next();
// hi.metaEntry;
HRegionInfo hri = hi.getHdfsHRI();
Put p = MetaTableAccessor.makePutFromRegionInfo(hri);
puts.add(p);
}
}
return hasProblems ? null : puts;
}
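The Collection here comes from a multimap view: getStarts() groups regions by their start key, and asMap() exposes each group as a Collection<HbckInfo> whose size tells us whether that key range is clean. The sketch below reproduces the check with Guava's ArrayListMultimap and strings in place of HbckInfo; it assumes a Guava-style multimap, which is what the asMap() call above suggests, and is illustrative rather than HBaseFsck's actual code.

import java.util.Collection;
import java.util.Map;
import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;

public class StartKeySketch {
    public static void main(String[] args) {
        // Start key -> region names; a clean table has exactly one region per start key.
        Multimap<String, String> starts = ArrayListMultimap.create();
        starts.put("", "region-a");
        starts.put("row-100", "region-b");
        starts.put("row-100", "region-c"); // overlap: two regions share a start key

        boolean hasProblems = false;
        for (Map.Entry<String, Collection<String>> e : starts.asMap().entrySet()) {
            Collection<String> regions = e.getValue();
            if (regions.size() != 1) {
                System.err.println("Start key '" + e.getKey() + "' had "
                        + regions.size() + " regions instead of exactly 1.");
                hasProblems = true;
                continue;
            }
            System.out.println("OK: " + regions.iterator().next());
        }
        System.out.println(hasProblems ? "table has problems" : "table is clean");
    }
}

The size-of-one check is the whole integrity test: a clean table contributes exactly one region per start key, so any Collection with more than one element signals an overlap.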
Use of java.util.Collection in project kafka by Apache.
Class StreamThreadTest, method testMaybeCommit:
@Test
public void testMaybeCommit() throws Exception {
File baseDir = Files.createTempDirectory("test").toFile();
try {
final long commitInterval = 1000L;
Properties props = configProps();
props.setProperty(StreamsConfig.STATE_DIR_CONFIG, baseDir.getCanonicalPath());
props.setProperty(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, Long.toString(commitInterval));
StreamsConfig config = new StreamsConfig(props);
final MockTime mockTime = new MockTime();
TopologyBuilder builder = new TopologyBuilder().setApplicationId("X");
builder.addSource("source1", "topic1");
MockClientSupplier mockClientSupplier = new MockClientSupplier();
StreamThread thread = new StreamThread(builder, config, mockClientSupplier, applicationId, clientId, processId, new Metrics(), mockTime, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0) {
@Override
public void maybeCommit(long now) {
super.maybeCommit(now);
}
@Override
protected StreamTask createStreamTask(TaskId id, Collection<TopicPartition> partitionsForTask) {
ProcessorTopology topology = builder.build(id.topicGroupId);
return new TestStreamTask(id, applicationId, partitionsForTask, topology, consumer, producer, restoreConsumer, config, new MockStreamsMetrics(new Metrics()), stateDirectory);
}
};
initPartitionGrouper(config, thread, mockClientSupplier);
ConsumerRebalanceListener rebalanceListener = thread.rebalanceListener;
List<TopicPartition> revokedPartitions;
List<TopicPartition> assignedPartitions;
//
// Assign t1p1 and t1p2. This should create Task 1 & 2
//
revokedPartitions = Collections.emptyList();
assignedPartitions = Arrays.asList(t1p1, t1p2);
rebalanceListener.onPartitionsRevoked(revokedPartitions);
rebalanceListener.onPartitionsAssigned(assignedPartitions);
assertEquals(2, thread.tasks().size());
// no task is committed before the commit interval
mockTime.sleep(commitInterval - 10L);
thread.maybeCommit(mockTime.milliseconds());
for (StreamTask task : thread.tasks().values()) {
assertFalse(((TestStreamTask) task).committed);
}
// all tasks are committed after the commit interval
mockTime.sleep(11L);
thread.maybeCommit(mockTime.milliseconds());
for (StreamTask task : thread.tasks().values()) {
assertTrue(((TestStreamTask) task).committed);
((TestStreamTask) task).committed = false;
}
// no task is committed before the commit interval, again
mockTime.sleep(commitInterval - 10L);
thread.maybeCommit(mockTime.milliseconds());
for (StreamTask task : thread.tasks().values()) {
assertFalse(((TestStreamTask) task).committed);
}
// all tasks are committed after the commit interval, again
mockTime.sleep(11L);
thread.maybeCommit(mockTime.milliseconds());
for (StreamTask task : thread.tasks().values()) {
assertTrue(((TestStreamTask) task).committed);
((TestStreamTask) task).committed = false;
}
} finally {
Utils.delete(baseDir);
}
}
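The timing the assertions encode is simple: nothing commits while less than the commit interval has elapsed, and everything commits once it has. The following is a conceptual sketch of that rule, driven by a fake clock the way MockTime drives the test; it is an assumed simplification of what maybeCommit is being tested for, not Kafka's StreamThread implementation, and the class and method names are made up for the example.

public class CommitIntervalSketch {
    private final long commitIntervalMs;
    private long lastCommitMs;

    CommitIntervalSketch(long commitIntervalMs, long startMs) {
        this.commitIntervalMs = commitIntervalMs;
        this.lastCommitMs = startMs;
    }

    // Returns true (and resets the clock) only once the interval has elapsed,
    // which is why the test sees no commits at commitInterval - 10 ms and
    // commits at commitInterval + 1 ms.
    boolean maybeCommit(long nowMs) {
        if (nowMs - lastCommitMs < commitIntervalMs) {
            return false;
        }
        lastCommitMs = nowMs;
        return true;
    }

    public static void main(String[] args) {
        CommitIntervalSketch s = new CommitIntervalSketch(1000L, 0L);
        System.out.println(s.maybeCommit(990L));   // false: interval not yet reached
        System.out.println(s.maybeCommit(1001L));  // true: interval elapsed
        System.out.println(s.maybeCommit(1011L));  // false: clock was reset on commit
    }
}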
Use of java.util.Collection in project kafka by Apache.
Class StreamThreadTest, method shouldNotViolateAtLeastOnceWhenExceptionOccursDuringCloseTopologyWhenSuspendingState:
@Test
public void shouldNotViolateAtLeastOnceWhenExceptionOccursDuringCloseTopologyWhenSuspendingState() throws Exception {
final KStreamBuilder builder = new KStreamBuilder();
builder.setApplicationId(applicationId);
builder.stream("t1").groupByKey();
final StreamsConfig config = new StreamsConfig(configProps());
final MockClientSupplier clientSupplier = new MockClientSupplier();
final TestStreamTask testStreamTask = new TestStreamTask(new TaskId(0, 0), applicationId, Utils.mkSet(new TopicPartition("t1", 0)), builder.build(0), clientSupplier.consumer, clientSupplier.producer, clientSupplier.restoreConsumer, config, new MockStreamsMetrics(new Metrics()), new StateDirectory(applicationId, config.getString(StreamsConfig.STATE_DIR_CONFIG), time)) {
@Override
public void closeTopology() {
throw new RuntimeException("KABOOM!");
}
};
final StreamsConfig config1 = new StreamsConfig(configProps());
final StreamThread thread = new StreamThread(builder, config1, clientSupplier, applicationId, clientId, processId, new Metrics(), new MockTime(), new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0) {
@Override
protected StreamTask createStreamTask(final TaskId id, final Collection<TopicPartition> partitions) {
return testStreamTask;
}
};
final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
activeTasks.put(testStreamTask.id, testStreamTask.partitions);
thread.partitionAssignor(new MockStreamsPartitionAssignor(activeTasks));
thread.rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptyList());
thread.rebalanceListener.onPartitionsAssigned(testStreamTask.partitions);
try {
thread.rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptyList());
fail("should have thrown exception");
} catch (Exception e) {
// expected
}
assertFalse(testStreamTask.committed);
}
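The at-least-once property being tested is an ordering guarantee: if closing the topology fails while the task is being suspended, the task must not end up marked as committed, so its records will be reprocessed later rather than silently dropped. The toy class below illustrates that ordering only; the real suspend/commit sequencing inside Kafka's StreamThread is more involved, so treat this as an assumed simplification rather than the actual code path.

public class SuspendSketch {
    boolean committed = false;

    // Conceptual suspend flow: the commit flag is only set after the
    // topology closed cleanly, so a failure propagates and the work is
    // replayed later (at-least-once) instead of being recorded as done.
    void suspend() {
        closeTopology();   // may throw
        committed = true;  // never reached if closeTopology() failed
    }

    void closeTopology() {
        throw new RuntimeException("KABOOM!");
    }

    public static void main(String[] args) {
        SuspendSketch task = new SuspendSketch();
        try {
            task.suspend();
        } catch (RuntimeException expected) {
            // swallowed here, as in the test
        }
        System.out.println("committed = " + task.committed); // false
    }
}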
Use of java.util.Collection in project kafka by Apache.
Class StreamThreadTest, method testMaybeClean:
@Test
public void testMaybeClean() throws Exception {
File baseDir = Files.createTempDirectory("test").toFile();
try {
final long cleanupDelay = 1000L;
Properties props = configProps();
props.setProperty(StreamsConfig.STATE_CLEANUP_DELAY_MS_CONFIG, Long.toString(cleanupDelay));
props.setProperty(StreamsConfig.STATE_DIR_CONFIG, baseDir.getCanonicalPath());
StreamsConfig config = new StreamsConfig(props);
File applicationDir = new File(baseDir, applicationId);
applicationDir.mkdir();
File stateDir1 = new File(applicationDir, task1.toString());
File stateDir2 = new File(applicationDir, task2.toString());
File stateDir3 = new File(applicationDir, task3.toString());
File extraDir = new File(applicationDir, "X");
stateDir1.mkdir();
stateDir2.mkdir();
stateDir3.mkdir();
extraDir.mkdir();
final MockTime mockTime = new MockTime();
TopologyBuilder builder = new TopologyBuilder().setApplicationId("X");
builder.addSource("source1", "topic1");
MockClientSupplier mockClientSupplier = new MockClientSupplier();
StreamThread thread = new StreamThread(builder, config, mockClientSupplier, applicationId, clientId, processId, new Metrics(), mockTime, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0) {
@Override
public void maybeClean(long now) {
super.maybeClean(now);
}
@Override
protected StreamTask createStreamTask(TaskId id, Collection<TopicPartition> partitionsForTask) {
ProcessorTopology topology = builder.build(id.topicGroupId);
return new TestStreamTask(id, applicationId, partitionsForTask, topology, consumer, producer, restoreConsumer, config, new MockStreamsMetrics(new Metrics()), stateDirectory);
}
};
initPartitionGrouper(config, thread, mockClientSupplier);
ConsumerRebalanceListener rebalanceListener = thread.rebalanceListener;
assertTrue(thread.tasks().isEmpty());
mockTime.sleep(cleanupDelay);
// all directories still exist since no assignment has happened yet
assertTrue(stateDir1.exists());
assertTrue(stateDir2.exists());
assertTrue(stateDir3.exists());
assertTrue(extraDir.exists());
List<TopicPartition> revokedPartitions;
List<TopicPartition> assignedPartitions;
Map<TaskId, StreamTask> prevTasks;
//
// Assign t1p1 and t1p2. This should create task1 & task2
//
final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
activeTasks.put(task1, Collections.singleton(t1p1));
activeTasks.put(task2, Collections.singleton(t1p2));
thread.partitionAssignor(new MockStreamsPartitionAssignor(activeTasks));
revokedPartitions = Collections.emptyList();
assignedPartitions = Arrays.asList(t1p1, t1p2);
prevTasks = new HashMap<>(thread.tasks());
rebalanceListener.onPartitionsRevoked(revokedPartitions);
rebalanceListener.onPartitionsAssigned(assignedPartitions);
// there shouldn't be any previous task
assertTrue(prevTasks.isEmpty());
// task 1 & 2 are created
assertEquals(2, thread.tasks().size());
// all directories should still exist before the cleanup delay time has elapsed
mockTime.sleep(cleanupDelay - 10L);
thread.maybeClean(mockTime.milliseconds());
assertTrue(stateDir1.exists());
assertTrue(stateDir2.exists());
assertTrue(stateDir3.exists());
assertTrue(extraDir.exists());
// all state directories except those for active task1 & task2 will be removed; the extra directory should still exist
mockTime.sleep(11L);
thread.maybeClean(mockTime.milliseconds());
assertTrue(stateDir1.exists());
assertTrue(stateDir2.exists());
assertFalse(stateDir3.exists());
assertTrue(extraDir.exists());
//
// Revoke t1p1 and t1p2. This should remove task1 & task2
//
activeTasks.clear();
revokedPartitions = assignedPartitions;
assignedPartitions = Collections.emptyList();
prevTasks = new HashMap<>(thread.tasks());
rebalanceListener.onPartitionsRevoked(revokedPartitions);
rebalanceListener.onPartitionsAssigned(assignedPartitions);
// previous tasks should be committed
assertEquals(2, prevTasks.size());
for (StreamTask task : prevTasks.values()) {
assertTrue(((TestStreamTask) task).committed);
((TestStreamTask) task).committed = false;
}
// no task
assertTrue(thread.tasks().isEmpty());
// all state directories for task1 & task2 still exist before the cleanup delay time has elapsed
mockTime.sleep(cleanupDelay - 10L);
thread.maybeClean(mockTime.milliseconds());
assertTrue(stateDir1.exists());
assertTrue(stateDir2.exists());
assertFalse(stateDir3.exists());
assertTrue(extraDir.exists());
// all state directories for task1 & task2 are removed
mockTime.sleep(11L);
thread.maybeClean(mockTime.milliseconds());
assertFalse(stateDir1.exists());
assertFalse(stateDir2.exists());
assertFalse(stateDir3.exists());
assertTrue(extraDir.exists());
} finally {
Utils.delete(baseDir);
}
}
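What maybeClean is expected to do, judging only from the assertions above: task state directories that are no longer assigned and have been idle for at least the cleanup delay get deleted, while directories that do not look like task directories (the extra "X" directory) are never touched. A hypothetical sketch of that policy follows; the directory-name pattern (TaskId.toString() style, e.g. "0_1") and the method signature are assumptions for illustration, not Kafka's StateDirectory code.

import java.io.File;
import java.util.Set;

public class StateCleanerSketch {
    // Deletes task state directories that are not in the active set and have
    // been idle longer than cleanupDelayMs. Directories whose names do not
    // look like task IDs (e.g. "X") are skipped, which is why extraDir
    // survives every pass in the test above.
    static void maybeClean(File appDir, Set<String> activeTaskDirs,
                           long nowMs, long cleanupDelayMs) {
        File[] dirs = appDir.listFiles();
        if (dirs == null) {
            return;
        }
        for (File dir : dirs) {
            if (!dir.getName().matches("\\d+_\\d+")) {
                continue; // not a task directory
            }
            if (activeTaskDirs.contains(dir.getName())) {
                continue; // still assigned to this thread
            }
            if (nowMs - dir.lastModified() >= cleanupDelayMs) {
                deleteRecursively(dir);
            }
        }
    }

    static void deleteRecursively(File f) {
        File[] children = f.listFiles();
        if (children != null) {
            for (File c : children) {
                deleteRecursively(c);
            }
        }
        f.delete();
    }
}

Doing the name check first is what protects unrelated directories such as "X": they never match the task-ID pattern, so the activity and delay checks are never evaluated for them.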