Use of org.apache.hadoop.hbase.regionserver.HRegionFileSystem in project hbase by apache.
The class SnapshotManifestV1, method buildManifestFromDisk.
static SnapshotRegionManifest buildManifestFromDisk(final Configuration conf, final FileSystem fs,
  final Path tableDir, final RegionInfo regionInfo) throws IOException {
  HRegionFileSystem regionFs =
    HRegionFileSystem.openRegionFromFileSystem(conf, fs, tableDir, regionInfo, true);
  SnapshotRegionManifest.Builder manifest = SnapshotRegionManifest.newBuilder();

  // 1. dump region meta info into the snapshot directory
  LOG.debug("Storing region-info for snapshot.");
  manifest.setRegionInfo(ProtobufUtil.toRegionInfo(regionInfo));

  // 2. iterate through all the stores in the region
  LOG.debug("Creating references for hfiles");

  // This ensures that we have an atomic view of the directory as long as we have < ls limit
  // (batch size of the files in a directory) on the namenode. Otherwise, we get back the files
  // in batches and may miss files being added/deleted. This could be more robust (iteratively
  // checking to see if we have all the files until we are sure), but the limit is currently
  // 1000 files/batch, far more than the number of store files under a single column family.
  Collection<String> familyNames = regionFs.getFamilies();
  if (familyNames != null) {
    for (String familyName : familyNames) {
      Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(familyName, false);
      if (storeFiles == null) {
        LOG.debug("No files under family: " + familyName);
        continue;
      }

      // 2.1. build the snapshot reference for the store
      SnapshotRegionManifest.FamilyFiles.Builder family =
        SnapshotRegionManifest.FamilyFiles.newBuilder();
      family.setFamilyName(UnsafeByteOperations.unsafeWrap(Bytes.toBytes(familyName)));

      if (LOG.isDebugEnabled()) {
        LOG.debug("Adding snapshot references for " + storeFiles + " hfiles");
      }

      // 2.2. iterate through all the store's files and create "references".
      int i = 0;
      int sz = storeFiles.size();
      for (StoreFileInfo storeFile : storeFiles) {
        // create "reference" to this store file.
        LOG.debug("Adding reference for file (" + (++i) + "/" + sz + "): " + storeFile.getPath());
        SnapshotRegionManifest.StoreFile.Builder sfManifest =
          SnapshotRegionManifest.StoreFile.newBuilder();
        sfManifest.setName(storeFile.getPath().getName());
        family.addStoreFiles(sfManifest.build());
      }
      manifest.addFamilyFiles(family.build());
    }
  }
  return manifest.build();
}
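
buildManifestFromDisk is package-private, so any caller lives alongside it in the snapshot package. A minimal sketch of how it might be driven for every region of a table, assuming a live HBase root directory; the table name below is hypothetical:

Configuration conf = HBaseConfiguration.create();
Path rootDir = CommonFSUtils.getRootDir(conf);
FileSystem fs = rootDir.getFileSystem(conf);
Path tableDir = CommonFSUtils.getTableDir(rootDir, TableName.valueOf("someTable")); // hypothetical
for (FileStatus regionDir : fs.listStatus(tableDir, new FSUtils.RegionDirFilter(fs))) {
  // the on-disk .regioninfo file identifies the region for this directory
  RegionInfo regionInfo = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir.getPath());
  SnapshotRegionManifest manifest =
    SnapshotManifestV1.buildManifestFromDisk(conf, fs, tableDir, regionInfo);
  // ... persist or inspect the manifest here
}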
Use of org.apache.hadoop.hbase.regionserver.HRegionFileSystem in project hbase by apache.
The class TestMasterRegionInitialize, method testUpgrade.
@Test
public void testUpgrade() throws IOException {
  Path rootDir = new Path(htu.getDataTestDir(), REGION_DIR_NAME);
  Path tableDir =
    CommonFSUtils.getTableDir(rootDir, region.region.getTableDescriptor().getTableName());
  Path initializingFlag = new Path(tableDir, MasterRegion.INITIALIZING_FLAG);
  Path initializedFlag = new Path(tableDir, MasterRegion.INITIALIZED_FLAG);
  HRegionFileSystem hfs = region.region.getRegionFileSystem();
  assertFalse(hfs.getFileSystem().exists(initializingFlag));
  assertTrue(hfs.getFileSystem().exists(initializedFlag));
  byte[] row = Bytes.toBytes("row");
  byte[] cf = CF1;
  byte[] cq = Bytes.toBytes("qual");
  byte[] value = Bytes.toBytes("value");
  region.update(r -> r.put(new Put(row).addColumn(cf, cq, value)));
  assertEquals(FlushResult.Result.FLUSHED_NO_COMPACTION_NEEDED, region.flush(true).getResult());
  // delete the initialized flag to simulate the old implementation
  hfs.getFileSystem().delete(initializedFlag, true);
  FSTableDescriptors.deleteTableDescriptors(hfs.getFileSystem(), tableDir);
  assertNull(FSTableDescriptors.getTableDescriptorFromFs(hfs.getFileSystem(), tableDir));
  // reopen with the new store file tracker
  region.close(false);
  htu.getConfiguration().set(StoreFileTrackerFactory.TRACKER_IMPL,
    StoreFileTrackerFactory.Trackers.FILE.name());
  createMasterRegion();
  // make sure we successfully upgrade to the new implementation without data loss
  hfs = region.region.getRegionFileSystem();
  assertFalse(hfs.getFileSystem().exists(initializingFlag));
  assertTrue(hfs.getFileSystem().exists(initializedFlag));
  TableDescriptor td = FSTableDescriptors.getTableDescriptorFromFs(hfs.getFileSystem(), tableDir);
  assertEquals(StoreFileTrackerFactory.Trackers.FILE.name(),
    td.getValue(StoreFileTrackerFactory.TRACKER_IMPL));
  assertArrayEquals(value, region.get(new Get(row)).getValue(cf, cq));
}
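
The two flag files form a small recovery protocol. A minimal sketch of the upgrade rule this test pins down (an assumption about the behavior, not MasterRegion's actual code): a region directory with neither flag is treated as the pre-flag layout, so its data is kept and the initialized flag is recorded.

FileSystem fs = hfs.getFileSystem();
if (!fs.exists(initializedFlag) && !fs.exists(initializingFlag)) {
  // pre-flag layout: reuse the existing data as-is (hence no data loss),
  // then record that initialization is complete
  if (!fs.mkdirs(initializedFlag)) {
    throw new IOException("can not touch " + initializedFlag);
  }
}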
Use of org.apache.hadoop.hbase.regionserver.HRegionFileSystem in project hbase by apache.
The class TestMasterRegionInitialize, method testInitializingCleanup.
@Test
public void testInitializingCleanup() throws IOException {
  Path rootDir = new Path(htu.getDataTestDir(), REGION_DIR_NAME);
  Path tableDir =
    CommonFSUtils.getTableDir(rootDir, region.region.getTableDescriptor().getTableName());
  Path initializingFlag = new Path(tableDir, MasterRegion.INITIALIZING_FLAG);
  Path initializedFlag = new Path(tableDir, MasterRegion.INITIALIZED_FLAG);
  HRegionFileSystem hfs = region.region.getRegionFileSystem();
  assertFalse(hfs.getFileSystem().exists(initializingFlag));
  assertTrue(hfs.getFileSystem().exists(initializedFlag));
  byte[] row = Bytes.toBytes("row");
  byte[] cf = CF1;
  byte[] cq = Bytes.toBytes("qual");
  byte[] value = Bytes.toBytes("value");
  region.update(r -> r.put(new Put(row).addColumn(cf, cq, value)));
  // delete the initialized flag and touch an initializing flag,
  // to simulate an initialization in progress
  hfs.getFileSystem().delete(initializedFlag, true);
  if (!hfs.getFileSystem().mkdirs(initializingFlag)) {
    throw new IOException("can not touch " + initializingFlag);
  }
  region.close(false);
  createMasterRegion();
  hfs = region.region.getRegionFileSystem();
  assertFalse(hfs.getFileSystem().exists(initializingFlag));
  assertTrue(hfs.getFileSystem().exists(initializedFlag));
  // but the data should have been cleaned up
  assertTrue(region.get(new Get(row)).isEmpty());
}
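
The companion rule, again sketched as an assumption rather than quoted from MasterRegion: a leftover initializing flag means the previous initialization died midway, so the half-written data is discarded before starting over, which is exactly why the row above disappears.

FileSystem fs = hfs.getFileSystem();
if (!fs.exists(initializedFlag) && fs.exists(initializingFlag)) {
  // a previous initialization died midway; its partial data cannot be trusted
  fs.delete(tableDir, true);
  // ... re-create the region from scratch, then lay down initializedFlag
}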
Use of org.apache.hadoop.hbase.regionserver.HRegionFileSystem in project hbase by apache.
The class TestSnapshotStoreFileSize, method testIsStoreFileSizeMatchFilesystemAndManifest.
@Test
public void testIsStoreFileSizeMatchFilesystemAndManifest() throws IOException {
  admin = UTIL.getAdmin();
  fs = UTIL.getTestFileSystem();
  UTIL.createTable(TABLE_NAME, FAMILY_NAME.getBytes());
  Table table = admin.getConnection().getTable(TABLE_NAME);
  UTIL.loadRandomRows(table, FAMILY_NAME.getBytes(), 3, 1000);
  admin.snapshot(SNAPSHOT_NAME, TABLE_NAME);

  Map<String, Long> storeFileInfoFromManifest = new HashMap<String, Long>();
  Map<String, Long> storeFileInfoFromFS = new HashMap<String, Long>();
  String storeFileName = "";
  long storeFileSize = 0L;

  // collect the store file sizes recorded in the snapshot manifest
  Path snapshotDir =
    SnapshotDescriptionUtils.getCompletedSnapshotDir(SNAPSHOT_NAME, UTIL.getDefaultRootDirPath());
  SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
  SnapshotManifest snapshotManifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc);
  List<SnapshotRegionManifest> regionManifest = snapshotManifest.getRegionManifests();
  for (int i = 0; i < regionManifest.size(); i++) {
    SnapshotRegionManifest.FamilyFiles family = regionManifest.get(i).getFamilyFiles(0);
    List<SnapshotRegionManifest.StoreFile> storeFiles = family.getStoreFilesList();
    for (int j = 0; j < storeFiles.size(); j++) {
      storeFileName = storeFiles.get(j).getName();
      storeFileSize = storeFiles.get(j).getFileSize();
      storeFileInfoFromManifest.put(storeFileName, storeFileSize);
    }
  }

  // collect the actual store file sizes from the filesystem
  List<RegionInfo> regionsInfo = admin.getRegions(TABLE_NAME);
  Path path = CommonFSUtils.getTableDir(UTIL.getDefaultRootDirPath(), TABLE_NAME);
  for (RegionInfo regionInfo : regionsInfo) {
    HRegionFileSystem hRegionFileSystem =
      HRegionFileSystem.openRegionFromFileSystem(conf, fs, path, regionInfo, true);
    Collection<StoreFileInfo> storeFilesFS = hRegionFileSystem.getStoreFiles(FAMILY_NAME);
    Iterator<StoreFileInfo> sfIterator = storeFilesFS.iterator();
    while (sfIterator.hasNext()) {
      StoreFileInfo sfi = sfIterator.next();
      FileStatus[] fileStatus = CommonFSUtils.listStatus(fs, sfi.getPath());
      storeFileName = fileStatus[0].getPath().getName();
      storeFileSize = fileStatus[0].getLen();
      storeFileInfoFromFS.put(storeFileName, storeFileSize);
    }
  }
  Assert.assertEquals(storeFileInfoFromManifest, storeFileInfoFromFS);
}
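
Since StoreFileInfo already exposes the file's path, the filesystem side of the comparison can also be collected with one getFileStatus call per file instead of a listStatus array. A minimal equivalent sketch, reusing the variables from the test above:

Map<String, Long> sizesFromFS = new HashMap<>();
for (RegionInfo regionInfo : admin.getRegions(TABLE_NAME)) {
  HRegionFileSystem regionFs =
    HRegionFileSystem.openRegionFromFileSystem(conf, fs, path, regionInfo, true);
  for (StoreFileInfo sfi : regionFs.getStoreFiles(FAMILY_NAME)) {
    // a store file is a single file, so one FileStatus is enough
    sizesFromFS.put(sfi.getPath().getName(), fs.getFileStatus(sfi.getPath()).getLen());
  }
}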
Use of org.apache.hadoop.hbase.regionserver.HRegionFileSystem in project hbase by apache.
The class TestMajorCompactionTTLRequest, method makeMockRequest.
private MajorCompactionTTLRequest makeMockRequest(List<StoreFileInfo> storeFiles)
  throws IOException {
  Connection connection = mock(Connection.class);
  RegionInfo regionInfo = mock(RegionInfo.class);
  when(regionInfo.getEncodedName()).thenReturn("HBase");
  when(regionInfo.getTable()).thenReturn(TableName.valueOf("foo"));
  MajorCompactionTTLRequest request = new MajorCompactionTTLRequest(connection, regionInfo);
  MajorCompactionTTLRequest spy = spy(request);
  HRegionFileSystem fileSystem = mockFileSystem(regionInfo, false, storeFiles);
  doReturn(fileSystem).when(spy).getFileSystem();
  return spy;
}
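
A hedged usage sketch; the helper and values below (mockStoreFiles, regionStoreDir, oldTimestamp) are assumptions modeled on this test class, not quoted from it:

// Feed synthetic, aged store files through the spy; getFileSystem() now
// returns the mocked HRegionFileSystem, so no real region is needed.
List<StoreFileInfo> storeFiles = mockStoreFiles(regionStoreDir, 2, oldTimestamp); // assumed helper
MajorCompactionTTLRequest request = makeMockRequest(storeFiles);
// ... assert which stores the request reports as needing a TTL major compaction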