Use of org.apache.hadoop.hbase.TableName in project hbase by apache.
The class RSGroupInfoManagerImpl, method moveTables.
@Override
public synchronized void moveTables(Set<TableName> tableNames, String groupName) throws IOException {
  if (groupName != null && !rsGroupMap.containsKey(groupName)) {
    throw new DoNotRetryIOException("Group " + groupName + " does not exist or is a special group");
  }
  // Mutate a copy of the group map so concurrent readers never observe a partial move.
  Map<String, RSGroupInfo> newGroupMap = Maps.newHashMap(rsGroupMap);
  for (TableName tableName : tableNames) {
    // Remove the table from its current group, if it belongs to one.
    if (tableMap.containsKey(tableName)) {
      RSGroupInfo src = new RSGroupInfo(newGroupMap.get(tableMap.get(tableName)));
      src.removeTable(tableName);
      newGroupMap.put(src.getName(), src);
    }
    // Add the table to the destination group; a null groupName only removes it.
    if (groupName != null) {
      RSGroupInfo dst = new RSGroupInfo(newGroupMap.get(groupName));
      dst.addTable(tableName);
      newGroupMap.put(dst.getName(), dst);
    }
  }
  flushConfig(newGroupMap);
}
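A caller builds the first argument from TableName instances. A minimal sketch of a call site follows; the group and table names are illustrative, and rsGroupInfoManager is a hypothetical reference, not something defined in the snippet above.

import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.hbase.TableName;

// Building the Set<TableName> argument for moveTables; names are illustrative.
public class MoveTablesCaller {
  public static void main(String[] args) {
    Set<TableName> tables = new HashSet<>();
    tables.add(TableName.valueOf("t1"));          // default namespace
    tables.add(TableName.valueOf("myns", "t2"));  // explicit namespace
    System.out.println(tables);                   // [t1, myns:t2] in some order
    // rsGroupInfoManager.moveTables(tables, "analytics"); // hypothetical call site
  }
}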
Use of org.apache.hadoop.hbase.TableName in project hbase by apache.
The class BackupAdminImpl, method deleteBackups.
@Override
public int deleteBackups(String[] backupIds) throws IOException {
  // TODO: requires fault tolerance support; a failure will leave the system
  // in an inconsistent state. See HBASE-15227.
  int totalDeleted = 0;
  Map<String, HashSet<TableName>> allTablesMap = new HashMap<>();
  try (final BackupSystemTable sysTable = new BackupSystemTable(conn)) {
    for (int i = 0; i < backupIds.length; i++) {
      BackupInfo info = sysTable.readBackupInfo(backupIds[i]);
      if (info != null) {
        // Group the tables of each deleted backup by its backup root directory.
        String rootDir = info.getBackupRootDir();
        HashSet<TableName> allTables = allTablesMap.get(rootDir);
        if (allTables == null) {
          allTables = new HashSet<>();
          allTablesMap.put(rootDir, allTables);
        }
        allTables.addAll(info.getTableNames());
        totalDeleted += deleteBackup(backupIds[i], sysTable);
      }
    }
    finalizeDelete(allTablesMap, sysTable);
  }
  return totalDeleted;
}
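The get-or-create block that populates allTablesMap is the classic grouping idiom; on Java 8+ the same thing can be written with Map.computeIfAbsent. A self-contained sketch, with plain strings standing in for TableName and illustrative root directories:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Same grouping idiom as allTablesMap above, expressed with computeIfAbsent.
public class GroupByRootDirSketch {
  public static void main(String[] args) {
    String[][] entries = {
        {"hdfs://backups/a", "t1"}, {"hdfs://backups/a", "t2"},
        {"hdfs://backups/b", "t1"}};
    Map<String, Set<String>> allTablesMap = new HashMap<>();
    for (String[] e : entries) {
      allTablesMap.computeIfAbsent(e[0], k -> new HashSet<>()).add(e[1]);
    }
    System.out.println(allTablesMap); // e.g. {hdfs://backups/a=[t1, t2], hdfs://backups/b=[t1]}
  }
}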
Use of org.apache.hadoop.hbase.TableName in project hbase by apache.
The class BackupAdminImpl, method getAffectedBackupInfos.
private List<BackupInfo> getAffectedBackupInfos(BackupInfo backupInfo, TableName tn,
    BackupSystemTable table) throws IOException {
  LOG.debug("GetAffectedBackupInfos for: " + backupInfo.getBackupId() + " table=" + tn);
  long ts = backupInfo.getStartTs();
  List<BackupInfo> list = new ArrayList<>();
  List<BackupInfo> history = table.getBackupHistory(backupInfo.getBackupRootDir());
  // Break once backupInfo itself is reached in the history.
  for (BackupInfo info : history) {
    if (info.getStartTs() == ts) {
      break;
    }
    List<TableName> tables = info.getTableNames();
    if (tables.contains(tn)) {
      BackupType bt = info.getType();
      if (bt == BackupType.FULL) {
        // Clear the list if we encounter a FULL backup of this table.
        list.clear();
      } else {
        LOG.debug("GetAffectedBackupInfos for: " + backupInfo.getBackupId() + " table=" + tn
            + " added " + info.getBackupId() + " tables=" + info.getTableListAsString());
        list.add(info);
      }
    }
  }
  return list;
}
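The clear-on-FULL step only makes sense if the history is ordered newest-first: incrementals newer than a FULL backup depend on that FULL rather than on the anchor backup being examined. A self-contained illustration under that assumption, with a hypothetical Backup class standing in for BackupInfo:

import java.util.ArrayList;
import java.util.List;

// Illustration of the dependency scan in getAffectedBackupInfos, assuming
// the history list is ordered newest-first. Backup is a hypothetical
// stand-in for BackupInfo.
public class AffectedBackupsSketch {

  enum Type { FULL, INCREMENTAL }

  static class Backup {
    final String id; final Type type; final long startTs; final List<String> tables;
    Backup(String id, Type type, long startTs, List<String> tables) {
      this.id = id; this.type = type; this.startTs = startTs; this.tables = tables;
    }
  }

  static List<Backup> affected(List<Backup> history, long anchorTs, String table) {
    List<Backup> list = new ArrayList<>();
    for (Backup b : history) {
      if (b.startTs == anchorTs) break;        // reached the anchor backup
      if (!b.tables.contains(table)) continue;
      if (b.type == Type.FULL) list.clear();   // newer incrementals depend on this FULL
      else list.add(b);
    }
    return list;
  }

  public static void main(String[] args) {
    List<Backup> history = List.of(            // newest first
        new Backup("inc2", Type.INCREMENTAL, 40, List.of("t1")),
        new Backup("full2", Type.FULL, 30, List.of("t1")),
        new Backup("inc1", Type.INCREMENTAL, 20, List.of("t1")),
        new Backup("full1", Type.FULL, 10, List.of("t1")));
    // With full1 (ts=10) as the anchor, only inc1 is affected: inc2 is
    // superseded by full2 and dropped when full2 is reached.
    affected(history, 10, "t1").forEach(b -> System.out.println(b.id)); // inc1
  }
}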
Use of org.apache.hadoop.hbase.TableName in project hbase by apache.
The class BackupManager, method createBackupInfo.
/**
 * Creates a backup info based on the input backup request.
 * @param backupId backup id
 * @param type backup type
 * @param tableList table list
 * @param targetRootDir root dir
 * @param workers number of parallel workers
 * @param bandwidth bandwidth per worker in MB per sec
 * @return BackupInfo
 * @throws BackupException exception
 */
public BackupInfo createBackupInfo(String backupId, BackupType type, List<TableName> tableList,
    String targetRootDir, int workers, long bandwidth) throws BackupException {
  if (targetRootDir == null) {
    throw new BackupException("Wrong backup request parameter: target backup root directory");
  }
  if (type == BackupType.FULL && (tableList == null || tableList.isEmpty())) {
    // A null or empty table list for a full backup means "back up all tables", so fill the
    // list with every user table from meta. If no table is available, throw an exception.
    HTableDescriptor[] htds = null;
    try (Admin admin = conn.getAdmin()) {
      htds = admin.listTables();
    } catch (Exception e) {
      throw new BackupException(e);
    }
    if (htds == null) {
      throw new BackupException("No table exists for full backup of all tables.");
    } else {
      tableList = new ArrayList<>();
      for (HTableDescriptor hTableDescriptor : htds) {
        TableName tn = hTableDescriptor.getTableName();
        if (tn.equals(BackupSystemTable.getTableName(conf))) {
          // Skip the backup system table.
          continue;
        }
        tableList.add(tn);
      }
      LOG.info("Full backup of all the tables available in the cluster: " + tableList);
    }
  }
  // At this point the table list contains at least one table.
  backupInfo = new BackupInfo(backupId, type, tableList.toArray(new TableName[tableList.size()]),
      targetRootDir);
  backupInfo.setBandwidth(bandwidth);
  backupInfo.setWorkers(workers);
  return backupInfo;
}
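The TableName-specific step here is excluding the backup system table by equality before the list is converted to an array. A brief sketch of that filter; "backup:system" is an illustrative name, while the real one comes from BackupSystemTable.getTableName(conf).

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.TableName;

// Filtering the backup system table out of a candidate list, as
// createBackupInfo does. All table names below are illustrative.
public class DefaultTableListSketch {
  public static void main(String[] args) {
    TableName backupSystemTable = TableName.valueOf("backup", "system");
    List<TableName> candidates = List.of(
        TableName.valueOf("ns1", "t1"),
        TableName.valueOf("ns1", "t2"),
        backupSystemTable);
    List<TableName> tableList = new ArrayList<>();
    for (TableName tn : candidates) {
      if (tn.equals(backupSystemTable)) {
        continue; // skip the backup system table
      }
      tableList.add(tn);
    }
    // BackupInfo expects an array, hence the toArray call in createBackupInfo.
    TableName[] asArray = tableList.toArray(new TableName[0]);
    System.out.println(asArray.length + " tables: " + tableList); // 2 tables: [ns1:t1, ns1:t2]
  }
}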
Use of org.apache.hadoop.hbase.TableName in project hbase by apache.
The class BackupManifest, method canCoverImage.
/**
 * Check whether a backup image set can cover a backup image or not.
 * @param fullImages the backup image set
 * @param image the target backup image
 * @return true if fullImages can cover image, otherwise false
 */
public static boolean canCoverImage(ArrayList<BackupImage> fullImages, BackupImage image) {
  // Every image in fullImages must be a FULL backup that does not start earlier than
  // the target image, and the union of their table sets must cover the target's table set.
  for (BackupImage image1 : fullImages) {
    if (image1.getType() == BackupType.INCREMENTAL) {
      return false;
    }
    if (image1.getStartTs() < image.getStartTs()) {
      return false;
    }
  }
  ArrayList<String> image1TableList = new ArrayList<>();
  for (BackupImage image1 : fullImages) {
    for (TableName table : image1.getTableNames()) {
      image1TableList.add(table.getNameAsString());
    }
  }
  ArrayList<String> image2TableList = new ArrayList<>();
  for (TableName table : image.getTableNames()) {
    image2TableList.add(table.getNameAsString());
  }
  for (int i = 0; i < image2TableList.size(); i++) {
    if (!image1TableList.contains(image2TableList.get(i))) {
      return false;
    }
  }
  LOG.debug("Full image set can cover image " + image.getBackupId());
  return true;
}