Use of org.apache.phoenix.schema.PName in project phoenix by apache.
The class MutationState, method send.
@SuppressWarnings("deprecation")
private void send(Iterator<TableRef> tableRefIterator) throws SQLException {
int i = 0;
long[] serverTimeStamps = null;
boolean sendAll = false;
if (tableRefIterator == null) {
serverTimeStamps = validateAll();
tableRefIterator = mutations.keySet().iterator();
sendAll = true;
}
MultiRowMutationState multiRowMutationState;
Map<TableInfo, List<Mutation>> physicalTableMutationMap = Maps.newLinkedHashMap();
// add tracing for this operation
try (TraceScope trace = Tracing.startNewSpan(connection, "Committing mutations to tables")) {
Span span = trace.getSpan();
ImmutableBytesWritable indexMetaDataPtr = new ImmutableBytesWritable();
while (tableRefIterator.hasNext()) {
// at this point we are going through mutations for each table
final TableRef tableRef = tableRefIterator.next();
multiRowMutationState = mutations.get(tableRef);
if (multiRowMutationState == null || multiRowMutationState.isEmpty()) {
continue;
}
// Validate as we go if transactional since we can undo if a problem occurs (which is unlikely)
long serverTimestamp = serverTimeStamps == null ? validateAndGetServerTimestamp(tableRef, multiRowMutationState) : serverTimeStamps[i++];
Long scn = connection.getSCN();
long mutationTimestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
final PTable table = tableRef.getTable();
Iterator<Pair<PName, List<Mutation>>> mutationsIterator = addRowMutations(tableRef, multiRowMutationState, mutationTimestamp, serverTimestamp, false, sendAll);
// build map from physical table to mutation list
boolean isDataTable = true;
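// addRowMutations yields the data table's mutation list first, then one list per index table,
// so only the first pair below is flagged as the data table.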
while (mutationsIterator.hasNext()) {
Pair<PName, List<Mutation>> pair = mutationsIterator.next();
PName hTableName = pair.getFirst();
List<Mutation> mutationList = pair.getSecond();
TableInfo tableInfo = new TableInfo(isDataTable, hTableName, tableRef);
List<Mutation> oldMutationList = physicalTableMutationMap.put(tableInfo, mutationList);
if (oldMutationList != null)
mutationList.addAll(0, oldMutationList);
isDataTable = false;
}
// For transactions, track the statement indexes as we send data
// over because our CommitException should include all statements
// involved in the transaction since none of them would have been
// committed in the event of a failure.
if (table.isTransactional()) {
addUncommittedStatementIndexes(multiRowMutationState.values());
if (txMutations.isEmpty()) {
txMutations = Maps.newHashMapWithExpectedSize(mutations.size());
}
// Keep all mutations we've encountered until a commit or rollback.
// This is not ideal, but there's no good way to get the values back
// in the event that we need to replay the commit.
// Copy TableRef so we have the original PTable and know when the
// indexes have changed.
joinMutationState(new TableRef(tableRef), multiRowMutationState, txMutations);
}
}
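// All mutations are now grouped by physical table name; write each batch,
// clearing the region cache and retrying once if a region server was missing the index metadata.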
long serverTimestamp = HConstants.LATEST_TIMESTAMP;
Iterator<Entry<TableInfo, List<Mutation>>> mutationsIterator = physicalTableMutationMap.entrySet().iterator();
while (mutationsIterator.hasNext()) {
Entry<TableInfo, List<Mutation>> pair = mutationsIterator.next();
TableInfo tableInfo = pair.getKey();
byte[] htableName = tableInfo.getHTableName().getBytes();
List<Mutation> mutationList = pair.getValue();
// create a span per target table
// TODO maybe we can be smarter about the table name to string here?
Span child = Tracing.child(span, "Writing mutation batch for table: " + Bytes.toString(htableName));
int retryCount = 0;
boolean shouldRetry = false;
long numMutations = 0;
long mutationSizeBytes = 0;
long mutationCommitTime = 0;
long numFailedMutations = 0;
long startTime = 0;
boolean shouldRetryIndexedMutation = false;
IndexWriteException iwe = null;
do {
TableRef origTableRef = tableInfo.getOrigTableRef();
PTable table = origTableRef.getTable();
table.getIndexMaintainers(indexMetaDataPtr, connection);
final ServerCache cache = tableInfo.isDataTable() ? setMetaDataOnMutations(origTableRef, mutationList, indexMetaDataPtr) : null;
// If we haven't retried yet, retry for this case only, as it's possible that
// a split will occur after we send the index metadata cache to all known
// region servers.
shouldRetry = cache != null;
SQLException sqlE = null;
HTableInterface hTable = connection.getQueryServices().getTable(htableName);
try {
if (table.isTransactional()) {
// Track tables to which we've sent uncommitted data
uncommittedPhysicalNames.add(table.getPhysicalName().getString());
// If we have indexes, wrap the HTable in a delegate HTable that
// will attach the necessary index meta data in the event of a
// rollback
if (!table.getIndexes().isEmpty()) {
hTable = new MetaDataAwareHTable(hTable, origTableRef);
}
hTable = TransactionUtil.getPhoenixTransactionTable(phoenixTransactionContext, hTable, table);
}
numMutations = mutationList.size();
GLOBAL_MUTATION_BATCH_SIZE.update(numMutations);
mutationSizeBytes = calculateMutationSize(mutationList);
startTime = System.currentTimeMillis();
child.addTimelineAnnotation("Attempt " + retryCount);
List<List<Mutation>> mutationBatchList = getMutationBatchList(batchSize, batchSizeBytes, mutationList);
for (final List<Mutation> mutationBatch : mutationBatchList) {
if (shouldRetryIndexedMutation) {
// if there was an index write failure, retry the mutation in a loop
final HTableInterface finalHTable = hTable;
PhoenixIndexFailurePolicy.doBatchWithRetries(new MutateCommand() {
@Override
public void doMutation() throws IOException {
try {
finalHTable.batch(mutationBatch);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new IOException(e);
}
}
}, iwe, connection, connection.getQueryServices().getProps());
} else {
hTable.batch(mutationBatch);
}
batchCount++;
if (logger.isDebugEnabled())
logger.debug("Sent batch of " + mutationBatch.size() + " for " + Bytes.toString(htableName));
}
child.stop();
shouldRetry = false;
mutationCommitTime = System.currentTimeMillis() - startTime;
GLOBAL_MUTATION_COMMIT_TIME.update(mutationCommitTime);
numFailedMutations = 0;
// Remove batches as we process them
mutations.remove(origTableRef);
if (tableInfo.isDataTable()) {
numRows -= numMutations;
// recalculate the estimated size
estimatedSize = KeyValueUtil.getEstimatedRowMutationSize(mutations);
}
} catch (Exception e) {
mutationCommitTime = System.currentTimeMillis() - startTime;
serverTimestamp = ServerUtil.parseServerTimestamp(e);
SQLException inferredE = ServerUtil.parseServerExceptionOrNull(e);
if (inferredE != null) {
if (shouldRetry && retryCount == 0 && inferredE.getErrorCode() == SQLExceptionCode.INDEX_METADATA_NOT_FOUND.getErrorCode()) {
// Swallow this exception once, as it's possible that we split after sending the index metadata
// and one of the region servers doesn't have it. The retry will resend it, so that server has it on the next attempt.
// If it fails again, we don't retry.
String msg = "Swallowing exception and retrying after clearing meta cache on connection. " + inferredE;
logger.warn(LogUtil.addCustomAnnotations(msg, connection));
connection.getQueryServices().clearTableRegionCache(htableName);
// add a new child span as this one failed
child.addTimelineAnnotation(msg);
child.stop();
child = Tracing.child(span, "Failed batch, attempting retry");
continue;
} else if (inferredE.getErrorCode() == SQLExceptionCode.INDEX_WRITE_FAILURE.getErrorCode()) {
iwe = PhoenixIndexFailurePolicy.getIndexWriteException(inferredE);
if (iwe != null && !shouldRetryIndexedMutation) {
// For an index write failure, the data table write succeeded,
// so when we retry we need to set REPLAY_WRITES
for (Mutation m : mutationList) {
m.setAttribute(BaseScannerRegionObserver.REPLAY_WRITES, BaseScannerRegionObserver.REPLAY_ONLY_INDEX_WRITES);
KeyValueUtil.setTimestamp(m, serverTimestamp);
}
shouldRetry = true;
shouldRetryIndexedMutation = true;
continue;
}
}
e = inferredE;
}
// Throw to client an exception that indicates the statements that
// were not committed successfully.
int[] uncommittedStatementIndexes = getUncommittedStatementIndexes();
sqlE = new CommitException(e, uncommittedStatementIndexes, serverTimestamp);
numFailedMutations = uncommittedStatementIndexes.length;
GLOBAL_MUTATION_BATCH_FAILED_COUNT.update(numFailedMutations);
} finally {
MutationMetric mutationsMetric = new MutationMetric(numMutations, mutationSizeBytes, mutationCommitTime, numFailedMutations);
mutationMetricQueue.addMetricsForTable(Bytes.toString(htableName), mutationsMetric);
try {
if (cache != null)
cache.close();
} finally {
try {
hTable.close();
} catch (IOException e) {
if (sqlE != null) {
sqlE.setNextException(ServerUtil.parseServerException(e));
} else {
sqlE = ServerUtil.parseServerException(e);
}
}
if (sqlE != null) {
throw sqlE;
}
}
}
} while (shouldRetry && retryCount++ < 1);
}
}
}
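Throughout send, PName is the handle for a physical HBase table name: it is taken from the data or index table, turned into bytes, and used to look up the HTable that the mutation batch is written to. A minimal sketch of that lookup pattern, using only calls that already appear above (getPhysicalName, getBytes, getQueryServices().getTable); the helper name is made up for illustration:
// Sketch only, not Phoenix source: resolve a PTable's physical PName to its HBase table.
// Assumes a live PhoenixConnection and a resolved PTable.
private static HTableInterface resolvePhysicalTable(PhoenixConnection connection, PTable table) throws SQLException {
    PName physicalName = table.getPhysicalName();      // the name of the underlying HBase table
    byte[] hTableNameBytes = physicalName.getBytes();  // byte[] form expected by the HBase client
    return connection.getQueryServices().getTable(hTableNameBytes);
}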
Use of org.apache.phoenix.schema.PName in project phoenix by apache.
The class DefaultStatisticsCollector, method initGuidepostDepth.
private void initGuidepostDepth() throws IOException, ClassNotFoundException, SQLException {
// First, check if guidepost info was set on the statement itself
if (guidePostPerRegionBytes != null || guidePostWidthBytes != null) {
int guidepostPerRegion = 0;
long guidepostWidth = QueryServicesOptions.DEFAULT_STATS_GUIDEPOST_WIDTH_BYTES;
if (guidePostPerRegionBytes != null) {
guidepostPerRegion = PInteger.INSTANCE.getCodec().decodeInt(guidePostPerRegionBytes, 0, SortOrder.getDefault());
}
if (guidePostWidthBytes != null) {
guidepostWidth = PLong.INSTANCE.getCodec().decodeLong(guidePostWidthBytes, 0, SortOrder.getDefault());
}
this.guidePostDepth = StatisticsUtil.getGuidePostDepth(guidepostPerRegion, guidepostWidth, env.getRegion().getTableDesc());
} else {
long guidepostWidth = -1;
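// -1 means no GUIDE_POST_WIDTH was found; the global config fallback below only applies if this stays negative.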
HTableInterface htable = null;
try {
// Next check for GUIDE_POST_WIDTH on table
htable = env.getTable(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, env.getConfiguration()));
Get get = new Get(ptableKey);
get.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.GUIDE_POSTS_WIDTH_BYTES);
Result result = htable.get(get);
if (!result.isEmpty()) {
Cell cell = result.listCells().get(0);
guidepostWidth = PLong.INSTANCE.getCodec().decodeLong(cell.getValueArray(), cell.getValueOffset(), SortOrder.getDefault());
} else if (!isViewIndexTable) {
/*
* The table we are collecting stats for is potentially a base table, or local
* index or a global index. For view indexes, we rely on the guide post
* width column in the parent data table's metadata which we already tried
* retrieving above.
*/
try (Connection conn = QueryUtil.getConnectionOnServer(env.getConfiguration())) {
PTable table = PhoenixRuntime.getTable(conn, tableName);
if (table.getType() == PTableType.INDEX && table.getIndexType() == IndexType.GLOBAL) {
/*
* For global indexes, we need to get the parentName first and then
* fetch guide post width configured for the parent table.
*/
PName parentName = table.getParentName();
byte[] parentKey = SchemaUtil.getTableKeyFromFullName(parentName.getString());
get = new Get(parentKey);
get.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.GUIDE_POSTS_WIDTH_BYTES);
result = htable.get(get);
if (!result.isEmpty()) {
Cell cell = result.listCells().get(0);
guidepostWidth = PLong.INSTANCE.getCodec().decodeLong(cell.getValueArray(), cell.getValueOffset(), SortOrder.getDefault());
}
}
}
}
} finally {
if (htable != null) {
try {
htable.close();
} catch (IOException e) {
LOG.warn("Failed to close " + htable.getName(), e);
}
}
}
if (guidepostWidth >= 0) {
this.guidePostDepth = guidepostWidth;
} else {
// Lastly, fall back to the global config value
Configuration config = env.getConfiguration();
this.guidePostDepth = StatisticsUtil.getGuidePostDepth(config.getInt(QueryServices.STATS_GUIDEPOST_PER_REGION_ATTRIB, QueryServicesOptions.DEFAULT_STATS_GUIDEPOST_PER_REGION), config.getLong(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_STATS_GUIDEPOST_WIDTH_BYTES), env.getRegion().getTableDesc());
}
}
}
Use of org.apache.phoenix.schema.PName in project phoenix by apache.
The class AlterTableWithViewsIT, method testMakeBaseTableTransactional.
@Test
public void testMakeBaseTableTransactional() throws Exception {
try (Connection conn = DriverManager.getConnection(getUrl());
Connection viewConn = isMultiTenant ? DriverManager.getConnection(TENANT_SPECIFIC_URL1) : conn) {
String baseTableName = "NONTXNTBL_" + generateUniqueName() + (isMultiTenant ? "0" : "1");
String viewOfTable = baseTableName + "_VIEW";
String ddlFormat = "CREATE TABLE IF NOT EXISTS " + baseTableName + " (" + " %s ID char(1) NOT NULL," + " COL1 integer NOT NULL," + " COL2 bigint NOT NULL," + " CONSTRAINT NAME_PK PRIMARY KEY (%s ID, COL1, COL2)" + " ) %s";
conn.createStatement().execute(generateDDL(ddlFormat));
assertTableDefinition(conn, baseTableName, PTableType.TABLE, null, 0, 3, QueryConstants.BASE_TABLE_BASE_COLUMN_COUNT, "ID", "COL1", "COL2");
viewConn.createStatement().execute("CREATE VIEW " + viewOfTable + " ( VIEW_COL1 DECIMAL(10,2), VIEW_COL2 VARCHAR ) AS SELECT * FROM " + baseTableName);
assertTableDefinition(conn, viewOfTable, PTableType.VIEW, baseTableName, 0, 5, 3, "ID", "COL1", "COL2", "VIEW_COL1", "VIEW_COL2");
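// For the multi-tenant case the view is resolved under a tenant-specific PName,
// which is used below to build the view's PTableKey.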
PName tenantId = isMultiTenant ? PNameFactory.newName("tenant1") : null;
PhoenixConnection phoenixConn = conn.unwrap(PhoenixConnection.class);
HTableInterface htable = phoenixConn.getQueryServices().getTable(Bytes.toBytes(baseTableName));
assertFalse(htable.getTableDescriptor().getCoprocessors().contains(PhoenixTransactionalProcessor.class.getName()));
assertFalse(phoenixConn.getTable(new PTableKey(null, baseTableName)).isTransactional());
assertFalse(viewConn.unwrap(PhoenixConnection.class).getTable(new PTableKey(tenantId, viewOfTable)).isTransactional());
// make the base table transactional
conn.createStatement().execute("ALTER TABLE " + baseTableName + " SET TRANSACTIONAL=true");
// query the view to force the table cache to be updated
viewConn.createStatement().execute("SELECT * FROM " + viewOfTable);
htable = phoenixConn.getQueryServices().getTable(Bytes.toBytes(baseTableName));
assertTrue(htable.getTableDescriptor().getCoprocessors().contains(PhoenixTransactionalProcessor.class.getName()));
assertTrue(phoenixConn.getTable(new PTableKey(null, baseTableName)).isTransactional());
assertTrue(viewConn.unwrap(PhoenixConnection.class).getTable(new PTableKey(tenantId, viewOfTable)).isTransactional());
}
}
Use of org.apache.phoenix.schema.PName in project phoenix by apache.
The class TestUtil, method getSingleSumAggregator.
public static ClientAggregators getSingleSumAggregator(String url, Properties props) throws SQLException {
try (PhoenixConnection pconn = DriverManager.getConnection(url, props).unwrap(PhoenixConnection.class)) {
PhoenixStatement statement = new PhoenixStatement(pconn);
StatementContext context = new StatementContext(statement, null, new Scan(), new SequenceManager(statement));
AggregationManager aggregationManager = context.getAggregationManager();
SumAggregateFunction func = new SumAggregateFunction(Arrays.<Expression>asList(new KeyValueColumnExpression(new PLongColumn() {
@Override
public PName getName() {
return SINGLE_COLUMN_NAME;
}
@Override
public PName getFamilyName() {
return SINGLE_COLUMN_FAMILY_NAME;
}
@Override
public int getPosition() {
return 0;
}
@Override
public SortOrder getSortOrder() {
return SortOrder.getDefault();
}
@Override
public Integer getArraySize() {
return 0;
}
@Override
public byte[] getViewConstant() {
return null;
}
@Override
public boolean isViewReferenced() {
return false;
}
@Override
public String getExpressionStr() {
return null;
}
@Override
public boolean isRowTimestamp() {
return false;
}
@Override
public boolean isDynamic() {
return false;
}
@Override
public byte[] getColumnQualifierBytes() {
return SINGLE_COLUMN_NAME.getBytes();
}
})), null);
aggregationManager.setAggregators(new ClientAggregators(Collections.<SingleAggregateFunction>singletonList(func), 1));
ClientAggregators aggregators = aggregationManager.getAggregators();
return aggregators;
}
}
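The SINGLE_COLUMN_NAME and SINGLE_COLUMN_FAMILY_NAME constants returned by the anonymous PLongColumn are defined elsewhere in TestUtil; a plausible shape for them, using PNameFactory as in the other examples (the literal strings here are illustrative, not the actual values):
// Hypothetical constants; the real TestUtil literals may differ.
public static final PName SINGLE_COLUMN_FAMILY_NAME = PNameFactory.newName("SINGLE_COLUMN_FAMILY");
public static final PName SINGLE_COLUMN_NAME = PNameFactory.newName("SINGLE_COLUMN");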
Use of org.apache.phoenix.schema.PName in project phoenix by apache.
The class CorrelatePlanTest, method createProjectedTableFromLiterals.
private TableRef createProjectedTableFromLiterals(Object[] row) {
List<PColumn> columns = Lists.<PColumn>newArrayList();
for (int i = 0; i < row.length; i++) {
String name = ParseNodeFactory.createTempAlias();
Expression expr = LiteralExpression.newConstant(row[i]);
PName colName = PNameFactory.newName(name);
columns.add(new PColumnImpl(PNameFactory.newName(name), PNameFactory.newName(VALUE_COLUMN_FAMILY), expr.getDataType(), expr.getMaxLength(), expr.getScale(), expr.isNullable(), i, expr.getSortOrder(), null, null, false, name, false, false, colName.getBytes()));
}
try {
PTable pTable = PTableImpl.makePTable(null, PName.EMPTY_NAME, PName.EMPTY_NAME, PTableType.SUBQUERY, null, MetaDataProtocol.MIN_TABLE_TIMESTAMP, PTable.INITIAL_SEQ_NUM, null, null, columns, null, null, Collections.<PTable>emptyList(), false, Collections.<PName>emptyList(), null, null, false, false, false, null, null, null, true, false, 0, 0L, Boolean.FALSE, null, false, ImmutableStorageScheme.ONE_CELL_PER_COLUMN, QualifierEncodingScheme.NON_ENCODED_QUALIFIERS, EncodedCQCounter.NULL_COUNTER, true);
TableRef sourceTable = new TableRef(pTable);
List<ColumnRef> sourceColumnRefs = Lists.<ColumnRef>newArrayList();
for (PColumn column : sourceTable.getTable().getColumns()) {
sourceColumnRefs.add(new ColumnRef(sourceTable, column.getPosition()));
}
return new TableRef(TupleProjectionCompiler.createProjectedTable(sourceTable, sourceColumnRefs, false));
} catch (SQLException e) {
throw new RuntimeException(e);
}
}
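A hypothetical call site for the helper above, projecting a single literal row (the values are arbitrary):
// Hypothetical usage; each literal becomes a temp-alias PName column on the projected table.
TableRef literalTable = createProjectedTableFromLiterals(new Object[] { 1L, "a", 2.5d });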