PHOENIX-6655 SYSTEM.SEQUENCE should have CACHE_DATA_ON_WRITE set to true
gtaparia authored and stoty committed Dec 15, 2022
1 parent 0fef053 commit 071b8f0
Showing 2 changed files with 94 additions and 2 deletions.
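For context, the change boils down to flipping three HBase column-family flags on the SYSTEM.SEQUENCE physical table. Below is a minimal standalone sketch of the same Admin-API pattern the patch uses; the physical table name SYSTEM.SEQUENCE (namespace-mapped clusters use SYSTEM:SEQUENCE) and Phoenix's default column family "0" are assumptions here, and a real cluster should pick up the change through the Phoenix upgrade path in the diff below rather than through this snippet.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class EnableCacheOnWriteForSequence {
    public static void main(String[] args) throws Exception {
        // Assumed physical table name and Phoenix default column family "0".
        TableName sequenceTable = TableName.valueOf("SYSTEM.SEQUENCE");
        byte[] defaultFamily = Bytes.toBytes("0");

        try (Connection hbase = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = hbase.getAdmin()) {
            TableDescriptor td = admin.getDescriptor(sequenceTable);
            ColumnFamilyDescriptor cf = td.getColumnFamily(defaultFamily);

            // Only modify the table when at least one flag is missing, mirroring the
            // guard in updateSystemSequenceWithCacheOnWriteProps in the diff below.
            if (!cf.isCacheBloomsOnWrite() || !cf.isCacheDataOnWrite()
                    || !cf.isCacheIndexesOnWrite()) {
                ColumnFamilyDescriptor newCf = ColumnFamilyDescriptorBuilder.newBuilder(cf)
                        .setCacheBloomsOnWrite(true)
                        .setCacheDataOnWrite(true)
                        .setCacheIndexesOnWrite(true)
                        .build();
                admin.modifyTable(TableDescriptorBuilder.newBuilder(td)
                        .modifyColumnFamily(newCf)
                        .build());
            }
        }
    }
}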
58 changes: 58 additions & 0 deletions phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java
@@ -63,12 +63,15 @@
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeepDeletedCells;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.phoenix.schema.types.PInteger;
import org.apache.phoenix.thirdparty.com.google.common.collect.Lists;
import org.apache.phoenix.thirdparty.com.google.common.collect.Sets;
@@ -80,6 +83,7 @@
import org.apache.phoenix.query.ConnectionQueryServices;
import org.apache.phoenix.query.ConnectionQueryServicesImpl;
import org.apache.phoenix.query.DelegateConnectionQueryServices;
import org.apache.phoenix.query.QueryConstants;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.schema.PName;
import org.apache.phoenix.schema.PNameFactory;
@@ -416,6 +420,60 @@ public void testRemoveScnFromTaskAndLog() throws Exception {
SchemaUtil.getEmptyColumnFamily(sysStatsTable)).getMaxVersions());
}

@Test
public void testCacheOnWritePropsOnSystemSequence() throws Exception {
PhoenixConnection conn = getConnection(false, null).
unwrap(PhoenixConnection.class);
ConnectionQueryServicesImpl cqs = (ConnectionQueryServicesImpl)(conn.getQueryServices());

TableDescriptor initialTD = utility.getAdmin().getDescriptor(
SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME,
cqs.getProps()));
ColumnFamilyDescriptor initialCFD = initialTD.getColumnFamily(
QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES);

// Confirm that the Cache-On-Write related properties are set
// on SYSTEM.SEQUENCE during creation.
assertEquals(Boolean.TRUE, initialCFD.isCacheBloomsOnWrite());
assertEquals(Boolean.TRUE, initialCFD.isCacheDataOnWrite());
assertEquals(Boolean.TRUE, initialCFD.isCacheIndexesOnWrite());

// Check whether the Cache-On-Write related properties are also set on
// pre-existing tables via the upgrade path. To test this, we:
// 1. Explicitly disable the Cache-On-Write related properties on the table.
// 2. Run the upgrade path on the table.
// 3. Verify that the properties are set again after the upgrade.
ColumnFamilyDescriptorBuilder newCFBuilder =
ColumnFamilyDescriptorBuilder.newBuilder(initialCFD);
newCFBuilder.setCacheBloomsOnWrite(false);
newCFBuilder.setCacheDataOnWrite(false);
newCFBuilder.setCacheIndexesOnWrite(false);
TableDescriptorBuilder newTD = TableDescriptorBuilder.newBuilder(initialTD);
newTD.modifyColumnFamily(newCFBuilder.build());
utility.getAdmin().modifyTable(newTD.build());

// Check that the Cache-On-Write related properties are now disabled.
TableDescriptor updatedTD = utility.getAdmin().getDescriptor(
SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME,
cqs.getProps()));
ColumnFamilyDescriptor updatedCFD = updatedTD.getColumnFamily(
QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES);
assertEquals(Boolean.FALSE, updatedCFD.isCacheBloomsOnWrite());
assertEquals(Boolean.FALSE, updatedCFD.isCacheDataOnWrite());
assertEquals(Boolean.FALSE, updatedCFD.isCacheIndexesOnWrite());

// Now upgrade the existing table and verify that the properties are
// re-enabled by the upgrade path.
cqs.upgradeSystemSequence(conn, new HashMap<String, String>());

updatedTD = utility.getAdmin().getDescriptor(SchemaUtil.getPhysicalTableName(
PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME, cqs.getProps()));
updatedCFD = updatedTD.getColumnFamily(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES);
assertEquals(Boolean.TRUE, updatedCFD.isCacheBloomsOnWrite());
assertEquals(Boolean.TRUE, updatedCFD.isCacheDataOnWrite());
assertEquals(Boolean.TRUE, updatedCFD.isCacheIndexesOnWrite());
}
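A note on the handles used above: getConnection(false, null) is an UpgradeIT harness helper. Outside the IT harness, roughly the same objects come from a plain Phoenix JDBC connection, as in the sketch below; the JDBC URL is an assumption and not part of this patch.

try (java.sql.Connection jdbc =
        java.sql.DriverManager.getConnection("jdbc:phoenix:localhost:2181")) {
    org.apache.phoenix.jdbc.PhoenixConnection pconn =
            jdbc.unwrap(org.apache.phoenix.jdbc.PhoenixConnection.class);
    // getProps() supplies the configuration that SchemaUtil.getPhysicalTableName(...)
    // needs to resolve the physical SYSTEM.SEQUENCE table name, as in the test above.
    org.apache.phoenix.util.ReadOnlyProps props = pconn.getQueryServices().getProps();
}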

private Set<String> getChildLinks(Connection conn) throws SQLException {
ResultSet rs =
conn.createStatement().executeQuery(
38 changes: 36 additions & 2 deletions phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -3516,6 +3516,11 @@ private void createOtherSystemTables(PhoenixConnection metaConnection) throws SQLException {
QueryServices.SEQUENCE_SALT_BUCKETS_ATTRIB,
QueryServicesOptions.DEFAULT_SEQUENCE_TABLE_SALT_BUCKETS);
metaConnection.createStatement().execute(getSystemSequenceTableDDL(nSequenceSaltBuckets));
// The table above is created via a DDL statement. However, the
// CFD-level properties are not set via DDL commands, so we
// explicitly set a few properties using the Admin API below.
updateSystemSequenceWithCacheOnWriteProps(metaConnection);
} catch (TableAlreadyExistsException e) {
nSequenceSaltBuckets = getSaltBuckets(e);
}
@@ -4100,9 +4105,10 @@ private PhoenixConnection upgradeSystemChildLink(
return metaConnection;
}

private PhoenixConnection upgradeSystemSequence(
@VisibleForTesting
public PhoenixConnection upgradeSystemSequence(
PhoenixConnection metaConnection,
Map<String, String> systemTableToSnapshotMap) throws SQLException {
Map<String, String> systemTableToSnapshotMap) throws SQLException, IOException {
int nSaltBuckets = ConnectionQueryServicesImpl.this.props.getInt(
QueryServices.SEQUENCE_SALT_BUCKETS_ATTRIB,
QueryServicesOptions.DEFAULT_SEQUENCE_TABLE_SALT_BUCKETS);
@@ -4158,10 +4164,38 @@ private PhoenixConnection upgradeSystemSequence(
} else {
nSequenceSaltBuckets = getSaltBuckets(e);
}

updateSystemSequenceWithCacheOnWriteProps(metaConnection);
}
return metaConnection;
}

private void updateSystemSequenceWithCacheOnWriteProps(PhoenixConnection metaConnection) throws
IOException, SQLException {

try (Admin admin = getAdmin()) {
TableDescriptor oldTD = admin.getDescriptor(
SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME,
metaConnection.getQueryServices().getProps()));
ColumnFamilyDescriptor oldCf = oldTD.getColumnFamily(
QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES);

// If the Cache-On-Write related properties are not set, let's set them.
if (!oldCf.isCacheBloomsOnWrite() || !oldCf.isCacheDataOnWrite()
|| !oldCf.isCacheIndexesOnWrite()) {
ColumnFamilyDescriptorBuilder newCFBuilder =
ColumnFamilyDescriptorBuilder.newBuilder(oldCf);
newCFBuilder.setCacheBloomsOnWrite(true);
newCFBuilder.setCacheDataOnWrite(true);
newCFBuilder.setCacheIndexesOnWrite(true);

TableDescriptorBuilder newTD = TableDescriptorBuilder.newBuilder(oldTD);
newTD.modifyColumnFamily(newCFBuilder.build());
admin.modifyTable(newTD.build());
}
}
}

private void takeSnapshotOfSysTable(
Map<String, String> systemTableToSnapshotMap,
TableAlreadyExistsException e) throws SQLException {
