Commit

Add decryption to dataset iterator

kerny3d committed Jan 17, 2024
1 parent 60a915a commit 06cc2aa

Showing 5 changed files with 94 additions and 71 deletions.
@@ -269,25 +269,23 @@ public static String substituteStandardWithDatasourceDelimiter(String columnName
 	public DataIterator iterator() {
 		LOGGER.debug("IN");
 		try {
-			IMetaData metadata = getMetadata();
+			IMetaData currMetadata = getMetadata();
 			QuerableBehaviour querableBehaviour = (QuerableBehaviour) getBehaviour(QuerableBehaviour.class.getName());
 			String statement = querableBehaviour.getStatement();
 			LOGGER.debug("Obtained statement [{}]", statement);
 			dataProxy.setStatement(statement);
 			JDBCDataProxy jdbcDataProxy = (JDBCDataProxy) dataProxy;
 			IDataSource dataSource = jdbcDataProxy.getDataSource();
 			Assert.assertNotNull(dataSource, "Invalid datasource");
-			try (Connection connection = dataSource.getConnection();
-					Statement stmt = connection.createStatement(ResultSet.TYPE_FORWARD_ONLY,
-							ResultSet.CONCUR_READ_ONLY)) {
+			Connection connection = dataSource.getConnection();
+			Statement stmt = connection.createStatement(ResultSet.TYPE_FORWARD_ONLY,
+					ResultSet.CONCUR_READ_ONLY);
 
-				connection.setAutoCommit(false); // PostgreSQL requires disabling auto-commit for setFetchSize to work
-				stmt.setFetchSize(5000);
+			connection.setAutoCommit(false); // PostgreSQL requires disabling auto-commit for setFetchSize to work
+			stmt.setFetchSize(5000);
 
-				try (ResultSet rs = (ResultSet) dataProxy.getData(dataReader, stmt)) {
-					return new ResultSetIterator(rs, metadata);
-				}
-			}
+			ResultSet rs = (ResultSet) dataProxy.getData(dataReader, stmt);
+			return new ResultSetIterator(connection, stmt, rs, currMetadata);
 		} catch (ClassNotFoundException | SQLException | NamingException e) {
 			throw new SpagoBIRuntimeException(e);
 		} finally {
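This same rewrite appears in all four dataset classes in this commit: iterator() used to wrap the Connection, Statement, and ResultSet in try-with-resources, which closed them as soon as the method returned, while the new code hands them to ResultSetIterator, which takes ownership and releases them in its close() method (last file below). The caller therefore becomes responsible for closing the iterator. A minimal caller sketch, assuming a dataSet variable and that DataIterator is closeable, as the close() override in the last file suggests:

    // Hypothetical caller: the iterator now owns the JDBC resources,
    // so it must be closed when iteration is done.
    try (DataIterator iterator = dataSet.iterator()) {
        while (iterator.hasNext()) {
            IRecord currRecord = iterator.next();
            // process currRecord...
        }
    }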
@@ -54,7 +54,6 @@ public class FlatDataSet extends ConfigurableDataSet {
 
 	private String tableName = null;
 	private IDataSource dataSource = null;
-	private IMetaData metadata;
 
 	public FlatDataSet() {
 	}
@@ -96,15 +95,15 @@ public void setDataSource(IDataSource dataSource) {
 
 	@Override
 	public IMetaData getMetadata() {
-		IMetaData metadata = null;
+		IMetaData currMetadata = null;
 		try {
 			DatasetMetadataParser dsp = new DatasetMetadataParser();
-			metadata = dsp.xmlToMetadata(getDsMetadata());
+			currMetadata = dsp.xmlToMetadata(getDsMetadata());
 		} catch (Exception e) {
 			LOGGER.error("Error loading the metadata", e);
 			throw new SpagoBIEngineRuntimeException("Error loading the metadata", e);
 		}
-		return metadata;
+		return currMetadata;
 	}
 
 	@Override
@@ -160,17 +159,16 @@ public void setConfiguration(String configuration) {
 	public DataIterator iterator() {
 		LOGGER.debug("IN");
 		try {
-			IMetaData metadata = getMetadata();
+			IMetaData currMetadata = getMetadata();
 			String query = "select * from " + this.getTableName();
-			try (Connection connection = dataSource.getConnection(); Statement stmt = connection.createStatement()) {
+			Connection connection = dataSource.getConnection();
+			Statement stmt = connection.createStatement();
 
-				connection.setAutoCommit(false); // PostgreSQL requires disabling auto-commit for setFetchSize to work
-				stmt.setFetchSize(5000);
+			connection.setAutoCommit(false); // PostgreSQL requires disabling auto-commit for setFetchSize to work
+			stmt.setFetchSize(5000);
 
-				try (ResultSet rs = stmt.executeQuery(query)) {
-					return new ResultSetIterator(rs, metadata);
-				}
-			}
+			ResultSet rs = stmt.executeQuery(query);
+			return new ResultSetIterator(connection, stmt, rs, currMetadata);
 		} catch (Exception e) {
 			throw new SpagoBIRuntimeException(e);
 		} finally {
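The setAutoCommit(false) / setFetchSize(5000) pair that every iterator() sets up is what keeps memory bounded: the PostgreSQL JDBC driver only streams a result set through a server-side cursor when the connection is not in auto-commit mode; otherwise the fetch size is ignored and the whole result is materialized client-side. A standalone sketch of the same streaming pattern outside SpagoBI (URL, credentials, and table name are placeholders):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.SQLException;
    import java.sql.Statement;

    public class StreamingReadExample {
        public static void main(String[] args) throws SQLException {
            try (Connection conn = DriverManager.getConnection("jdbc:postgresql://localhost/db", "user", "secret")) {
                conn.setAutoCommit(false); // without this the driver buffers the whole result
                try (Statement stmt = conn.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY)) {
                    stmt.setFetchSize(5000); // rows fetched per round trip
                    try (ResultSet rs = stmt.executeQuery("select * from big_table")) {
                        while (rs.next()) {
                            // process one row at a time; only one fetch window is in memory
                        }
                    }
                }
            }
        }
    }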
@@ -25,7 +25,8 @@
 
 import javax.naming.NamingException;
 
-import org.apache.log4j.Logger;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
 
 import it.eng.spagobi.services.dataset.bo.SpagoBiDataSet;
 import it.eng.spagobi.tools.dataset.common.behaviour.QuerableBehaviour;
@@ -42,7 +43,7 @@
 
 public class JDBCPostgreSQLDataSet extends JDBCDataSet {
 
-	private static transient Logger logger = Logger.getLogger(JDBCPostgreSQLDataSet.class);
+	private static final Logger LOGGER = LogManager.getLogger(JDBCPostgreSQLDataSet.class);
 
 	/**
 	 * Instantiates a new empty JDBC PostgreSQL data set.
@@ -63,31 +64,28 @@ protected AbstractDataReader createDataReader() {
 
 	@Override
 	public DataIterator iterator() {
-		logger.debug("IN");
+		LOGGER.debug("IN");
 		try {
-			IMetaData metadata = getMetadata();
+			IMetaData currMetadata = getMetadata();
 			QuerableBehaviour querableBehaviour = (QuerableBehaviour) getBehaviour(QuerableBehaviour.class.getName());
 			String statement = querableBehaviour.getStatement();
-			logger.debug("Obtained statement [" + statement + "]");
+			LOGGER.debug("Obtained statement [{}]", statement);
 			dataProxy.setStatement(statement);
 			JDBCDataProxy jdbcDataProxy = (JDBCDataProxy) dataProxy;
 			IDataSource dataSource = jdbcDataProxy.getDataSource();
 			Assert.assertNotNull(dataSource, "Invalid datasource");
-			try (Connection connection = dataSource.getConnection();
-					Statement stmt = connection.createStatement(ResultSet.TYPE_FORWARD_ONLY,
-							ResultSet.CONCUR_READ_ONLY)) {
+			Connection connection = dataSource.getConnection();
+			Statement stmt = connection.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
 
-				connection.setAutoCommit(false); // PostgreSQL requires disabling auto-commit for setFetchSize to work
-				stmt.setFetchSize(5000);
+			connection.setAutoCommit(false); // PostgreSQL requires disabling auto-commit for setFetchSize to work
+			stmt.setFetchSize(5000);
 
-				try (ResultSet rs = (ResultSet) dataProxy.getData(dataReader, stmt)) {
-					return new ResultSetIterator(rs, metadata);
-				}
-			}
+			ResultSet rs = (ResultSet) dataProxy.getData(dataReader, stmt);
+			return new ResultSetIterator(connection, stmt, rs, currMetadata);
 		} catch (ClassNotFoundException | SQLException | NamingException e) {
 			throw new SpagoBIRuntimeException(e);
 		} finally {
-			logger.debug("OUT");
+			LOGGER.debug("OUT");
 		}
 	}
 
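Besides the iterator change, this file migrates from Log4j 1.x (Logger.getLogger, string concatenation) to Log4j 2 (LogManager.getLogger, parameterized messages). With the {} placeholder the message is only assembled when the DEBUG level is actually enabled, so the concatenation cost disappears when debug logging is off. A minimal illustration (class name and method are hypothetical):

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;

    public class LoggingExample {
        private static final Logger LOGGER = LogManager.getLogger(LoggingExample.class);

        void execute(String statement) {
            // Log4j 1 style ("..." + statement) builds the string even if DEBUG is off;
            // the parameterized form below defers formatting until the level check passes.
            LOGGER.debug("Obtained statement [{}]", statement);
        }
    }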
@@ -138,18 +138,16 @@ public SpagoBiDataSet toSpagoBiDataSet() {
 	public DataIterator iterator() {
 		LOGGER.debug("IN");
 		try {
-			IMetaData metadata = getMetadata();
+			IMetaData currMetadata = getMetadata();
 			String query = "select * from " + this.getTableName();
+			Connection connection = dataSource.getConnection();
+			Statement stmt = connection.createStatement();
 
-			try (Connection connection = dataSource.getConnection(); Statement stmt = connection.createStatement()) {
-				connection.setAutoCommit(false); // PostgreSQL requires disabling auto-commit for setFetchSize to work
-				stmt.setFetchSize(5000);
+			connection.setAutoCommit(false); // PostgreSQL requires disabling auto-commit for setFetchSize to work
+			stmt.setFetchSize(5000);
 
-				try (ResultSet rs = stmt.executeQuery(query)) {
-					return new ResultSetIterator(rs, metadata);
-				}
-			}
+			ResultSet rs = stmt.executeQuery(query);
+			return new ResultSetIterator(connection, stmt, rs, currMetadata);
 		} catch (Exception e) {
 			throw new SpagoBIRuntimeException(e);
 		} finally {
@@ -17,8 +17,10 @@
  */
 package it.eng.spagobi.tools.dataset.common.iterator;
 
+import java.sql.Connection;
 import java.sql.ResultSet;
 import java.sql.SQLException;
+import java.sql.Statement;
 import java.util.ArrayList;
 import java.util.LinkedHashMap;
 import java.util.List;
@@ -27,10 +29,6 @@
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.stream.Collectors;
 
-import javax.sql.rowset.CachedRowSet;
-import javax.sql.rowset.RowSetFactory;
-import javax.sql.rowset.RowSetProvider;
-
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.jasypt.encryption.pbe.PBEStringEncryptor;
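The removed javax.sql.rowset imports point at the heart of this commit: the old iterator copied the entire ResultSet into a CachedRowSet, which reads every row eagerly and holds it in memory, defeating the fetch-size streaming configured by the callers above. A sketch of what the old approach cost, using the standard javax.sql.rowset API (the helper and class name are hypothetical):

    import java.sql.ResultSet;
    import java.sql.SQLException;
    import javax.sql.rowset.CachedRowSet;
    import javax.sql.rowset.RowSetProvider;

    public class CachedRowSetExample {
        // Mirrors the old constructor's behaviour.
        static CachedRowSet copyToMemory(ResultSet rs) throws SQLException {
            CachedRowSet cache = RowSetProvider.newFactory().createCachedRowSet();
            cache.populate(rs); // reads ALL rows up front; memory grows with the result size
            return cache;       // the source ResultSet can be closed afterwards
        }
    }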
@@ -50,50 +48,77 @@ public class ResultSetIterator implements DataIterator {
 
 	private static final Logger LOGGER = LogManager.getLogger(ResultSetIterator.class);
 
-	private final CachedRowSet cache;
+	private final Connection conn;
+	private final Statement stmt;
+	private final ResultSet rs;
 	private final IMetaData metadata;
 	private final int columnCount;
 	private boolean needDecryption = false;
 	private final List<IFieldMetaData> decryptableField = new ArrayList<>();
 	private final Map<Integer, IFieldMetaData> decryptableFieldByIndex = new LinkedHashMap<>();
 	private PBEStringEncryptor encryptor;
 
-	public ResultSetIterator(ResultSet rs, IMetaData metadata) throws SQLException {
-		RowSetFactory rowSetFactory = RowSetProvider.newFactory();
-		cache = rowSetFactory.createCachedRowSet();
-		cache.populate(rs);
+	/**
+	 * IMPORTANT!!! An {@code Iterator} has the methods {@code hasNext()} and {@code next()}, while a {@code ResultSet} has only {@code next()}, which behaves like
+	 * {@code Iterator.hasNext()} and {@code Iterator.next()} at the same time: it returns true if there are more elements while also moving its internal cursor
+	 * forward. But {@code Iterator.hasNext()} is not supposed to move forward, therefore it cannot invoke the {@code ResultSet.next()} method! To harmonize the
+	 * two APIs, {@code ResultSetIterator} loads the first record into the {@code nextRow} variable during initialization (within the constructor) using the
+	 * {@code loadNextRow} method; when {@code Iterator.next()} is invoked, we read the values from {@code nextRow} and then move forward with
+	 * {@code loadNextRow}, overwriting {@code nextRow} or setting it to null when there are no more elements. {@code Iterator.hasNext()} simply checks that
+	 * {@code nextRow} is not null.
+	 */
+	private Object[] nextRow;
+
+	public ResultSetIterator(Connection conn, Statement stmt, ResultSet rs, IMetaData metadata) throws SQLException {
+		this.conn = conn;
+		this.stmt = stmt;
+		this.rs = rs;
 		this.columnCount = rs.getMetaData().getColumnCount();
 		this.metadata = metadata;
 		setUpDecryption();
+		loadNextRow();
+	}
+
+	private void loadNextRow() throws SQLException {
+		if (rs.next()) {
+			int columnsNumber = rs.getMetaData().getColumnCount();
+			Object[] row = new Object[columnsNumber];
+			for (int columnIndex = 1; columnIndex <= columnsNumber; columnIndex++) {
+				row[columnIndex - 1] = rs.getObject(columnIndex);
+			}
+			this.nextRow = row;
+		} else {
+			this.nextRow = null;
+		}
 	}
 
 	@Override
 	public boolean hasNext() {
-		try {
-			return cache.next();
-		} catch (SQLException e) {
-			return false;
-		}
+		return nextRow != null;
 	}
 
 	@Override
 	public IRecord next() {
-		IRecord currRecord = new Record();
-		for (int columnIndex = 1; columnIndex <= columnCount; columnIndex++) {
-			try {
-				Object columnValue;
-				columnValue = cache.getObject(columnIndex);
+		if (!hasNext()) {
+			throw new SpagoBIRuntimeException("ResultSet is empty or it was already scrolled completely");
+		}
+		int columnIndex = 0;
+		try {
+			IRecord currRecord = new Record();
+			for (columnIndex = 1; columnIndex <= columnCount; columnIndex++) {
+				Object columnValue = nextRow[columnIndex - 1];
 				IField field = new Field(columnValue);
 				if (columnValue != null) {
 					metadata.getFieldMeta(columnIndex - 1).setType(columnValue.getClass());
 				}
 				currRecord.appendField(field);
-			} catch (SQLException e) {
-				new SpagoBIRuntimeException("Error getting value at column " + columnIndex, e);
 			}
-		}
-		decryptIfNeeded(currRecord);
-		return currRecord;
+			decryptIfNeeded(currRecord);
+			loadNextRow();
+			return currRecord;
+		} catch (SQLException e) {
+			throw new SpagoBIRuntimeException(e);
+		}
 	}
 
 	@Override
@@ -105,8 +130,14 @@ public void remove() {
 	@Override
 	public void close() {
 		try {
-			if (cache != null) {
-				cache.close();
+			if (rs != null) {
+				rs.close();
+			}
+			if (stmt != null) {
+				stmt.close();
+			}
+			if (conn != null) {
+				conn.close();
 			}
 		} catch (SQLException e) {
 			throw new SpagoBIRuntimeException(e);
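The constructor/loadNextRow/hasNext/next arrangement documented in the Javadoc above is the classic look-ahead (pre-fetch) adapter between a cursor API like ResultSet, whose next() both tests and advances, and java.util.Iterator, whose hasNext() must be side-effect free. The same pattern in isolation, stripped of JDBC and decryption (a generic sketch, not SpagoBI code):

    import java.util.Iterator;
    import java.util.NoSuchElementException;
    import java.util.function.Supplier;

    public final class LookAheadIterator<T> implements Iterator<T> {
        private final Supplier<T> fetcher; // returns null when the source is exhausted
        private T nextElement;

        public LookAheadIterator(Supplier<T> fetcher) {
            this.fetcher = fetcher;
            this.nextElement = fetcher.get(); // pre-load the first element, as the constructor above does
        }

        @Override
        public boolean hasNext() {
            return nextElement != null; // pure check, no cursor movement
        }

        @Override
        public T next() {
            if (nextElement == null) {
                throw new NoSuchElementException();
            }
            T current = nextElement;
            nextElement = fetcher.get(); // advance only here
            return current;
        }
    }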
