diff --git a/loki/expression/tests/test_expression.py b/loki/expression/tests/test_expression.py
index 2722d455b..16cd4d1b1 100644
--- a/loki/expression/tests/test_expression.py
+++ b/loki/expression/tests/test_expression.py
@@ -21,7 +21,7 @@
 from loki.build import jit_compile, clean_test
 from loki.expression import symbols as sym, parse_expr, AttachScopesMapper
 from loki.frontend import (
-    available_frontends, OMNI, FP, HAVE_FP, parse_fparser_expression
+    available_frontends, OMNI, HAVE_FP, parse_fparser_expression
 )
 from loki.ir import (
     nodes as ir, FindNodes, FindVariables, FindExpressions,
@@ -924,43 +924,6 @@ def test_masked_statements_nested(tmp_path, frontend):
     clean_test(filepath)
 
 
-@pytest.mark.parametrize('frontend', available_frontends(xfail=[
-    (OMNI, 'Not implemented'), (FP, 'Not implemented')
-]))
-def test_data_declaration(tmp_path, frontend):
-    """
-    Variable initialization with DATA statements
-    """
-    fcode = """
-subroutine data_declaration(data_out)
-  implicit none
-  integer, dimension(5, 4), intent(out) :: data_out
-  integer, dimension(5, 4) :: data1, data2
-  integer, dimension(3) :: data3
-  integer :: i, j
-
-  data data1 /20*5/
-
-  data ((data2(i,j), i=1,5), j=1,4) /20*3/
-
-  data data3(1), data3(3), data3(2) /1, 2, 3/
-
-  data_out(:,:) = data1(:,:) + data2(:,:)
-  data_out(1:3,1) = data3
-end subroutine data_declaration
-"""
-    filepath = tmp_path/(f'expression_data_declaration_{frontend}.f90')
-    routine = Subroutine.from_source(fcode, frontend=frontend)
-    function = jit_compile(routine, filepath=filepath, objname='data_declaration')
-
-    expected = np.ones(shape=(5, 4), dtype=np.int32, order='F') * 8
-    expected[[0, 1, 2], 0] = [1, 3, 2]
-    result = np.zeros(shape=(5, 4), dtype=np.int32, order='F')
-    function(result)
-    assert np.all(result == expected)
-    clean_test(filepath)
-
-
 @pytest.mark.parametrize('frontend', available_frontends())
 def test_pointer_nullify(tmp_path, frontend):
     """
diff --git a/loki/frontend/preprocessing.py b/loki/frontend/preprocessing.py
index eeb6d092c..ad2780923 100644
--- a/loki/frontend/preprocessing.py
+++ b/loki/frontend/preprocessing.py
@@ -18,7 +18,7 @@
 from loki.logging import debug, detail
 from loki.config import config
 from loki.tools import as_tuple, gettempdir, filehash
-from loki.ir import VariableDeclaration, Intrinsic, FindNodes
+from loki.ir import Intrinsic, FindNodes
 from loki.frontend.util import OMNI, FP, REGEX
 
 
@@ -126,18 +126,6 @@ def sanitize_input(source, frontend):
     return source, pp_info
 
 
-def reinsert_contiguous(ir, pp_info):
-    """
-    Reinsert the CONTIGUOUS marker into declaration variables.
-    """
-    if pp_info:
-        for decl in FindNodes(VariableDeclaration).visit(ir):
-            if decl.source.lines[0] in pp_info:
-                for var in decl.symbols:
-                    var.scope.symbol_attrs[var.name] = var.scope.symbol_attrs[var.name].clone(contiguous=True)
-    return ir
-
-
 def reinsert_convert_endian(ir, pp_info):
     """
     Reinsert the CONVERT='BIG_ENDIAN' or CONVERT='LITTLE_ENDIAN' arguments
diff --git a/loki/frontend/source.py b/loki/frontend/source.py
index 49caa08bc..621599d2d 100644
--- a/loki/frontend/source.py
+++ b/loki/frontend/source.py
@@ -20,10 +20,7 @@
 from loki.logging import debug, warning
 
 
-__all__ = [
-    'Source', 'FortranReader', 'extract_source', 'extract_source_from_range', 'source_to_lines',
-    'join_source_list'
-]
+__all__ = ['Source', 'FortranReader', 'source_to_lines', 'join_source_list']
 
 
 class Source:
@@ -368,76 +365,6 @@ def source_from_current_line(self):
         return Source(lines=line.span, string='\n'.join(self.source_lines[start:end+1]))
 
 
-def extract_source(ast, text, label=None, full_lines=False):
-    """
-    Extract the marked string from source text.
-    """
-    attrib = getattr(ast, 'attrib', ast)
-    lstart = int(attrib['line_begin'])
-    lend = int(attrib['line_end'])
-    cstart = int(attrib['col_begin'])
-    cend = int(attrib['col_end'])
-    return extract_source_from_range((lstart, lend), (cstart, cend), text, label=label, full_lines=full_lines)
-
-
-def extract_source_from_range(lines, columns, text, label=None, full_lines=False):
-    """
-    Extract the marked string from source text.
-    """
-    text = text.splitlines(keepends=True)
-    lstart, lend = lines
-    cstart, cend = columns
-
-    if full_lines:
-        return Source(string=''.join(text[lstart-1:lend]).strip('\n'), lines=lines)
-
-    lines = text[lstart-1:lend]
-
-    # Scan for line continuations and honour inline
-    # comments in between continued lines
-    def continued(line):
-        if '!' in line:
-            line = line.split('!')[0]
-        return line.strip().endswith('&')
-
-    def is_comment(line):
-        return line.strip().startswith('!')
-
-    # We only honour line continuation if we're not parsing a comment
-    if not is_comment(lines[-1]):
-        while continued(lines[-1]) or is_comment(lines[-1]):
-            lend += 1
-            # TODO: Strip the leading empty space before the '&'
-            lines.append(text[lend-1])
-
-    # If line continuation is used, move column index to the relevant parts
-    while cstart >= len(lines[0]):
-        if not is_comment(lines[0]):
-            cstart -= len(lines[0])
-            cend -= len(lines[0])
-        lines = lines[1:]
-        lstart += 1
-
-    # Move column index by length of the label if given
-    if label is not None:
-        cstart += len(label)
-        cend += len(label)
-
-    # Avoid stripping indentation
-    if lines[0][:cstart].strip() == '':
-        cstart = 0
-
-    # TODO: The column indexes are still not right, so source strings
-    # for sub-expressions are likely wrong!
-    if lstart == lend:
-        lines[0] = lines[0][cstart:cend]
-    else:
-        lines[0] = lines[0][cstart:]
-        lines[-1] = lines[-1][:cend]
-
-    return Source(string=''.join(lines).strip('\n'), lines=(lstart, lend))
-
-
 def _merge_source_match_source(pre, match, post):
     """
     Merge a triple of :class:`Source`, :class:`re.Match`, :class:`Source` objects