diff --git a/pacman/model/constraints/key_allocator_constraints/__init__.py b/pacman/model/constraints/key_allocator_constraints/__init__.py index c13031741..7a534e237 100644 --- a/pacman/model/constraints/key_allocator_constraints/__init__.py +++ b/pacman/model/constraints/key_allocator_constraints/__init__.py @@ -15,14 +15,8 @@ from .abstract_key_allocator_constraint import AbstractKeyAllocatorConstraint from .contiguous_key_range_constraint import ContiguousKeyRangeContraint -from .fixed_key_field_constraint import FixedKeyFieldConstraint from .fixed_key_and_mask_constraint import FixedKeyAndMaskConstraint -from .fixed_mask_constraint import FixedMaskConstraint -from .share_key_constraint import ShareKeyConstraint __all__ = ["AbstractKeyAllocatorConstraint", "ContiguousKeyRangeContraint", - "FixedKeyFieldConstraint", - "FixedKeyAndMaskConstraint", - "FixedMaskConstraint", - "ShareKeyConstraint"] + "FixedKeyAndMaskConstraint"] diff --git a/pacman/model/constraints/key_allocator_constraints/fixed_key_and_mask_constraint.py b/pacman/model/constraints/key_allocator_constraints/fixed_key_and_mask_constraint.py index de1df0ed4..dd74f0fa8 100644 --- a/pacman/model/constraints/key_allocator_constraints/fixed_key_and_mask_constraint.py +++ b/pacman/model/constraints/key_allocator_constraints/fixed_key_and_mask_constraint.py @@ -30,30 +30,21 @@ class FixedKeyAndMaskConstraint(AbstractKeyAllocatorConstraint): # The key and mask combinations to fix "_keys_and_masks", - # Optional function which will be called to translate the - # keys_and_masks list into individual keys If missing, the keys will - # be generated by iterating through the keys_and_masks list directly. - # The function parameters are: - # An iterable of keys and masks - # A machine edge - # Number of keys to generate (may be None) - "_key_list_function" + # The identifier of the partition to which this applies, or None + # if only one partition is expected + "_partition" ] - def __init__(self, keys_and_masks, key_list_function=None): + def __init__(self, keys_and_masks, partition=None): """ :param iterable(BaseKeyAndMask) keys_and_masks: The key and mask combinations to fix - :param key_list_function: Optional function which will be called to\ - translate the `keys_and_masks` list into individual keys. If\ - missing, the keys will be generated by iterating through the \ - `keys_and_masks` list directly. 
The function parameters are: - * An iterable of keys and masks - * A machine edge - * Number of keys to generate (may be None) - :type key_list_function: callable(iterable(tuple( - BaseKeyAndMask, MachineEdge, int)), iterable(int)) + :param partition: + The identifier of the partition to which this constraint applies, + or None if it applies to all partitions (meaning there is only + one partition expected) + :type partition: str or None """ for keys_and_mask in keys_and_masks: if not isinstance(keys_and_mask, BaseKeyAndMask): @@ -62,7 +53,7 @@ def __init__(self, keys_and_masks, key_list_function=None): "a key_and_mask object") self._keys_and_masks = keys_and_masks - self._key_list_function = key_list_function + self._partition = partition @property def keys_and_masks(self): @@ -74,26 +65,31 @@ def keys_and_masks(self): return self._keys_and_masks @property - def key_list_function(self): - """ A function to call to generate the keys + def partition(self): + """ The identifier of the partition to which this constraint applies, + or None if it applies to the only expected partition - :return: A python function, or None if the default function can be used - :rtype: callable(iterable(tuple(BaseKeyAndMask, MachineEdge, int)), - iterable(int)) + :rtype: str or None """ - return self._key_list_function + return self._partition + + def applies_to_partition(self, partition): + """ Determine if this applies to the given partition identifier or not + + :param str partition: The identifier of the partition to check + :rtype: bool + """ + return self._partition is None or self._partition == partition def __repr__(self): return ( "FixedKeyAndMaskConstraint(" - "keys_and_masks={}, key_list_function={})".format( - self._keys_and_masks, self.key_list_function)) + "keys_and_masks={}, partition={})".format( + self._keys_and_masks, self._partition)) def __eq__(self, other): if not isinstance(other, FixedKeyAndMaskConstraint): return False - if other.key_list_function != self._key_list_function: - return False if len(self._keys_and_masks) != len(other.keys_and_masks): return False return all(km in other.keys_and_masks for km in self._keys_and_masks) @@ -102,6 +98,4 @@ def __ne__(self, other): return not self.__eq__(other) def __hash__(self): - return ( - frozenset(self._keys_and_masks), - self._key_list_function).__hash__() + return frozenset(self._keys_and_masks).__hash__() diff --git a/pacman/model/constraints/key_allocator_constraints/fixed_key_field_constraint.py b/pacman/model/constraints/key_allocator_constraints/fixed_key_field_constraint.py deleted file mode 100644 index 32e10a312..000000000 --- a/pacman/model/constraints/key_allocator_constraints/fixed_key_field_constraint.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright (c) 2017-2019 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
- -from .abstract_key_allocator_constraint import AbstractKeyAllocatorConstraint - - -class FixedKeyFieldConstraint(AbstractKeyAllocatorConstraint): - """ Constraint that indicates fields in the mask of a key. - """ - - __slots__ = [ - # any fields that define regions in the mask with further limitations - "_fields" - ] - - def __init__(self, fields): - """ - :param iterable(Field) fields: - any fields that define regions in the mask with further limitations - :raise PacmanInvalidParameterException: - if any of the fields are outside of the mask i.e.,: - - mask & field.value != field.value - - or if any of the field masks overlap i.e.,: - - field.value & other_field.value != 0 - """ - self._fields = sorted(fields, key=lambda field: field.value, - reverse=True) - # TODO: Enforce the documented restrictions - - @property - def fields(self): - """ Any fields in the mask, i.e., ranges of the mask that have\ - further limitations - - :return: Iterable of fields, ordered by mask with the highest bit - range first - :rtype: list(Field) - """ - return self._fields - - def __eq__(self, other): - if not isinstance(other, FixedKeyFieldConstraint): - return False - if len(self._fields) != len(other.fields): - return False - return all(field in other.fields for field in self._fields) - - def __ne__(self, other): - return not self.__eq__(other) - - def __hash__(self): - frozen_fields = frozenset(self._fields) - return hash(frozen_fields) - - def __repr__(self): - return "FixedKeyFieldConstraint(fields={})".format( - self._fields) diff --git a/pacman/model/constraints/key_allocator_constraints/fixed_mask_constraint.py b/pacman/model/constraints/key_allocator_constraints/fixed_mask_constraint.py deleted file mode 100644 index 4fd69772e..000000000 --- a/pacman/model/constraints/key_allocator_constraints/fixed_mask_constraint.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright (c) 2017-2019 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -from .abstract_key_allocator_constraint import AbstractKeyAllocatorConstraint - - -class FixedMaskConstraint(AbstractKeyAllocatorConstraint): - """ A key allocator that fixes the mask to be assigned to an edge. - - .. note:: - Used for some neuron-connected output devices. 
- """ - - __slots__ = [ - # the mask to be used during key allocation - "_mask" - ] - - def __init__(self, mask): - """ - :param int mask: the mask to be used during key allocation - """ - self._mask = mask - - @property - def mask(self): - """ The mask to be used - - :rtype: int - """ - return self._mask - - def __eq__(self, other): - if not isinstance(other, FixedMaskConstraint): - return False - return self._mask == other.mask - - def __ne__(self, other): - return not self.__eq__(other) - - def __hash__(self): - return hash(self._mask) - - def __repr__(self): - return "FixedMaskConstraint(mask={})".format(self._mask) diff --git a/pacman/model/constraints/key_allocator_constraints/share_key_constraint.py b/pacman/model/constraints/key_allocator_constraints/share_key_constraint.py deleted file mode 100644 index e0afc7f03..000000000 --- a/pacman/model/constraints/key_allocator_constraints/share_key_constraint.py +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright (c) 2017-2019 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -from .abstract_key_allocator_constraint import AbstractKeyAllocatorConstraint - - -class ShareKeyConstraint(AbstractKeyAllocatorConstraint): - """ Constraint to allow the same keys to be allocated to multiple edges - via partitions. - - .. warning:: - This constraint appears to be unused. - """ - - __slots__ = [ - # The set of outgoing partitions to vertices which all share the same\ - # key - "_other_partitions" - ] - - def __init__(self, other_partitions): - """ - :param list(AbstractSingleSourcePartition) other_partitions: - the other edges which keys are shared with. - """ - self._other_partitions = other_partitions - - @property - def other_partitions(self): - """ the other edges which keys are shared with - - :rtype: list(AbstractSingleSourcePartition) - """ - return self._other_partitions diff --git a/pacman/model/constraints/partitioner_constraints/__init__.py b/pacman/model/constraints/partitioner_constraints/__init__.py index 55740b016..49268740c 100644 --- a/pacman/model/constraints/partitioner_constraints/__init__.py +++ b/pacman/model/constraints/partitioner_constraints/__init__.py @@ -14,7 +14,5 @@ # along with this program. If not, see . 
from .abstract_partitioner_constraint import AbstractPartitionerConstraint -from .same_atoms_as_vertex_constraint import SameAtomsAsVertexConstraint -__all__ = ["AbstractPartitionerConstraint", - "SameAtomsAsVertexConstraint"] +__all__ = ["AbstractPartitionerConstraint"] diff --git a/pacman/model/constraints/partitioner_constraints/same_atoms_as_vertex_constraint.py b/pacman/model/constraints/partitioner_constraints/same_atoms_as_vertex_constraint.py deleted file mode 100644 index 508cf4a0e..000000000 --- a/pacman/model/constraints/partitioner_constraints/same_atoms_as_vertex_constraint.py +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright (c) 2017-2019 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -from .abstract_partitioner_constraint import AbstractPartitionerConstraint - - -class SameAtomsAsVertexConstraint(AbstractPartitionerConstraint): - """ A constraint which indicates that a vertex must be split in the\ - same way as another vertex. - """ - - __slots__ = [ - # The application vertex to which the constraint refers - "_vertex" - ] - - def __init__(self, vertex): - """ - :param ApplicationVertex vertex: - The vertex to which the constraint refers - """ - self._vertex = vertex - raise NotImplementedError( - "SameAtomsAsVertexConstraint is no longer supported. " - "Please contact spinnakerusers@googlegroups.com to discuss your " - "requirements.") - - @property - def vertex(self): - """ The vertex to partition with - - :rtype: ApplicationVertex - """ - return self._vertex - - def __repr__(self): - return "SameAtomsAsVertexConstraint(vertex={})".format( - self._vertex) - - def __eq__(self, other): - if not isinstance(other, SameAtomsAsVertexConstraint): - return False - return self._vertex == other.vertex - - def __ne__(self, other): - if not isinstance(other, SameAtomsAsVertexConstraint): - return True - return not self.__eq__(other) - - def __hash__(self): - return hash((self._vertex,)) diff --git a/pacman/model/constraints/placer_constraints/__init__.py b/pacman/model/constraints/placer_constraints/__init__.py index 3fc994cdc..41686d79d 100644 --- a/pacman/model/constraints/placer_constraints/__init__.py +++ b/pacman/model/constraints/placer_constraints/__init__.py @@ -14,13 +14,6 @@ # along with this program. If not, see . 
from .abstract_placer_constraint import AbstractPlacerConstraint -from .board_constraint import BoardConstraint from .chip_and_core_constraint import ChipAndCoreConstraint -from .radial_placement_from_chip_constraint import ( - RadialPlacementFromChipConstraint) -from .same_chip_as_constraint import SameChipAsConstraint -__all__ = ["AbstractPlacerConstraint", "BoardConstraint", - "ChipAndCoreConstraint", - "RadialPlacementFromChipConstraint", - "SameChipAsConstraint"] +__all__ = ["AbstractPlacerConstraint", "ChipAndCoreConstraint"] diff --git a/pacman/model/constraints/placer_constraints/board_constraint.py b/pacman/model/constraints/placer_constraints/board_constraint.py deleted file mode 100644 index 68d15d3d4..000000000 --- a/pacman/model/constraints/placer_constraints/board_constraint.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (c) 2017-2019 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -from .abstract_placer_constraint import AbstractPlacerConstraint - - -class BoardConstraint(AbstractPlacerConstraint): - """ A constraint on the board on which a placement is made. - """ - - __slots__ = [ - # The IP address of the Ethernet of the board to be used - "_board_address" - ] - - def __init__(self, board_address): - """ - :param str board_address: - The IP address of the Ethernet of the board to be used - """ - self._board_address = board_address - - @property - def board_address(self): - """ The board of the constraint - - :rtype: str - """ - return self._board_address - - def __repr__(self): - return "BoardConstraint(board_address=\"{}\")".format( - self._board_address) - - def __eq__(self, other): - if not isinstance(other, BoardConstraint): - return False - return self._board_address == other.board_address - - def __ne__(self, other): - if not isinstance(other, BoardConstraint): - return True - return not self.__eq__(other) - - def __hash__(self): - return hash((self._board_address,)) diff --git a/pacman/model/constraints/placer_constraints/radial_placement_from_chip_constraint.py b/pacman/model/constraints/placer_constraints/radial_placement_from_chip_constraint.py deleted file mode 100644 index 789c53a6a..000000000 --- a/pacman/model/constraints/placer_constraints/radial_placement_from_chip_constraint.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright (c) 2017-2019 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. 
If not, see . - -from .abstract_placer_constraint import AbstractPlacerConstraint - - -class RadialPlacementFromChipConstraint(AbstractPlacerConstraint): - """ A constraint that attempts to place a vertex as close to a chip\ - as possible (including on it). - """ - - __slots__ = [ - # the chip x coord in the SpiNNaker machine to which the machine - # vertex is placed - "_x", - - # the chip y coord in the SpiNNaker machine to which the machine - # vertex is placed - "_y" - ] - - def __init__(self, x, y): - """ - :param int x: the x-coordinate of the chip - :param int y: the y-coordinate of the chip - """ - self._x = int(x) - self._y = int(y) - - @property - def x(self): - """ - :rtype: int - """ - return self._x - - @property - def y(self): - """ - :rtype: int - """ - return self._y - - def __repr__(self): - return "RadialPlacementFromChipConstraint(x={}, y={})".format( - self._x, self._y) - - def __eq__(self, other): - if not isinstance(other, RadialPlacementFromChipConstraint): - return False - return (self._x, self._y) == (other.x, other.y) - - def __ne__(self, other): - if not isinstance(other, RadialPlacementFromChipConstraint): - return True - return not self.__eq__(other) - - def __hash__(self): - return hash((self._x, self._y)) diff --git a/pacman/model/constraints/placer_constraints/same_chip_as_constraint.py b/pacman/model/constraints/placer_constraints/same_chip_as_constraint.py deleted file mode 100644 index 4bdba3707..000000000 --- a/pacman/model/constraints/placer_constraints/same_chip_as_constraint.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright (c) 2017-2019 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -from .abstract_placer_constraint import AbstractPlacerConstraint - - -class SameChipAsConstraint(AbstractPlacerConstraint): - """ Indicates that a vertex should be placed on the same chip as another\ - vertex. 
- """ - - __slots__ = [ - # The vertex to place on the same chip - "_vertex" - ] - - def __init__(self, vertex): - """ - :param AbstractVertex vertex: The vertex to place on the same chip - """ - self._vertex = vertex - - @property - def vertex(self): - """ The vertex to place on the same chip - - :rtype: AbstractVertex - """ - return self._vertex - - def __repr__(self): - return "SameChipAsConstraint(vertex={})".format(self._vertex) - - def __eq__(self, other): - if not isinstance(other, SameChipAsConstraint): - return False - return self._vertex == other.vertex - - def __ne__(self, other): - if not isinstance(other, SameChipAsConstraint): - return True - return not self.__eq__(other) - - def __hash__(self): - return hash((self._vertex, )) diff --git a/pacman/model/graphs/abstract_edge_partition.py b/pacman/model/graphs/abstract_edge_partition.py index 1aca487ea..3c80d5016 100644 --- a/pacman/model/graphs/abstract_edge_partition.py +++ b/pacman/model/graphs/abstract_edge_partition.py @@ -12,17 +12,15 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see . -from spinn_utilities.abstract_base import ( - AbstractBase, abstractmethod, abstractproperty) +from spinn_utilities.abstract_base import AbstractBase, abstractproperty from spinn_utilities.ordered_set import OrderedSet from pacman.exceptions import ( - PacmanConfigurationException, PacmanInvalidParameterException) -from pacman.model.graphs.common import ConstrainedObject + PacmanInvalidParameterException, PacmanAlreadyExistsException) _REPR_TEMPLATE = "{}(identifier={}, edges={}, constraints={}, label={})" -class AbstractEdgePartition(ConstrainedObject, metaclass=AbstractBase): +class AbstractEdgePartition(object, metaclass=AbstractBase): """ A collection of edges which start at a single vertex which have the\ same semantics and so can share a single key or block of SDRAM\ (depending on edge type). @@ -34,68 +32,28 @@ class AbstractEdgePartition(ConstrainedObject, metaclass=AbstractBase): # The edges in the partition "_edges", # The type of edges to accept - "_allowed_edge_types", - # The weight of traffic going down this partition - "_traffic_weight", - # The label of the graph - "_label", - # class name - "_class_name", - # Safety code generated by the graph when added to that graph - "_graph_code" + "_allowed_edge_types" ] def __init__( - self, identifier, allowed_edge_types, constraints, - label, traffic_weight, class_name): + self, identifier, allowed_edge_types): """ :param str identifier: The identifier of the partition :param allowed_edge_types: The types of edges allowed :type allowed_edge_types: type or tuple(type, ...) - :param iterable(AbstractConstraint) constraints: - Any initial constraints :param str label: An optional label of the partition - :param int traffic_weight: - The weight of traffic going down this partition """ - super().__init__(constraints) - self._label = label self._identifier = identifier - self._edges = OrderedSet() self._allowed_edge_types = allowed_edge_types - self._traffic_weight = traffic_weight - self._class_name = class_name - self._graph_code = None - - @property - def label(self): - """ The label of the edge partition. - - :rtype: str - """ - return self._label + self._edges = OrderedSet() - def add_edge(self, edge, graph_code): + def add_edge(self, edge): """ Add an edge to the edge partition. - .. note:: - This method should only be called by the ``add_edge`` method of - the graph that owns the partition. 
Calling it from anywhere else, - even with the correct graph_code, will lead to unsupported - inconsistency. - :param AbstractEdge edge: the edge to add - :param int graph_code: - A code to check the correct graph is calling this method :raises PacmanInvalidParameterException: If the edge does not belong in this edge partition """ - if graph_code != self._graph_code: - raise PacmanConfigurationException( - "Only one graph should add edges") - if self._graph_code is None: - raise PacmanConfigurationException( - "Only Graphs can add edges to partitions") # Check for an incompatible edge if not isinstance(edge, self._allowed_edge_types): @@ -103,18 +61,10 @@ def add_edge(self, edge, graph_code): "edge", str(edge.__class__), "Edges of this graph must be one of the following types:" " {}".format(self._allowed_edge_types)) + if edge in self._edges: + raise PacmanAlreadyExistsException("Edge", edge) self._edges.add(edge) - def register_graph_code(self, graph_code): - """ - Allows the graph to register its code when the partition is added - """ - if self._graph_code is not None: - raise PacmanConfigurationException( - "Illegal attempt to add partition {} to a second " - "graph".format(self)) - self._graph_code = graph_code - @property def identifier(self): """ The identifier of this edge partition. @@ -143,25 +93,9 @@ def n_edges(self): """ return len(self._edges) - @property - def traffic_weight(self): - """ The weight of the traffic in this edge partition compared to\ - other partitions. - - :rtype: int - """ - return self._traffic_weight - def __repr__(self): - edges = "" - for edge in self._edges: - if edge.label is not None: - edges += edge.label + "," - else: - edges += str(edge) + "," - return _REPR_TEMPLATE.format( - self._class_name, self._identifier, edges, self.constraints, - self.label) + return (f"{self.__class__.__name__}(identifier={self.identifier}" + f", n_edges={self.n_edges})") def __str__(self): return self.__repr__() @@ -174,16 +108,6 @@ def __contains__(self, edge): """ return edge in self._edges - @abstractmethod - def clone_without_edges(self): - """ Make a copy of this edge partition without any of the edges in it - - This follows the design pattern that only the graph adds edges to - partitions already added to the graph - - :return: The copied edge partition but excluding edges - """ - @abstractproperty def pre_vertices(self): """ diff --git a/pacman/model/graphs/abstract_multiple_partition.py b/pacman/model/graphs/abstract_multiple_partition.py index d3edad63a..d94bff5a5 100644 --- a/pacman/model/graphs/abstract_multiple_partition.py +++ b/pacman/model/graphs/abstract_multiple_partition.py @@ -30,13 +30,9 @@ class AbstractMultiplePartition(AbstractEdgePartition): "_destinations" ] - def __init__( - self, pre_vertices, identifier, allowed_edge_types, constraints, - label, traffic_weight, class_name): + def __init__(self, pre_vertices, identifier, allowed_edge_types): super().__init__( - identifier=identifier, - allowed_edge_types=allowed_edge_types, constraints=constraints, - label=label, traffic_weight=traffic_weight, class_name=class_name) + identifier=identifier, allowed_edge_types=allowed_edge_types) self._pre_vertices = dict() self._destinations = defaultdict(OrderedSet) @@ -50,17 +46,18 @@ def __init__( "There were clones in your list of acceptable pre vertices") @overrides(AbstractEdgePartition.add_edge) - def add_edge(self, edge, graph_code): + def add_edge(self, edge): # safety checks if edge.pre_vertex not in self._pre_vertices.keys(): raise Exception( "The 
edge {} is not allowed in this outgoing partition".format( edge)) + super(AbstractMultiplePartition, self).add_edge(edge) + # update self._pre_vertices[edge.pre_vertex].add(edge) self._destinations[edge.post_vertex].add(edge) - super().add_edge(edge, graph_code) @property @overrides(AbstractEdgePartition.pre_vertices) diff --git a/pacman/model/graphs/abstract_single_source_partition.py b/pacman/model/graphs/abstract_single_source_partition.py index 0fbb96259..f9f33ffbc 100644 --- a/pacman/model/graphs/abstract_single_source_partition.py +++ b/pacman/model/graphs/abstract_single_source_partition.py @@ -28,20 +28,17 @@ class AbstractSingleSourcePartition( ] def __init__( - self, pre_vertex, identifier, allowed_edge_types, constraints, - label, traffic_weight, class_name): + self, pre_vertex, identifier, allowed_edge_types): super().__init__( - identifier=identifier, allowed_edge_types=allowed_edge_types, - constraints=constraints, label=label, - traffic_weight=traffic_weight, class_name=class_name) + identifier=identifier, allowed_edge_types=allowed_edge_types) self._pre_vertex = pre_vertex @overrides(AbstractEdgePartition.add_edge) - def add_edge(self, edge, graph_code): + def add_edge(self, edge): if edge.pre_vertex != self._pre_vertex: raise PacmanConfigurationException( "A partition can only contain edges with the same pre_vertex") - super().add_edge(edge, graph_code) + super().add_edge(edge) @property def pre_vertex(self): diff --git a/pacman/model/graphs/abstract_virtual.py b/pacman/model/graphs/abstract_virtual.py index e3a801f32..950d98fc6 100644 --- a/pacman/model/graphs/abstract_virtual.py +++ b/pacman/model/graphs/abstract_virtual.py @@ -13,7 +13,7 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -from spinn_utilities.abstract_base import abstractmethod, abstractproperty +from spinn_utilities.abstract_base import abstractproperty from spinn_utilities.require_subclass import require_subclass from pacman.model.graphs.abstract_vertex import AbstractVertex @@ -37,28 +37,3 @@ def board_address(self): :rtype: str """ - - @abstractmethod - def set_virtual_chip_coordinates(self, virtual_chip_x, virtual_chip_y): - """ Set the details of the virtual chip that has been added to the\ - machine for this vertex. - - :param int virtual_chip_x: The x-coordinate of the added chip - :param int virtual_chip_y: The y-coordinate of the added chip - """ - - @abstractproperty - def virtual_chip_x(self): - """ The x-coordinate of the virtual chip where this vertex is to be\ - placed. - - :rtype: int - """ - - @abstractproperty - def virtual_chip_y(self): - """ The y-coordinate of the virtual chip where this vertex is to be\ - placed. 
- - :rtype: int - """ diff --git a/pacman/model/graphs/application/abstract/abstract_one_app_one_machine_vertex.py b/pacman/model/graphs/application/abstract/abstract_one_app_one_machine_vertex.py index 5b7e93df4..6a615e73a 100644 --- a/pacman/model/graphs/application/abstract/abstract_one_app_one_machine_vertex.py +++ b/pacman/model/graphs/application/abstract/abstract_one_app_one_machine_vertex.py @@ -38,10 +38,10 @@ def __init__(self, machine_vertex, label, constraints, n_atoms=1): """ super().__init__(label, constraints, n_atoms) self._machine_vertex = machine_vertex + super().remember_machine_vertex(machine_vertex) @overrides(ApplicationVertex.remember_machine_vertex) def remember_machine_vertex(self, machine_vertex): - super().remember_machine_vertex(machine_vertex) assert (machine_vertex == self._machine_vertex) @property @@ -57,3 +57,9 @@ def machine_vertex(self): @overrides(ApplicationVertex.n_atoms) def n_atoms(self): return self._machine_vertex.vertex_slice.n_atoms + + @overrides(ApplicationVertex.reset) + def reset(self): + # Override, as we don't want to clear the machine vertices here! + if self._splitter is not None: + self._splitter.reset_called() diff --git a/pacman/model/graphs/application/application_edge.py b/pacman/model/graphs/application/application_edge.py index 2d24049af..2d2b5cc56 100644 --- a/pacman/model/graphs/application/application_edge.py +++ b/pacman/model/graphs/application/application_edge.py @@ -14,9 +14,7 @@ # along with this program. If not, see . from spinn_utilities.overrides import overrides -from spinn_utilities.ordered_set import OrderedSet from pacman.model.graphs import AbstractEdge -from pacman.model.graphs.machine import MachineEdge class ApplicationEdge(AbstractEdge): @@ -30,19 +28,12 @@ class ApplicationEdge(AbstractEdge): # The edge at the end of the vertex "_post_vertex", - # Machine edge type - "_machine_edge_type", - # The label - "_label", - - # Ordered set of associated machine edges - "__machine_edges" + "_label" ] def __init__( - self, pre_vertex, post_vertex, label=None, - machine_edge_type=MachineEdge): + self, pre_vertex, post_vertex, label=None): """ :param ApplicationVertex pre_vertex: The application vertex at the start of the edge. @@ -58,11 +49,6 @@ def __init__( self._label = label self._pre_vertex = pre_vertex self._post_vertex = post_vertex - if not issubclass(machine_edge_type, MachineEdge): - raise ValueError( - "machine_edge_type must be a kind of machine edge") - self._machine_edge_type = machine_edge_type - self.__machine_edges = OrderedSet() @property @overrides(AbstractEdge.label) @@ -78,25 +64,3 @@ def pre_vertex(self): @overrides(AbstractEdge.post_vertex) def post_vertex(self): return self._post_vertex - - @property - def machine_edges(self): - """ The machine - - :rtype: iterable(MachineEdge) - """ - return self.__machine_edges - - def remember_associated_machine_edge(self, machine_edge): - """ Adds the Machine Edge to the iterable returned by machine_edges - - :param MachineEdge machine_edge: A pointer to a machine_edge. - This edge may not be fully initialised - """ - self.__machine_edges.add(machine_edge) - - def forget_machine_edges(self): - """ Clear the collection of machine edges created by this application - edge. 
- """ - self.__machine_edges = OrderedSet() diff --git a/pacman/model/graphs/application/application_edge_partition.py b/pacman/model/graphs/application/application_edge_partition.py index 3fa8a7b2a..59c228944 100644 --- a/pacman/model/graphs/application/application_edge_partition.py +++ b/pacman/model/graphs/application/application_edge_partition.py @@ -13,9 +13,7 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -from spinn_utilities.overrides import overrides -from pacman.model.graphs import ( - AbstractEdgePartition, AbstractSingleSourcePartition) +from pacman.model.graphs import AbstractSingleSourcePartition from .application_edge import ApplicationEdge @@ -27,29 +25,11 @@ class ApplicationEdgePartition(AbstractSingleSourcePartition): __slots__ = () - def __init__( - self, identifier, pre_vertex, constraints=None, label=None, - traffic_weight=1): + def __init__(self, identifier, pre_vertex): """ :param str identifier: The identifier of the partition :param ApplicationVertex pre_vertex: The source of this partition - :param list(AbstractConstraint) constraints: Any initial constraints - :param str label: An optional label of the partition - :param int traffic_weight: - The weight of traffic going down this partition - :param traffic_type: the traffic type acceptable here. """ super().__init__( pre_vertex=pre_vertex, identifier=identifier, - allowed_edge_types=ApplicationEdge, constraints=constraints, - label=label, traffic_weight=traffic_weight, - class_name="ApplicationEdgePartition") - - @overrides(AbstractEdgePartition.clone_without_edges) - def clone_without_edges(self): - """ - :rtype: ApplicationEdgePartition - """ - return ApplicationEdgePartition( - self._identifier, self._pre_vertex, self._constraints, - self._label, self._traffic_weight) + allowed_edge_types=ApplicationEdge) diff --git a/pacman/model/graphs/application/application_fpga_vertex.py b/pacman/model/graphs/application/application_fpga_vertex.py index 2320d3431..aff980f48 100644 --- a/pacman/model/graphs/application/application_fpga_vertex.py +++ b/pacman/model/graphs/application/application_fpga_vertex.py @@ -16,8 +16,6 @@ import sys from pacman.model.partitioner_interfaces import LegacyPartitionerAPI from spinn_utilities.overrides import overrides -from pacman.model.constraints.placer_constraints import ( - ChipAndCoreConstraint) from .application_vertex import ApplicationVertex from pacman.model.graphs import AbstractFPGA, AbstractVirtual from pacman.model.graphs.machine import MachineFPGAVertex @@ -33,8 +31,6 @@ class ApplicationFPGAVertex( "_fpga_id", "_fpga_link_id", "_board_address", - "_virtual_chip_x", - "_virtual_chip_y", "_n_atoms"] def __init__( @@ -48,8 +44,6 @@ def __init__( self._fpga_id = fpga_id self._fpga_link_id = fpga_link_id self._board_address = board_address - self._virtual_chip_x = None - self._virtual_chip_y = None @property @overrides(AbstractFPGA.fpga_id) @@ -66,32 +60,6 @@ def fpga_link_id(self): def board_address(self): return self._board_address - @property - @overrides(AbstractVirtual.virtual_chip_x) - def virtual_chip_x(self): - return self._virtual_chip_x - - @property - @overrides(AbstractVirtual.virtual_chip_y) - def virtual_chip_y(self): - return self._virtual_chip_y - - @overrides(AbstractVirtual.set_virtual_chip_coordinates) - def set_virtual_chip_coordinates(self, virtual_chip_x, virtual_chip_y): - if virtual_chip_x is not None and virtual_chip_y is not None: - self._virtual_chip_x = virtual_chip_x - 
self._virtual_chip_y = virtual_chip_y - if len(self._machine_vertices) != 0: - for machine_vertex in self._machine_vertices: - if (machine_vertex.virtual_chip_x != self._virtual_chip_x - or machine_vertex.virtual_chip_y != - virtual_chip_y): - machine_vertex.set_virtual_chip_coordinates( - self._virtual_chip_x, self._virtual_chip_y) - else: - self.add_constraint(ChipAndCoreConstraint( - self._virtual_chip_x, self._virtual_chip_y)) - @property @overrides(LegacyPartitionerAPI.n_atoms) def n_atoms(self): @@ -108,8 +76,6 @@ def create_machine_vertex( machine_vertex = MachineFPGAVertex( self._fpga_id, self._fpga_link_id, self._board_address, label, constraints, self, vertex_slice) - machine_vertex.set_virtual_chip_coordinates( - self._virtual_chip_x, self._virtual_chip_y) if resources_required: assert (resources_required == machine_vertex.resources_required) return machine_vertex diff --git a/pacman/model/graphs/application/application_graph.py b/pacman/model/graphs/application/application_graph.py index 7d1647c7a..5cfcbb82e 100644 --- a/pacman/model/graphs/application/application_graph.py +++ b/pacman/model/graphs/application/application_graph.py @@ -1,4 +1,4 @@ -# Copyright (c) 2017-2019 The University of Manchester +# Copyright (c) 2017-2022 The University of Manchester # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -31,6 +31,8 @@ class ApplicationGraph(Graph): __slots__ = [ # The sets of edge partitions by pre-vertex "_outgoing_edge_partitions_by_pre_vertex", + # The total number of outgoing edge partitions + "_n_outgoing_edge_partitions" ] def __init__(self, label): @@ -39,23 +41,8 @@ def __init__(self, label): :type label: str or None """ super().__init__(ApplicationVertex, ApplicationEdge, label) - self._outgoing_edge_partitions_by_pre_vertex = \ - defaultdict(OrderedSet) - - def forget_machine_graph(self): - """ Forget the whole mapping from this graph to an application graph. - """ - for v in self.vertices: - v.forget_machine_vertices() - for e in self.edges: - e.forget_machine_edges() - - def forget_machine_edges(self): - """ Ensure that all application edges in this graph forget what - machine edges they map to. The mapping of vertices is unaffected. 
- """ - for e in self.edges: - e.forget_machine_edges() + self._outgoing_edge_partitions_by_pre_vertex = defaultdict(OrderedSet) + self._n_outgoing_edge_partitions = 0 @overrides(Graph.new_edge_partition) def new_edge_partition(self, name, edge): @@ -73,31 +60,30 @@ def add_outgoing_edge_partition(self, edge_partition): # check this partition doesn't already exist key = (edge_partition.pre_vertex, edge_partition.identifier) - if key in self._outgoing_edge_partitions_by_name: + if edge_partition in self._outgoing_edge_partitions_by_pre_vertex[ + edge_partition.pre_vertex]: raise PacmanAlreadyExistsException( str(ApplicationEdgePartition), key) - edge_partition.register_graph_code(id(self)) - self._outgoing_edge_partitions_by_pre_vertex[ edge_partition.pre_vertex].add(edge_partition) - self._outgoing_edge_partitions_by_name[key] = edge_partition for edge in edge_partition.edges: self._register_edge(edge, edge_partition) + self._n_outgoing_edge_partitions += 1 + @property @overrides(Graph.outgoing_edge_partitions) def outgoing_edge_partitions(self): - # This is based on the assumption that an Application partition is - # always SingleSourced - return self._outgoing_edge_partitions_by_name.values() + for partitions in \ + self._outgoing_edge_partitions_by_pre_vertex.values(): + for partition in partitions: + yield partition @property @overrides(Graph.n_outgoing_edge_partitions) def n_outgoing_edge_partitions(self): - # This is based on the assumption that an Application partition is - # always SingleSourced - return len(self._outgoing_edge_partitions_by_name) + return self._n_outgoing_edge_partitions def get_outgoing_edge_partitions_starting_at_vertex(self, vertex): """ Get all the edge partitions that start at the given vertex. @@ -124,3 +110,9 @@ def clone(self): for edge in outgoing_partition.edges: new_graph.add_edge(edge, outgoing_partition.identifier) return new_graph + + def reset(self): + """ Reset all the application vertices + """ + for vertex in self.vertices: + vertex.reset() diff --git a/pacman/model/graphs/application/application_graph_view.py b/pacman/model/graphs/application/application_graph_view.py index 1eff65ac3..38c69fbb4 100644 --- a/pacman/model/graphs/application/application_graph_view.py +++ b/pacman/model/graphs/application/application_graph_view.py @@ -36,14 +36,10 @@ def __init__(self, other): self._vertex_by_label = other._vertex_by_label # should never be needed self._unlabelled_vertex_count = None - self._outgoing_edge_partitions_by_name = \ - other._outgoing_edge_partitions_by_name - self._outgoing_edges = other._outgoing_edges self._incoming_edges = other._incoming_edges - self._incoming_edges_by_partition_name = \ - other._incoming_edges_by_partition_name - self._outgoing_edge_partition_by_edge = \ - other._outgoing_edge_partition_by_edge + self._outgoing_edge_partitions_by_pre_vertex =\ + other._outgoing_edge_partitions_by_pre_vertex + self._n_outgoing_edge_partitions = other._n_outgoing_edge_partitions @overrides(ApplicationGraph.add_edge) def add_edge(self, edge, outgoing_edge_partition_name): diff --git a/pacman/model/graphs/application/application_spinnaker_link_vertex.py b/pacman/model/graphs/application/application_spinnaker_link_vertex.py index 745c0196c..e1874b252 100644 --- a/pacman/model/graphs/application/application_spinnaker_link_vertex.py +++ b/pacman/model/graphs/application/application_spinnaker_link_vertex.py @@ -16,8 +16,6 @@ import sys from spinn_utilities.overrides import overrides from pacman.model.partitioner_interfaces import 
LegacyPartitionerAPI -from pacman.model.constraints.placer_constraints import ( - ChipAndCoreConstraint) from .application_vertex import ApplicationVertex from pacman.model.resources import ResourceContainer from pacman.model.graphs import ( @@ -33,9 +31,7 @@ class ApplicationSpiNNakerLinkVertex( __slots__ = [ "_n_atoms", "_spinnaker_link_id", - "_board_address", - "_virtual_chip_x", - "_virtual_chip_y"] + "_board_address"] def __init__( self, n_atoms, spinnaker_link_id, board_address=None, label=None, @@ -46,8 +42,6 @@ def __init__( self._n_atoms = self.round_n_atoms(n_atoms) self._spinnaker_link_id = spinnaker_link_id self._board_address = board_address - self._virtual_chip_x = None - self._virtual_chip_y = None @property @overrides(AbstractSpiNNakerLink.spinnaker_link_id) @@ -59,32 +53,6 @@ def spinnaker_link_id(self): def board_address(self): return self._board_address - @property - @overrides(AbstractVirtual.virtual_chip_x) - def virtual_chip_x(self): - return self._virtual_chip_x - - @property - @overrides(AbstractVirtual.virtual_chip_y) - def virtual_chip_y(self): - return self._virtual_chip_y - - @overrides(AbstractVirtual.set_virtual_chip_coordinates) - def set_virtual_chip_coordinates(self, virtual_chip_x, virtual_chip_y): - if virtual_chip_x is not None and virtual_chip_y is not None: - self._virtual_chip_x = virtual_chip_x - self._virtual_chip_y = virtual_chip_y - if len(self._machine_vertices) != 0: - for machine_vertex in self._machine_vertices: - if (machine_vertex.virtual_chip_x != self._virtual_chip_x - or machine_vertex.virtual_chip_y != - virtual_chip_y): - machine_vertex.set_virtual_chip_coordinates( - self._virtual_chip_x, self._virtual_chip_y) - else: - self.add_constraint(ChipAndCoreConstraint( - self._virtual_chip_x, self._virtual_chip_y)) - @property @overrides(LegacyPartitionerAPI.n_atoms) def n_atoms(self): @@ -101,8 +69,6 @@ def create_machine_vertex( machine_vertex = MachineSpiNNakerLinkVertex( self._spinnaker_link_id, self._board_address, label, constraints, self, vertex_slice) - machine_vertex.set_virtual_chip_coordinates( - self._virtual_chip_x, self._virtual_chip_y) if resources_required: assert (resources_required == machine_vertex.resources_required) return machine_vertex diff --git a/pacman/model/graphs/application/application_vertex.py b/pacman/model/graphs/application/application_vertex.py index 4db1a244a..93ab5979f 100644 --- a/pacman/model/graphs/application/application_vertex.py +++ b/pacman/model/graphs/application/application_vertex.py @@ -20,8 +20,7 @@ from spinn_utilities.overrides import overrides from spinn_utilities.log import FormatAdapter from pacman.exceptions import ( - PacmanAlreadyExistsException, PacmanConfigurationException, - PacmanInvalidParameterException) + PacmanConfigurationException, PacmanInvalidParameterException) from pacman.model.graphs import AbstractVertex logger = FormatAdapter(logging.getLogger(__file__)) @@ -35,8 +34,6 @@ class ApplicationVertex(AbstractVertex, metaclass=AbstractBase): # List of machine verts associated with this app vertex "_machine_vertices", - "_vertex_slices", - # The splitter object associated with this app vertex "_splitter", @@ -67,7 +64,6 @@ def __init__(self, label=None, constraints=None, self._splitter = None super().__init__(label, constraints) self._machine_vertices = OrderedSet() - self._vertex_slices = None # Use setter as there is extra work to do self.splitter = splitter self._max_atoms_per_core = max_atoms_per_core @@ -115,21 +111,11 @@ def remember_machine_vertex(self, 
machine_vertex): """ Adds the Machine vertex the iterable returned by machine_vertices - This method will be called by MachineVertex.app_vertex - No other place should call it. - - :param MachineVertex machine_vertex: A pointer to a machine_vertex. - This vertex may not be fully initialized but will have a slice - :raises PacmanValueError: If the slice of the machine_vertex is too big + :param MachineVertex machine_vertex: A pointer to a machine_vertex """ machine_vertex.index = len(self._machine_vertices) - - if machine_vertex in self._machine_vertices: - raise PacmanAlreadyExistsException( - str(machine_vertex), machine_vertex) self._machine_vertices.add(machine_vertex) - self._vertex_slices = None @abstractproperty def n_atoms(self): @@ -162,25 +148,12 @@ def round_n_atoms(self, n_atoms, label="n_atoms"): @property def machine_vertices(self): - """ The machine vertices that this application vertex maps to.\ - Will be the same length as :py:meth:`vertex_slices`. + """ The machine vertices that this application vertex maps to :rtype: iterable(MachineVertex) """ return self._machine_vertices - @property - def vertex_slices(self): - """ The slices of this vertex that each machine vertex manages.\ - Will be the same length as :py:meth:`machine_vertices`. - - :rtype: iterable(Slice) - """ - if self._vertex_slices is None: - self._vertex_slices = \ - list(map(lambda x: x.vertex_slice, self._machine_vertices)) - return self._vertex_slices - def get_max_atoms_per_core(self): """ Gets the maximum number of atoms per core, which is either the\ number of atoms required across the whole application vertex,\ @@ -200,9 +173,9 @@ def set_max_atoms_per_core(self, new_value): """ self._max_atoms_per_core = new_value - def forget_machine_vertices(self): - """ Arrange to forget all machine vertices that this application - vertex maps to. + def reset(self): + """ Forget all machine vertices in the application vertex, and reset + the splitter (if any) """ self._machine_vertices = OrderedSet() if self._splitter is not None: diff --git a/pacman/model/graphs/common/__init__.py b/pacman/model/graphs/common/__init__.py index f284905f0..e49e88ca7 100644 --- a/pacman/model/graphs/common/__init__.py +++ b/pacman/model/graphs/common/__init__.py @@ -14,7 +14,6 @@ # along with this program. If not, see . 
from .constrained_object import ConstrainedObject -from .edge_traffic_type import EdgeTrafficType from .slice import Slice -__all__ = ["ConstrainedObject", "EdgeTrafficType", "Slice"] +__all__ = ["ConstrainedObject", "Slice"] diff --git a/pacman/model/graphs/graph.py b/pacman/model/graphs/graph.py index 19d1e88cb..de706e68d 100644 --- a/pacman/model/graphs/graph.py +++ b/pacman/model/graphs/graph.py @@ -1,4 +1,4 @@ -# Copyright (c) 2017-2019 The University of Manchester +# Copyright (c) 2017-2022 The University of Manchester # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -19,9 +19,6 @@ from spinn_utilities.ordered_set import OrderedSet from pacman.exceptions import ( PacmanAlreadyExistsException, PacmanInvalidParameterException) -from .abstract_edge_partition import AbstractEdgePartition -from .abstract_edge import AbstractEdge -from .abstract_vertex import AbstractVertex from pacman.model.graphs.common import ConstrainedObject @@ -36,17 +33,8 @@ class Graph(ConstrainedObject, metaclass=AbstractBase): "_allowed_edge_types", # The vertices of the graph "_vertices", - # The outgoing edge partitions of the graph by - # (edge.pre_vertex, outgoing_edge_partition_name) - "_outgoing_edge_partitions_by_name", - # The outgoing edges by pre-vertex - "_outgoing_edges", # The incoming edges by post-vertex "_incoming_edges", - # map between incoming edges and edge.post_vertex, edge_partition_name - "_incoming_edges_by_partition_name", - # the outgoing partitions by edge - "_outgoing_edge_partition_by_edge", # The label of the graph "_label", # map between labels and vertex @@ -71,11 +59,7 @@ def __init__(self, allowed_vertex_types, allowed_edge_types, label): self._vertices = [] self._vertex_by_label = dict() self._unlabelled_vertex_count = 0 - self._outgoing_edge_partitions_by_name = dict() - self._outgoing_edges = defaultdict(OrderedSet) self._incoming_edges = defaultdict(OrderedSet) - self._incoming_edges_by_partition_name = defaultdict(list) - self._outgoing_edge_partition_by_edge = dict() self._label = label @property @@ -152,7 +136,7 @@ def add_edge(self, edge, outgoing_edge_partition_name): outgoing_edge_partition_name, edge) self.add_outgoing_edge_partition(partition) self._register_edge(edge, partition) - partition.add_edge(edge, id(self)) + partition.add_edge(edge) return partition def _register_edge(self, edge, partition): @@ -184,13 +168,7 @@ def _register_edge(self, edge, partition): "Post-vertex must be known in graph") # Add the edge to the indices - self._outgoing_edges[edge.pre_vertex].add(edge) - self._incoming_edges_by_partition_name[ - edge.post_vertex, partition.identifier].append(edge) self._incoming_edges[edge.post_vertex].add(edge) - if edge in self._outgoing_edge_partition_by_edge: - raise PacmanAlreadyExistsException("edge", edge) - self._outgoing_edge_partition_by_edge[edge] = partition @abstractmethod def new_edge_partition(self, name, edge): @@ -280,14 +258,6 @@ def n_outgoing_edge_partitions(self): :rtype: int """ - def get_outgoing_partition_for_edge(self, edge): - """ Gets the partition this edge is associated with. - - :param AbstractEdge edge: the edge to find associated partition - :rtype: AbstractEdgePartition - """ - return self._outgoing_edge_partition_by_edge[edge] - def get_edges_starting_at_vertex(self, vertex): """ Get all the edges that start at the given vertex. 
@@ -295,7 +265,10 @@ def get_edges_starting_at_vertex(self, vertex): The vertex at which the edges to get start :rtype: iterable(AbstractEdge) """ - return self._outgoing_edges[vertex] + parts = self.get_outgoing_edge_partitions_starting_at_vertex(vertex) + for partition in parts: + for edge in partition.edges: + yield edge def get_edges_ending_at_vertex(self, vertex): """ Get all the edges that end at the given vertex. @@ -308,20 +281,6 @@ def get_edges_ending_at_vertex(self, vertex): return [] return self._incoming_edges[vertex] - def get_edges_ending_at_vertex_with_partition_name( - self, vertex, partition_name): - """ Get all the edges that end at the given vertex, and reside in the\ - correct partition ID. - - :param AbstractVertex vertex: The vertex at which the edges to get end - :param str partition_name: the label for the partition - :return: iterable(AbstractEdge) - """ - key = (vertex, partition_name) - if key not in self._incoming_edges_by_partition_name: - return [] - return self._incoming_edges_by_partition_name[key] - @abstractmethod def get_outgoing_edge_partitions_starting_at_vertex(self, vertex): """ Get all the edge partitions that start at the given vertex. @@ -343,21 +302,10 @@ def get_outgoing_edge_partition_starting_at_vertex( :return: the named edge partition, or None if no such partition exists :rtype: AbstractEdgePartition or None """ - return self._outgoing_edge_partitions_by_name.get( - (vertex, outgoing_edge_partition_name), None) - - def __contains__(self, value): - """ Determines if a value is an object that is in the graph. - - :param value: The object to see if it is in the graph - :type value: AbstractVertex or AbstractEdge or AbstractEdgePartition - :return: True if the value is in the graph, False otherwise - :rtype: bool - """ - if isinstance(value, AbstractEdgePartition): - return value in self._outgoing_edge_partitions_by_name.values() - elif isinstance(value, AbstractEdge): - return value in self._outgoing_edge_partition_by_edge - elif isinstance(value, AbstractVertex): - return value in self._vertices - return False + # In general, very few partitions start at a given vertex, so iteration + # isn't going to be as onerous as it looks + parts = self.get_outgoing_edge_partitions_starting_at_vertex(vertex) + for partition in parts: + if partition.identifier == outgoing_edge_partition_name: + return partition + return None diff --git a/pacman/model/graphs/machine/__init__.py b/pacman/model/graphs/machine/__init__.py index 870c9fdcc..d1390f07a 100644 --- a/pacman/model/graphs/machine/__init__.py +++ b/pacman/model/graphs/machine/__init__.py @@ -19,23 +19,18 @@ from .machine_spinnaker_link_vertex import MachineSpiNNakerLinkVertex from .machine_vertex import MachineVertex from .simple_machine_vertex import SimpleMachineVertex -from .abstract_machine_edge_partition import AbstractMachineEdgePartition from .abstract_sdram_partition import AbstractSDRAMPartition from .constant_sdram_machine_partition import ConstantSDRAMMachinePartition from .destination_segmented_sdram_machine_partition import ( DestinationSegmentedSDRAMMachinePartition) -from .fixed_route_edge_partition import FixedRouteEdgePartition from .multicast_edge_partition import MulticastEdgePartition from .source_segmented_sdram_machine_partition import ( SourceSegmentedSDRAMMachinePartition) -from .machine_graph import MachineGraph -from .machine_graph_view import MachineGraphView __all__ = [ - "AbstractMachineEdgePartition", "AbstractSDRAMPartition", - "ConstantSDRAMMachinePartition", - 
"DestinationSegmentedSDRAMMachinePartition", "FixedRouteEdgePartition", - "MachineEdge", "MachineFPGAVertex", "MachineGraph", "MachineGraphView", + "AbstractSDRAMPartition", "ConstantSDRAMMachinePartition", + "DestinationSegmentedSDRAMMachinePartition", + "MachineEdge", "MachineFPGAVertex", "MachineSpiNNakerLinkVertex", "MachineVertex", "MulticastEdgePartition", "SDRAMMachineEdge", "SimpleMachineVertex", "SourceSegmentedSDRAMMachinePartition"] diff --git a/pacman/model/graphs/machine/abstract_machine_edge_partition.py b/pacman/model/graphs/machine/abstract_machine_edge_partition.py deleted file mode 100644 index b62328c90..000000000 --- a/pacman/model/graphs/machine/abstract_machine_edge_partition.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright (c) 2017-2019 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -from pacman.exceptions import PacmanConfigurationException -from spinn_utilities.abstract_base import abstractproperty - - -class AbstractMachineEdgePartition(object): - """ A simple implementation of a machine edge partition that will \ - communicate with a traffic type. - """ - - __slots__ = () - - def check_edge(self, edge): - """ check a edge traffic type. - - :param AbstractEdge edge: the edge to check - :raises PacmanInvalidParameterException: - If the edge does not belong in this edge partition - """ - # Check for an incompatible traffic type - if edge.traffic_type != self.traffic_type: - raise PacmanConfigurationException( - "A partition can only contain edges with the same " - "traffic_type; trying to add a {} edge to a partition of " - "type {}".format(edge.traffic_type, self.traffic_type)) - - @abstractproperty - def traffic_type(self): - """ The traffic type of all the edges in this edge partition. - - .. note:: - The reason for a abstract property which all machine outgoing - partitions is purely due the need for multiple slots and python's - lack of support for this. - - :rtype: EdgeTrafficType - """ diff --git a/pacman/model/graphs/machine/abstract_sdram_partition.py b/pacman/model/graphs/machine/abstract_sdram_partition.py index a07866f17..9055b214c 100644 --- a/pacman/model/graphs/machine/abstract_sdram_partition.py +++ b/pacman/model/graphs/machine/abstract_sdram_partition.py @@ -13,11 +13,9 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . from spinn_utilities.abstract_base import abstractmethod, AbstractBase -from pacman.model.graphs.machine import AbstractMachineEdgePartition -class AbstractSDRAMPartition( - AbstractMachineEdgePartition, metaclass=AbstractBase): +class AbstractSDRAMPartition(object, metaclass=AbstractBase): """ An edge partition that contains SDRAM edges. 
""" diff --git a/pacman/model/graphs/machine/constant_sdram_machine_partition.py b/pacman/model/graphs/machine/constant_sdram_machine_partition.py index 57bc1b8b4..bf08a83a4 100644 --- a/pacman/model/graphs/machine/constant_sdram_machine_partition.py +++ b/pacman/model/graphs/machine/constant_sdram_machine_partition.py @@ -18,7 +18,6 @@ from pacman.exceptions import ( PacmanConfigurationException, PartitionMissingEdgesException, SDRAMEdgeSizeException) -from pacman.model.graphs.common import EdgeTrafficType from pacman.model.graphs.machine import SDRAMMachineEdge @@ -37,21 +36,14 @@ class ConstantSDRAMMachinePartition( MISSING_EDGE_ERROR_MESSAGE = "Partition {} has no edges" - def __init__(self, identifier, pre_vertex, label): + def __init__(self, identifier, pre_vertex): super().__init__( - pre_vertex, identifier, allowed_edge_types=SDRAMMachineEdge, - constraints=None, label=label, traffic_weight=1, - class_name="ConstantSDRAMMachinePartition") + pre_vertex, identifier, allowed_edge_types=SDRAMMachineEdge) self._sdram_size = None self._sdram_base_address = None - @property - @overrides(AbstractSDRAMPartition.traffic_type) - def traffic_type(self): - return EdgeTrafficType.SDRAM - @overrides(AbstractSingleSourcePartition.add_edge) - def add_edge(self, edge, graph_code): + def add_edge(self, edge): if self._sdram_size is None: self._sdram_size = edge.sdram_size elif self._sdram_size != edge.sdram_size: @@ -59,7 +51,7 @@ def add_edge(self, edge, graph_code): "The edges within the constant sdram partition {} have " "inconsistent memory size requests.".format(self)) if self._sdram_base_address is None: - super().add_edge(edge, graph_code) + super().add_edge(edge) else: raise PacmanConfigurationException( "Illegal attempt to add an edge after sdram_base_address set") @@ -96,11 +88,3 @@ def get_sdram_size_of_region_for(self, vertex): if len(self._edges) == 0: return 0 return self._edges.peek().sdram_size - - @overrides(AbstractSingleSourcePartition.clone_without_edges) - def clone_without_edges(self): - """ - :rtype: ConstantSDRAMMachinePartition - """ - return ConstantSDRAMMachinePartition( - self._identifier, self._pre_vertex, self._label) diff --git a/pacman/model/graphs/machine/destination_segmented_sdram_machine_partition.py b/pacman/model/graphs/machine/destination_segmented_sdram_machine_partition.py index 8f997f2ef..0571e5fc7 100644 --- a/pacman/model/graphs/machine/destination_segmented_sdram_machine_partition.py +++ b/pacman/model/graphs/machine/destination_segmented_sdram_machine_partition.py @@ -17,7 +17,6 @@ from pacman.exceptions import ( PacmanConfigurationException, PartitionMissingEdgesException) from pacman.model.graphs import AbstractSingleSourcePartition -from pacman.model.graphs.common import EdgeTrafficType from pacman.model.graphs.machine import ( AbstractSDRAMPartition, SDRAMMachineEdge) @@ -32,19 +31,12 @@ class DestinationSegmentedSDRAMMachinePartition( "_sdram_base_address", ] - def __init__(self, identifier, pre_vertex, label): + def __init__(self, identifier, pre_vertex): super().__init__( pre_vertex=pre_vertex, identifier=identifier, - allowed_edge_types=SDRAMMachineEdge, constraints=None, - label=label, traffic_weight=1, - class_name="DestinationSegmentedSDRAMMachinePartition") + allowed_edge_types=SDRAMMachineEdge) self._sdram_base_address = None - @property - @overrides(AbstractSDRAMPartition.traffic_type) - def traffic_type(self): - return EdgeTrafficType.SDRAM - @overrides(AbstractSDRAMPartition.total_sdram_requirements) def total_sdram_requirements(self): 
diff --git a/pacman/model/graphs/machine/destination_segmented_sdram_machine_partition.py b/pacman/model/graphs/machine/destination_segmented_sdram_machine_partition.py
index 8f997f2ef..0571e5fc7 100644
--- a/pacman/model/graphs/machine/destination_segmented_sdram_machine_partition.py
+++ b/pacman/model/graphs/machine/destination_segmented_sdram_machine_partition.py
@@ -17,7 +17,6 @@
 from pacman.exceptions import (
     PacmanConfigurationException, PartitionMissingEdgesException)
 from pacman.model.graphs import AbstractSingleSourcePartition
-from pacman.model.graphs.common import EdgeTrafficType
 from pacman.model.graphs.machine import (
     AbstractSDRAMPartition, SDRAMMachineEdge)

@@ -32,19 +31,12 @@ class DestinationSegmentedSDRAMMachinePartition(
         "_sdram_base_address",
     ]

-    def __init__(self, identifier, pre_vertex, label):
+    def __init__(self, identifier, pre_vertex):
         super().__init__(
             pre_vertex=pre_vertex, identifier=identifier,
-            allowed_edge_types=SDRAMMachineEdge, constraints=None,
-            label=label, traffic_weight=1,
-            class_name="DestinationSegmentedSDRAMMachinePartition")
+            allowed_edge_types=SDRAMMachineEdge)
         self._sdram_base_address = None

-    @property
-    @overrides(AbstractSDRAMPartition.traffic_type)
-    def traffic_type(self):
-        return EdgeTrafficType.SDRAM
-
     @overrides(AbstractSDRAMPartition.total_sdram_requirements)
     def total_sdram_requirements(self):
         return sum(edge.sdram_size for edge in self.edges)
@@ -64,13 +56,11 @@ def sdram_base_address(self, new_value):
             new_value += edge.sdram_size

     @overrides(AbstractSingleSourcePartition.add_edge)
-    def add_edge(self, edge, graph_code):
+    def add_edge(self, edge):
         if self._sdram_base_address is not None:
             raise PacmanConfigurationException(
                 "Illegal attempt to add an edge after sdram_base_address set")

-        super().check_edge(edge)
-
         # safety check
         if self._pre_vertex != edge.pre_vertex:
             raise PacmanConfigurationException(
@@ -78,7 +68,7 @@ def add_edge(self, edge, graph_code):
                 "1 pre-vertex")

         # add
-        super().add_edge(edge, graph_code)
+        super().add_edge(edge)

     @overrides(AbstractSDRAMPartition.get_sdram_base_address_for)
     def get_sdram_base_address_for(self, vertex):
@@ -97,11 +87,3 @@ def get_sdram_size_of_region_for(self, vertex):
             if edge.post_vertex == vertex:
                 return edge.sdram_size
         return None
-
-    @overrides(AbstractSingleSourcePartition.clone_without_edges)
-    def clone_without_edges(self):
-        """
-        :rtype: DestinationSegmentedSDRAMMachinePartition
-        """
-        return DestinationSegmentedSDRAMMachinePartition(
-            self._identifier, self._pre_vertex, self._label)
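A hedged sketch of the destination-segmented variant: one pre-vertex fans out
to several destinations and each edge receives its own offset once the base
address is assigned (vertices are placeholders):

    part = DestinationSegmentedSDRAMMachinePartition("fan_out", pre)
    part.add_edge(SDRAMMachineEdge(pre, post_a, "to_a"))
    part.add_edge(SDRAMMachineEdge(pre, post_b, "to_b"))
    part.sdram_base_address = base  # walks the edges, offsetting each one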
- """ - - __slots__ = () - - def __init__( - self, pre_vertex, identifier, constraints=None, label=None, - traffic_weight=1): - """ - :param str identifier: The identifier of the partition - :param list(AbstractConstraint) constraints: Any initial constraints - :param str label: An optional label of the partition - :param int traffic_weight: - The weight of traffic going down this partition - """ - super().__init__( - pre_vertex=pre_vertex, identifier=identifier, - allowed_edge_types=MachineEdge, constraints=constraints, - label=label, traffic_weight=traffic_weight, - class_name="SingleSourceMachineEdgePartition") - - @overrides(AbstractSingleSourcePartition.add_edge) - def add_edge(self, edge, graph_code): - super().check_edge(edge) - super().add_edge(edge, graph_code) - - @property - @overrides(AbstractMachineEdgePartition.traffic_type) - def traffic_type(self): - return EdgeTrafficType.FIXED_ROUTE - - @overrides(AbstractEdgePartition.clone_without_edges) - def clone_without_edges(self): - """ - :rtype: FixedRouteEdgePartition - """ - return FixedRouteEdgePartition( - self._pre_vertex, self._identifier, self._constraints, - self._label, self._traffic_weight) diff --git a/pacman/model/graphs/machine/machine_edge.py b/pacman/model/graphs/machine/machine_edge.py index 234b4c5e3..d5b0441fc 100644 --- a/pacman/model/graphs/machine/machine_edge.py +++ b/pacman/model/graphs/machine/machine_edge.py @@ -14,7 +14,6 @@ # along with this program. If not, see . from spinn_utilities.overrides import overrides -from pacman.model.graphs.common import EdgeTrafficType from pacman.model.graphs import AbstractEdge @@ -29,52 +28,20 @@ class MachineEdge(AbstractEdge): # The vertex at the end of the edge "_post_vertex", - # The type of traffic for this edge - "_traffic_type", - - # The traffic weight of the edge - "_traffic_weight", - # The label of the edge - "_label", - - # The originating application edge - "_app_edge" + "_label" ] - def __init__( - self, pre_vertex, post_vertex, - traffic_type=EdgeTrafficType.MULTICAST, label=None, - traffic_weight=1, app_edge=None): + def __init__(self, pre_vertex, post_vertex, label=None): """ :param MachineVertex pre_vertex: The vertex at the start of the edge. :param MachineVertex post_vertex: The vertex at the end of the edge. - :param EdgeTrafficType traffic_type: - The type of traffic that this edge will carry. :param label: The name of the edge. :type label: str or None - :param int traffic_weight: - The optional weight of traffic expected to travel down this edge - relative to other edges. (default is 1) - :param app_edge: The application edge from which this was created. - If `None`, this edge is part of a pure machine graph. - :type app_edge: ApplicationEdge or None """ self._label = label self._pre_vertex = pre_vertex self._post_vertex = post_vertex - self._traffic_type = traffic_type - self._traffic_weight = traffic_weight - self._app_edge = app_edge - # depends on self._app_edge being set - self.associate_application_edge() - - def associate_application_edge(self): - """ - Asks the application edge (if any) to remember this machine edge. 
- """ - if self._app_edge: - self._app_edge.remember_associated_machine_edge(self) @property @overrides(AbstractEdge.label) @@ -99,33 +66,7 @@ def post_vertex(self): """ return self._post_vertex - @property - def traffic_type(self): - """ - :rtype: EdgeTrafficType - """ - return self._traffic_type - - @property - def app_edge(self): - """ The application edge from which this was created - - :rtype: ApplicationEdge or None - """ - return self._app_edge - - @property - def traffic_weight(self): - """ The amount of traffic expected to go down this edge relative to - other edges. - - :rtype: int - """ - return self._traffic_weight - def __repr__(self): return ( - "MachineEdge(pre_vertex={}, post_vertex={}, " - "traffic_type={}, label={}, traffic_weight={})".format( - self._pre_vertex, self._post_vertex, self._traffic_type, - self.label, self._traffic_weight)) + "MachineEdge(pre_vertex={}, post_vertex={}, label={})".format( + self._pre_vertex, self._post_vertex, self.label)) diff --git a/pacman/model/graphs/machine/machine_fpga_vertex.py b/pacman/model/graphs/machine/machine_fpga_vertex.py index 9ec3c0fb6..955e30217 100644 --- a/pacman/model/graphs/machine/machine_fpga_vertex.py +++ b/pacman/model/graphs/machine/machine_fpga_vertex.py @@ -14,7 +14,6 @@ # along with this program. If not, see . from spinn_utilities.overrides import overrides -from pacman.model.constraints.placer_constraints import ChipAndCoreConstraint from pacman.model.graphs import AbstractFPGA, AbstractVirtual from pacman.model.resources import ResourceContainer from .machine_vertex import MachineVertex @@ -27,9 +26,7 @@ class MachineFPGAVertex(MachineVertex, AbstractFPGA): __slots__ = [ "_fpga_id", "_fpga_link_id", - "_board_address", - "_virtual_chip_x", - "_virtual_chip_y"] + "_board_address"] def __init__( self, fpga_id, fpga_link_id, board_address=None, label=None, @@ -41,8 +38,6 @@ def __init__( self._fpga_id = fpga_id self._fpga_link_id = fpga_link_id self._board_address = board_address - self._virtual_chip_x = None - self._virtual_chip_y = None @property @overrides(MachineVertex.resources_required) @@ -63,25 +58,3 @@ def fpga_link_id(self): @overrides(AbstractVirtual.board_address) def board_address(self): return self._board_address - - @property - @overrides(AbstractVirtual.virtual_chip_x) - def virtual_chip_x(self): - return self._virtual_chip_x - - @property - @overrides(AbstractVirtual.virtual_chip_y) - def virtual_chip_y(self): - return self._virtual_chip_y - - @overrides(AbstractVirtual.set_virtual_chip_coordinates) - def set_virtual_chip_coordinates(self, virtual_chip_x, virtual_chip_y): - if virtual_chip_x is not None and virtual_chip_y is not None: - self._virtual_chip_x = virtual_chip_x - self._virtual_chip_y = virtual_chip_y - self.add_constraint(ChipAndCoreConstraint( - self._virtual_chip_x, self._virtual_chip_y)) - if (self._app_vertex is not None and - self._app_vertex.virtual_chip_x is None and - self._app_vertex.virtual_chip_y is None): - self._app_vertex.set_virtual_chip_coordinates() diff --git a/pacman/model/graphs/machine/machine_graph.py b/pacman/model/graphs/machine/machine_graph.py deleted file mode 100644 index 05be4e2d8..000000000 --- a/pacman/model/graphs/machine/machine_graph.py +++ /dev/null @@ -1,319 +0,0 @@ -# Copyright (c) 2017-2019 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your 
option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -from collections import defaultdict -from .machine_vertex import MachineVertex -from .machine_edge import MachineEdge -from spinn_utilities.ordered_set import OrderedSet -from spinn_utilities.overrides import overrides -from pacman.exceptions import ( - PacmanAlreadyExistsException, PacmanInvalidParameterException) -from pacman.model.graphs import Graph -from pacman.model.graphs.common import EdgeTrafficType -from pacman.model.graphs.machine import ( - AbstractMachineEdgePartition, AbstractSDRAMPartition, - FixedRouteEdgePartition, MulticastEdgePartition) - - -class MachineGraph(Graph): - """ A graph whose vertices can fit on the chips of a machine. - """ - - __slots__ = [ - # Flags to say the application level is used so all machine vertices - # will have an application vertex - "_application_level_used", - # Ordered set of partitions - "_edge_partitions", - # A double dictionary of MULTICAST edges by their - # application id and then their (partition name) - "_multicast_partitions", - # The sets of multicast edge partitions by pre-vertex - "_multicast_edge_partitions_by_pre_vertex", - # The sets of fixed_point edge partitions by pre-vertex - "_fixed_route_edge_partitions_by_pre_vertex", - # The sdram outgoing edge partitions by pre-vertex - "_sdram_edge_partitions_by_pre_vertex", - # The sets of multicast edge partitions by pre-vertex - "_multicast_edge_partitions_by_post_vertex", - # The sets of fixed_point edge partitions by pre-vertex - "_fixed_route_edge_partitions_by_post_vertex", - # The sdram outgoing edge partitions by pre-vertex - "_sdram_edge_partitions_by_post_vertex", - ] - - MISSING_APP_VERTEX_ERROR_MESSAGE = ( - "The vertex does not have an app_vertex, " - "which is required when other app_vertices exist.") - - UNEXPECTED_APP_VERTEX_ERROR_MESSAGE = ( - "The vertex has an app_vertex, " - "which is not allowed when other vertices not have app_vertices.") - - def __init__(self, label, application_graph=None): - """ - :param label: The label for the graph. - :type label: str or None - :param application_graph: - The application graph that this machine graph is derived from, if - it is derived from one at all. 
- :type application_graph: ApplicationGraph or None - """ - super().__init__(MachineVertex, MachineEdge, label) - if application_graph: - application_graph.forget_machine_graph() - # Check the first vertex added - self._application_level_used = True - else: - # Must be false as there is no App_graph - self._application_level_used = False - self._multicast_partitions = defaultdict( - lambda: defaultdict(set)) - self._edge_partitions = OrderedSet() - self._fixed_route_edge_partitions_by_pre_vertex = ( - defaultdict(OrderedSet)) - self._multicast_edge_partitions_by_pre_vertex = ( - defaultdict(OrderedSet)) - self._sdram_edge_partitions_by_pre_vertex = ( - defaultdict(OrderedSet)) - self._fixed_route_edge_partitions_by_post_vertex = ( - defaultdict(OrderedSet)) - self._multicast_edge_partitions_by_post_vertex = ( - defaultdict(OrderedSet)) - self._sdram_edge_partitions_by_post_vertex = ( - defaultdict(OrderedSet)) - - @overrides(Graph.add_edge) - def add_edge(self, edge, outgoing_edge_partition_name): - edge_partition = super().add_edge( - edge, outgoing_edge_partition_name) - if (isinstance(edge_partition, MulticastEdgePartition)): - if edge.pre_vertex.app_vertex: - by_app = self._multicast_partitions[ - edge.pre_vertex.app_vertex] - else: - by_app = self._multicast_partitions[ - edge.pre_vertex] - by_partition = by_app[outgoing_edge_partition_name] - by_partition.add(edge.pre_vertex) - self._multicast_edge_partitions_by_post_vertex[ - edge.post_vertex].add(edge_partition) - elif isinstance(edge_partition, FixedRouteEdgePartition): - self._fixed_route_edge_partitions_by_post_vertex[ - edge.post_vertex].add(edge_partition) - elif isinstance(edge_partition, AbstractSDRAMPartition): - self._sdram_edge_partitions_by_post_vertex[ - edge.post_vertex].add(edge_partition) - else: - raise NotImplementedError( - "Unexpected edge_partition: {}".format(edge_partition)) - return edge_partition - - @property - def multicast_partitions(self): - """ - Returns a double dictionary of app id then - outgoing_edge_partition_name to a set of machine_vertex that act as - pre vertices for these multicast edges - - The app_id is normally the (machine) edge.pre_vertex.app_vertex. 
- This then groups the edges which come from the same app_vertex - If the (machine) edge.pre_vertex has no app vertex then the app_id will - be the machine vertex which will then form its own group of 1 - - :rtype: dict(ApplicationVertex, dict(str, set(MachineVertex)) - """ - return self._multicast_partitions - - @overrides(Graph.add_vertex) - def add_vertex(self, vertex): - super().add_vertex(vertex) - if self._application_level_used: - try: - vertex.app_vertex.remember_machine_vertex(vertex) - except AttributeError as e: - if self.n_vertices == 1: - self._application_level_used = False - else: - raise PacmanInvalidParameterException( - "vertex", str(vertex), - self.MISSING_APP_VERTEX_ERROR_MESSAGE) from e - elif vertex.app_vertex: - raise PacmanInvalidParameterException( - "vertex", vertex, self.UNEXPECTED_APP_VERTEX_ERROR_MESSAGE) - - @overrides(Graph.add_outgoing_edge_partition) - def add_outgoing_edge_partition(self, edge_partition): - # verify that this partition is suitable for this graph - if not isinstance(edge_partition, AbstractMachineEdgePartition): - raise PacmanInvalidParameterException( - "outgoing_edge_partition", str(edge_partition.__class__), - "Partitions of this graph must be an " - "AbstractMachineEdgePartition") - - # check this partition doesn't already exist - if edge_partition in self._edge_partitions: - raise PacmanAlreadyExistsException( - str(AbstractMachineEdgePartition), edge_partition) - - self._edge_partitions.add(edge_partition) - edge_partition.register_graph_code(id(self)) - - for pre_vertex in edge_partition.pre_vertices: - key = (pre_vertex, edge_partition.identifier) - self._outgoing_edge_partitions_by_name[key] = edge_partition - if isinstance(edge_partition, MulticastEdgePartition): - self._multicast_edge_partitions_by_pre_vertex[ - pre_vertex].add(edge_partition) - elif isinstance(edge_partition, FixedRouteEdgePartition): - self._fixed_route_edge_partitions_by_pre_vertex[ - pre_vertex].add(edge_partition) - elif isinstance(edge_partition, AbstractSDRAMPartition): - self._sdram_edge_partitions_by_pre_vertex[ - pre_vertex].add(edge_partition) - else: - raise NotImplementedError( - "Unexpected edge_partition: {}".format(edge_partition)) - for edge in edge_partition.edges: - self._register_edge(edge, edge_partition) - - @overrides(Graph.new_edge_partition) - def new_edge_partition(self, name, edge): - if edge.traffic_type == EdgeTrafficType.FIXED_ROUTE: - return FixedRouteEdgePartition( - identifier=name, pre_vertex=edge.pre_vertex) - elif edge.traffic_type == EdgeTrafficType.MULTICAST: - return MulticastEdgePartition( - identifier=name, pre_vertex=edge.pre_vertex) - else: - raise PacmanInvalidParameterException( - "edge", edge, - "Unable to add an Edge with traffic type {} unless you first " - "add a partition for it".format(edge.traffic_type)) - - @property - @overrides(Graph.outgoing_edge_partitions) - def outgoing_edge_partitions(self): - return self._edge_partitions - - @property - @overrides(Graph.n_outgoing_edge_partitions) - def n_outgoing_edge_partitions(self): - return len(self._edge_partitions) - - def get_fixed_route_edge_partitions_starting_at_vertex(self, vertex): - """ Get only the fixed_route edge partitions that start at the vertex. 
- - :param MachineVertex vertex: - The vertex at which the edge partitions to find starts - :rtype: iterable(FixedRouteEdgePartition) - """ - return self._fixed_route_edge_partitions_by_pre_vertex.get(vertex, []) - - def get_multicast_edge_partitions_starting_at_vertex(self, vertex): - """ Get only the multicast edge partitions that start at the vertex. - - :param MachineVertex vertex: - The vertex at which the edge partitions to find starts - :rtype: iterable(MulticastEdgePartition) - """ - return self._multicast_edge_partitions_by_pre_vertex.get(vertex, []) - - def get_sdram_edge_partitions_starting_at_vertex(self, vertex): - """ Get all the SDRAM edge partitions that start at the given vertex. - - :param MachineVertex vertex: - The vertex at which the sdram edge partitions to find starts - :rtype: iterable(AbstractSDRAMPartition) - """ - return self._sdram_edge_partitions_by_pre_vertex.get(vertex, []) - - @overrides(Graph.get_outgoing_edge_partitions_starting_at_vertex) - def get_outgoing_edge_partitions_starting_at_vertex(self, vertex): - for partition in self.\ - get_fixed_route_edge_partitions_starting_at_vertex(vertex): - yield partition - for partition in \ - self.get_multicast_edge_partitions_starting_at_vertex(vertex): - yield partition - for partition in \ - self.get_sdram_edge_partitions_starting_at_vertex(vertex): - yield partition - - def get_fixed_route_edge_partitions_ending_at_vertex(self, vertex): - """ Get only the fixed_route edge partitions that end at the vertex. - - :param MachineVertex vertex: - The vertex at which the edge partitions to find starts - :rtype: iterable(FixedRouteEdgePartition) - """ - return self._fixed_route_edge_partitions_by_post_vertex.get(vertex, []) - - def get_multicast_edge_partitions_ending_at_vertex(self, vertex): - """ Get only the multicast edge partitions that end at the vertex. - - :param MachineVertex vertex: - The vertex at which the edge partitions to find starts - :rtype: iterable(MulticastEdgePartition) - """ - return self._multicast_edge_partitions_by_post_vertex.get(vertex, []) - - def get_sdram_edge_partitions_ending_at_vertex(self, vertex): - """ Get all the sdram edge partitions that end at the given vertex. - - :param MachineVertex vertex: - The vertex at which the SDRAM edge partitions to find starts - :rtype: iterable(AbstractSDRAMPartition) - """ - return self._sdram_edge_partitions_by_post_vertex.get(vertex, []) - - def get_edge_partitions_ending_at_vertex(self, vertex): - """ Get all the edge partitions that end at the given vertex. - - :param MachineVertex vertex: - The vertex at which the SDRAM edge partitions to find starts - :rtype: iterable(AbstractPartition) - """ - for partition in \ - self.get_fixed_route_edge_partitions_ending_at_vertex(vertex): - yield partition - for partition in \ - self.get_multicast_edge_partitions_ending_at_vertex(vertex): - yield partition - for partition in \ - self.get_sdram_edge_partitions_ending_at_vertex(vertex): - yield partition - - def clone(self): - """ - Makes as shallow as possible copy of the graph. - - Vertices and edges are copied over. Partition will be new objects. 
- - :return: A shallow copy of this graph - :rtype: MachineGraph - :raises PacmanInvalidParameterException: - If called on a none empty graph when Application Vertexes exist - """ - new_graph = MachineGraph(self.label) - for vertex in self.vertices: - new_graph.add_vertex(vertex) - for outgoing_partition in \ - self.outgoing_edge_partitions: - new_outgoing_partition = outgoing_partition.clone_without_edges() - new_graph.add_outgoing_edge_partition(new_outgoing_partition) - for edge in outgoing_partition.edges: - new_graph.add_edge(edge, outgoing_partition.identifier) - return new_graph diff --git a/pacman/model/graphs/machine/machine_graph_view.py b/pacman/model/graphs/machine/machine_graph_view.py deleted file mode 100644 index 720106b7b..000000000 --- a/pacman/model/graphs/machine/machine_graph_view.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright (c) 2017-2019 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -from spinn_utilities.overrides import overrides -from pacman.exceptions import PacmanConfigurationException -from .machine_graph import MachineGraph - - -class MachineGraphView(MachineGraph): - """ A frozen view of a Machine Graph - - As this shares all the same objects as the graph it is a view over except - for the class and id. So any changes to the other are reflected. - - All methods that allow changes to the graph should be disabled. 
- """ - - __slots__ = [] - - def __init__(self, other): - super().__init__(other.label) - # Reused the objects as they are unmutable and may be keys - self._vertices = other._vertices - self._vertex_by_label = other._vertex_by_label - # should never be needed - self._unlabelled_vertex_count = None - self._outgoing_edge_partitions_by_name = \ - other._outgoing_edge_partitions_by_name - self._outgoing_edges = other._outgoing_edges - self._incoming_edges = other._incoming_edges - self._incoming_edges_by_partition_name = \ - other._incoming_edges_by_partition_name - self._outgoing_edge_partition_by_edge = \ - other._outgoing_edge_partition_by_edge - self._application_level_used = other._application_level_used - self._multicast_partitions = other._multicast_partitions - self._edge_partitions = other._edge_partitions - self._fixed_route_edge_partitions_by_pre_vertex = \ - other._fixed_route_edge_partitions_by_pre_vertex - self._multicast_edge_partitions_by_pre_vertex = \ - other._multicast_edge_partitions_by_pre_vertex - self._sdram_edge_partitions_by_pre_vertex = \ - other._sdram_edge_partitions_by_pre_vertex - self._fixed_route_edge_partitions_by_post_vertex = \ - other._fixed_route_edge_partitions_by_post_vertex - self._multicast_edge_partitions_by_post_vertex = \ - other._multicast_edge_partitions_by_post_vertex - self._sdram_edge_partitions_by_post_vertex = \ - other._sdram_edge_partitions_by_post_vertex - - @overrides(MachineGraph.add_edge) - def add_edge(self, edge, outgoing_edge_partition_name): - raise PacmanConfigurationException( - "Please add edges via simulator not directly to this graph") - - @overrides(MachineGraph.add_vertex) - def add_vertex(self, vertex): - raise PacmanConfigurationException( - "Please add vertices via simulator not directly to this graph") - - @overrides(MachineGraph.add_outgoing_edge_partition) - def add_outgoing_edge_partition(self, edge_partition): - raise PacmanConfigurationException( - "Please add partitions via simulator not directly to this graph") diff --git a/pacman/model/graphs/machine/machine_sdram_edge.py b/pacman/model/graphs/machine/machine_sdram_edge.py index 97021d3c8..37bbc2a3d 100644 --- a/pacman/model/graphs/machine/machine_sdram_edge.py +++ b/pacman/model/graphs/machine/machine_sdram_edge.py @@ -12,11 +12,9 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see . -from pacman.exceptions import SDRAMEdgeSizeException -from pacman.model.graphs.abstract_supports_sdram_edges import ( - AbstractSupportsSDRAMEdges) -from pacman.model.graphs.common import EdgeTrafficType from pacman.model.graphs.machine import MachineEdge +from pacman.model.graphs import AbstractSupportsSDRAMEdges +from pacman.exceptions import PacmanConfigurationException class SDRAMMachineEdge(MachineEdge): @@ -28,64 +26,14 @@ class SDRAMMachineEdge(MachineEdge): "_sdram_base_address" ] - NO_SUPPORT_MESSAGE = ( - "The {}vertex {} does not implement the AbstractSupportsSDRAMEdges" - " API that can be found at \' " - "pacman.model.graphs.abstract_supports_sdram_edges \'. Please fix and" - " try again so that sdram edge {} can know its required size.") - - DISAGREEMENT_MESSAGE = ( - "The pre vertex sdram size {} does not agree with the post vertex " - "sdram size {}. The SDRAM machine edge does not yet know how to " - "handle this case. 
Please fix and try again.") - - def __init__(self, pre_vertex, post_vertex, label, app_edge=None): - super().__init__( - pre_vertex, post_vertex, traffic_type=EdgeTrafficType.SDRAM, - label=label, traffic_weight=1, app_edge=app_edge) - - (pre_vertex_sdram, post_vertex_sdram) = self.__get_vertex_sdrams() - self._sdram_size = self.__check_vertex_sdram_sizes( - pre_vertex_sdram, post_vertex_sdram) + def __init__(self, pre_vertex, post_vertex, label): + if not isinstance(pre_vertex, AbstractSupportsSDRAMEdges): + raise PacmanConfigurationException( + f"Pre-vertex {pre_vertex} doesn't support SDRAM edges") + super().__init__(pre_vertex, post_vertex, label=label) + self._sdram_size = pre_vertex.sdram_requirement(self) self._sdram_base_address = None - def __check_vertex_sdram_sizes(self, pre_vertex_sdram, post_vertex_sdram): - """ checks that the sdram request is consistent between the vertices. - - :param int pre_vertex_sdram: pre vertex sdram requirement - :param int post_vertex_sdram: post vertex sdram requirement - :return: the sdram requirement. - :rtype: int - :raises SDRAMEdgeSizeException: if the values disagree - """ - if pre_vertex_sdram == post_vertex_sdram: - return pre_vertex_sdram - else: - raise SDRAMEdgeSizeException(self.DISAGREEMENT_MESSAGE.format( - pre_vertex_sdram, post_vertex_sdram)) - - def __get_vertex_sdrams(self): - """ query the vertices to find the sdram requirements. - - :return: tuple of pre and post sdram costs. - :rtype: tuple(int, int) - :rtype SDRAMEdgeSizeException: - if either vertex does not support SDRAM edges. - """ - - if isinstance(self.pre_vertex, AbstractSupportsSDRAMEdges): - pre_vertex_sdram_size = self.pre_vertex.sdram_requirement(self) - else: - raise SDRAMEdgeSizeException( - self.NO_SUPPORT_MESSAGE.format("pre", self.pre_vertex, self)) - - if isinstance(self.post_vertex, AbstractSupportsSDRAMEdges): - post_vertex_sdram_size = self.post_vertex.sdram_requirement(self) - else: - raise SDRAMEdgeSizeException(self.NO_SUPPORT_MESSAGE.format( - "post", self.post_vertex, self)) - return pre_vertex_sdram_size, post_vertex_sdram_size - @property def sdram_size(self): return self._sdram_size @@ -99,7 +47,9 @@ def sdram_base_address(self, new_value): self._sdram_base_address = new_value def __repr__(self): - return f"EdgramEdge {self.label} " + return (f"SDRAMMachineEdge(pre_vertex={self.pre_vertex}," + f" post_vertex={self.post_vertex}, label={self.label}," + f" sdram_size={self.sdram_size})") def __str__(self): return self.__repr__() diff --git a/pacman/model/graphs/machine/machine_spinnaker_link_vertex.py b/pacman/model/graphs/machine/machine_spinnaker_link_vertex.py index 58b02baf4..f8404607f 100644 --- a/pacman/model/graphs/machine/machine_spinnaker_link_vertex.py +++ b/pacman/model/graphs/machine/machine_spinnaker_link_vertex.py @@ -18,7 +18,6 @@ from .machine_vertex import MachineVertex from pacman.model.graphs import ( AbstractVirtual, AbstractSpiNNakerLink) -from pacman.model.constraints.placer_constraints import ChipAndCoreConstraint class MachineSpiNNakerLinkVertex(MachineVertex, AbstractSpiNNakerLink): @@ -27,9 +26,7 @@ class MachineSpiNNakerLinkVertex(MachineVertex, AbstractSpiNNakerLink): __slots__ = [ "_spinnaker_link_id", - "_board_address", - "_virtual_chip_x", - "_virtual_chip_y"] + "_board_address"] def __init__( self, spinnaker_link_id, board_address=None, label=None, @@ -39,8 +36,6 @@ def __init__( vertex_slice=vertex_slice) self._spinnaker_link_id = spinnaker_link_id self._board_address = board_address - self._virtual_chip_x = None - 
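The pre-vertex now solely determines an SDRAM edge's size, and it must
implement AbstractSupportsSDRAMEdges. A hedged sketch (SharedSDRAMVertex is
illustrative, not part of this diff):

    from pacman.model.graphs import AbstractSupportsSDRAMEdges
    from pacman.model.graphs.machine import MachineVertex, SDRAMMachineEdge
    from pacman.model.resources import ResourceContainer

    class SharedSDRAMVertex(MachineVertex, AbstractSupportsSDRAMEdges):
        @property
        def resources_required(self):
            return ResourceContainer()

        def sdram_requirement(self, sdram_machine_edge):
            return 1024  # bytes; a fixed size for the sketch

    pre, post = SharedSDRAMVertex(), SharedSDRAMVertex()
    edge = SDRAMMachineEdge(pre, post, "sdram")
    assert edge.sdram_size == 1024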
diff --git a/pacman/model/graphs/machine/machine_spinnaker_link_vertex.py b/pacman/model/graphs/machine/machine_spinnaker_link_vertex.py
index 58b02baf4..f8404607f 100644
--- a/pacman/model/graphs/machine/machine_spinnaker_link_vertex.py
+++ b/pacman/model/graphs/machine/machine_spinnaker_link_vertex.py
@@ -18,7 +18,6 @@
 from .machine_vertex import MachineVertex
 from pacman.model.graphs import (
     AbstractVirtual, AbstractSpiNNakerLink)
-from pacman.model.constraints.placer_constraints import ChipAndCoreConstraint


 class MachineSpiNNakerLinkVertex(MachineVertex, AbstractSpiNNakerLink):
@@ -27,9 +26,7 @@ class MachineSpiNNakerLinkVertex(MachineVertex, AbstractSpiNNakerLink):
     __slots__ = [
         "_spinnaker_link_id",
-        "_board_address",
-        "_virtual_chip_x",
-        "_virtual_chip_y"]
+        "_board_address"]

     def __init__(
             self, spinnaker_link_id, board_address=None, label=None,
@@ -39,8 +36,6 @@ def __init__(
             vertex_slice=vertex_slice)
         self._spinnaker_link_id = spinnaker_link_id
         self._board_address = board_address
-        self._virtual_chip_x = None
-        self._virtual_chip_y = None

     @property
     @overrides(MachineVertex.resources_required)
@@ -56,25 +51,3 @@ def spinnaker_link_id(self):
     @overrides(AbstractVirtual.board_address)
     def board_address(self):
         return self._board_address
-
-    @property
-    @overrides(AbstractVirtual.virtual_chip_x)
-    def virtual_chip_x(self):
-        return self._virtual_chip_x
-
-    @property
-    @overrides(AbstractVirtual.virtual_chip_y)
-    def virtual_chip_y(self):
-        return self._virtual_chip_y
-
-    @overrides(AbstractVirtual.set_virtual_chip_coordinates)
-    def set_virtual_chip_coordinates(self, virtual_chip_x, virtual_chip_y):
-        if virtual_chip_x is not None and virtual_chip_y is not None:
-            self._virtual_chip_x = virtual_chip_x
-            self._virtual_chip_y = virtual_chip_y
-            self.add_constraint(ChipAndCoreConstraint(
-                self._virtual_chip_x, self._virtual_chip_y))
-        if (self._app_vertex is not None and
-                self._app_vertex.virtual_chip_x is None and
-                self._app_vertex.virtual_chip_y is None):
-            self._app_vertex.set_virtual_chip_coordinates()
diff --git a/pacman/model/graphs/machine/machine_vertex.py b/pacman/model/graphs/machine/machine_vertex.py
index 6908c0579..ff38f3ac5 100644
--- a/pacman/model/graphs/machine/machine_vertex.py
+++ b/pacman/model/graphs/machine/machine_vertex.py
@@ -76,11 +76,10 @@ def vertex_slice(self):
         """
         return self._vertex_slice

-    def get_n_keys_for_partition(self, _partition):
+    def get_n_keys_for_partition(self, partition_id):
         """ Get the number of keys required by the given partition of edges.

-        :param ~pacman.model.graphs.OutgoingEdgePartition _partition:
-            An partition that comes out of this vertex
+        :param str partition_id: The identifier of the partition
         :return: The number of keys required
         :rtype: int
         """
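Callers of get_n_keys_for_partition now pass the partition identifier, not
the partition object. A hedged override sketch (imports as in the previous
sketch; slice handling is simplified and assumes a default vertex slice):

    class KeyedVertex(MachineVertex):
        @property
        def resources_required(self):
            return ResourceContainer()

        def get_n_keys_for_partition(self, partition_id):
            # one key per atom, whatever the partition identifier
            return self.vertex_slice.n_atoms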
diff --git a/pacman/model/graphs/machine/multicast_edge_partition.py b/pacman/model/graphs/machine/multicast_edge_partition.py
index 8f55a96e1..ae26c80cc 100644
--- a/pacman/model/graphs/machine/multicast_edge_partition.py
+++ b/pacman/model/graphs/machine/multicast_edge_partition.py
@@ -12,17 +12,11 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
-from pacman.model.graphs.machine.abstract_machine_edge_partition import (
-    AbstractMachineEdgePartition)
-from spinn_utilities.overrides import overrides
-from pacman.model.graphs.common import EdgeTrafficType
-from pacman.model.graphs import (
-    AbstractEdgePartition, AbstractSingleSourcePartition)
+from pacman.model.graphs import AbstractSingleSourcePartition
 from pacman.model.graphs.machine.machine_edge import MachineEdge


-class MulticastEdgePartition(
-        AbstractSingleSourcePartition, AbstractMachineEdgePartition):
+class MulticastEdgePartition(AbstractSingleSourcePartition):
     """ A simple implementation of a machine edge partition that will \
         communicate with SpiNNaker multicast packets. They have a common set \
         of sources with the same semantics and so can share a single key.
@@ -30,38 +24,15 @@ class MulticastEdgePartition(

     __slots__ = ()

-    def __init__(
-            self, pre_vertex, identifier, constraints=None, label=None,
-            traffic_weight=1):
+    def __init__(self, pre_vertex, identifier):
         """
         :param pre_vertex: the pre vertex of this partition.
         :param str identifier: The identifier of the partition
-        :param list(AbstractConstraint) constraints: Any initial constraints
-        :param str label: An optional label of the partition
-        :param int traffic_weight:
-            The weight of traffic going down this partition
         """
         super().__init__(
             pre_vertex=pre_vertex, identifier=identifier,
-            allowed_edge_types=MachineEdge, constraints=constraints,
-            label=label, traffic_weight=traffic_weight,
-            class_name="SingleSourceMachineEdgePartition")
+            allowed_edge_types=MachineEdge)

-    @overrides(AbstractSingleSourcePartition.add_edge)
-    def add_edge(self, edge, graph_code):
-        super().check_edge(edge)
-        super().add_edge(edge, graph_code)
-
-    @property
-    @overrides(AbstractMachineEdgePartition.traffic_type)
-    def traffic_type(self):
-        return EdgeTrafficType.MULTICAST
-
-    @overrides(AbstractEdgePartition.clone_without_edges)
-    def clone_without_edges(self):
-        """
-        :rtype: MulticastEdgePartition
-        """
-        return MulticastEdgePartition(
-            self._pre_vertex, self._identifier, self._constraints,
-            self._label, self._traffic_weight)
+    def __repr__(self):
+        return (f"MulticastEdgePartition(pre_vertex={self.pre_vertex},"
+                f" identifier={self.identifier})")
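A hedged sketch of building one of these partitions directly, as a splitter
now does when reporting internal multicast connections (src and dst are
placeholder machine vertices):

    part = MulticastEdgePartition(pre_vertex=src, identifier="SPIKES")
    part.add_edge(MachineEdge(src, dst, label="src->dst"))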
diff --git a/pacman/model/graphs/machine/source_segmented_sdram_machine_partition.py b/pacman/model/graphs/machine/source_segmented_sdram_machine_partition.py
index 3ba4ebe5b..4cb120edf 100644
--- a/pacman/model/graphs/machine/source_segmented_sdram_machine_partition.py
+++ b/pacman/model/graphs/machine/source_segmented_sdram_machine_partition.py
@@ -12,12 +12,10 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
-from pacman.model.graphs.machine import AbstractMachineEdgePartition
 from spinn_utilities.overrides import overrides
 from pacman.exceptions import (
     PacmanConfigurationException, PartitionMissingEdgesException)
 from pacman.model.graphs import AbstractMultiplePartition
-from pacman.model.graphs.common import EdgeTrafficType
 from pacman.model.graphs.machine import (
     AbstractSDRAMPartition, SDRAMMachineEdge)

@@ -31,7 +29,6 @@ class SourceSegmentedSDRAMMachinePartition(
         "_sdram_base_address",
     ]

-    def __init__(self, identifier, label, pre_vertices):
+    def __init__(self, identifier, pre_vertices):
         """
         :param str identifier: The identifier of the partition
-        :param str label: A label of the partition
@@ -39,17 +36,9 @@ def __init__(self, identifier, pre_vertices):
         :param iterable(MachineVertex) pre_vertices:
             The vertices that an edge in this partition may originate at
         """
         super().__init__(
-            pre_vertices, identifier,
-            allowed_edge_types=SDRAMMachineEdge, constraints=None,
-            label=label, traffic_weight=1,
-            class_name="SourceSegmentedSDRAMMachinePartition")
+            pre_vertices, identifier, allowed_edge_types=SDRAMMachineEdge)
         self._sdram_base_address = None

-    @property
-    @overrides(AbstractMachineEdgePartition.traffic_type)
-    def traffic_type(self):
-        return EdgeTrafficType.SDRAM
-
     def total_sdram_requirements(self):
         """
         :rtype: int
@@ -64,20 +54,19 @@ def sdram_base_address(self):
         return self._sdram_base_address

     @overrides(AbstractMultiplePartition.add_edge)
-    def add_edge(self, edge, graph_code):
+    def add_edge(self, edge):
         # add
-        super().check_edge(edge)
-        super().add_edge(edge, graph_code)
+        super().add_edge(edge)

         # check
         if len(self._destinations.keys()) != 1:
             raise PacmanConfigurationException(
                 "The {} can only support 1 destination vertex".format(
-                    self._class_name))
+                    self.__class__.__name__))
         if len(self._pre_vertices[edge.pre_vertex]) != 1:
             raise PacmanConfigurationException(
                 "The {} only supports 1 edge from a given pre vertex.".format(
-                    self._class_name))
+                    self.__class__.__name__))

         if self._sdram_base_address is not None:
             raise PacmanConfigurationException(
@@ -115,11 +104,3 @@ def get_sdram_size_of_region_for(self, vertex):
                 return edge.sdram_size
         else:
             return self.total_sdram_requirements()
-
-    @overrides(AbstractMultiplePartition.clone_without_edges)
-    def clone_without_edges(self):
-        """
-        :rtype: SourceSegmentedSDRAMMachinePartition
-        """
-        return SourceSegmentedSDRAMMachinePartition(
-            self._identifier, self._label, self._pre_vertices)
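A hedged construction sketch for the source-segmented case, where several
sources write disjoint segments read by one destination (vertices are
placeholders implementing AbstractSupportsSDRAMEdges):

    part = SourceSegmentedSDRAMMachinePartition("sync", [src_a, src_b])
    part.add_edge(SDRAMMachineEdge(src_a, dst, "a"))  # one edge per source
    part.add_edge(SDRAMMachineEdge(src_b, dst, "b"))  # same single destination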
diff --git a/pacman/model/partitioner_interfaces/abstract_splitter_partitioner.py b/pacman/model/partitioner_interfaces/abstract_splitter_partitioner.py
index 5552b1d71..f1c6bd89b 100644
--- a/pacman/model/partitioner_interfaces/abstract_splitter_partitioner.py
+++ b/pacman/model/partitioner_interfaces/abstract_splitter_partitioner.py
@@ -12,7 +12,7 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
-from spinn_utilities.abstract_base import AbstractBase, abstractmethod
+from spinn_utilities.abstract_base import AbstractBase


 class AbstractSplitterPartitioner(object, metaclass=AbstractBase):
@@ -22,30 +22,3 @@ class AbstractSplitterPartitioner(object, metaclass=AbstractBase):
     This makes sure that the methods the superclass expects to be there are
     not removed.
     """
-
-    @abstractmethod
-    def create_machine_edge(
-            self, src_machine_vertex, dest_machine_vertex,
-            common_edge_type, app_edge, machine_graph,
-            app_outgoing_edge_partition, resource_tracker):
-        """ Create the machine edge (if needed) and add it to the graph.
-
-        Some implementations of this method are able to detect that the
-        requested edge is not actually needed so never create or add it.
-
-        :param ~pacman.model.graphs.machine.MachineVertex src_machine_vertex:
-            Src machine vertex of a edge
-        :param ~pacman.model.graphs.machine.MachineVertex dest_machine_vertex:
-            Dest machine vertex of a edge
-        :param ~pacman.model.graphs.machine.MachineEdge common_edge_type:
-            The edge type to build
-        :param ~pacman.model.graphs.application.ApplicationEdge app_edge:
-            The app edge this machine edge is to be associated with.
-        :param ~pacman.model.graphs.machine.MachineGraph machine_graph:
-            Machine graph to add edge to.
-        :param app_outgoing_edge_partition: Partition
-        :type app_outgoing_edge_partition:
-            ~pacman.model.graphs.OutgoingEdgePartition
-        :param ~pacman.utilities.utility_objs.ResourceTracker resource_tracker:
-            The resource tracker.
-        """
diff --git a/pacman/model/partitioner_splitters/__init__.py b/pacman/model/partitioner_splitters/__init__.py
index b7dd11b77..46a764d3b 100644
--- a/pacman/model/partitioner_splitters/__init__.py
+++ b/pacman/model/partitioner_splitters/__init__.py
@@ -14,8 +14,8 @@
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.

 from .splitter_one_app_one_machine import SplitterOneAppOneMachine
-from .splitter_slice_legacy import SplitterSliceLegacy
 from .splitter_one_to_one_legacy import SplitterOneToOneLegacy
+from .splitter_fixed_legacy import SplitterFixedLegacy

 __all__ = ['SplitterOneAppOneMachine', 'SplitterOneToOneLegacy',
-           'SplitterSliceLegacy']
+           'SplitterFixedLegacy']
diff --git a/pacman/model/partitioner_splitters/abstract_splitters/__init__.py b/pacman/model/partitioner_splitters/abstract_splitters/__init__.py
index d858d008d..3b624f9ac 100644
--- a/pacman/model/partitioner_splitters/abstract_splitters/__init__.py
+++ b/pacman/model/partitioner_splitters/abstract_splitters/__init__.py
@@ -13,9 +13,6 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.

-from .abstract_dependent_splitter import AbstractDependentSplitter
 from .abstract_splitter_common import AbstractSplitterCommon
-from .abstract_splitter_slice import AbstractSplitterSlice

-__all__ = ["AbstractDependentSplitter", "AbstractSplitterCommon",
-           "AbstractSplitterSlice"]
+__all__ = ["AbstractSplitterCommon"]
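With SplitterSliceLegacy removed, vertices that used it move to the fixed
splitter instead. A hedged wiring sketch (app_vertex is a placeholder
application vertex supporting the legacy partitioner API):

    from pacman.model.partitioner_splitters import SplitterFixedLegacy

    app_vertex.splitter = SplitterFixedLegacy()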
diff --git a/pacman/model/partitioner_splitters/abstract_splitters/abstract_dependent_splitter.py b/pacman/model/partitioner_splitters/abstract_splitters/abstract_dependent_splitter.py
deleted file mode 100644
index 9becf1e05..000000000
--- a/pacman/model/partitioner_splitters/abstract_splitters/abstract_dependent_splitter.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# Copyright (c) 2020-2021 The University of Manchester
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-
-from .abstract_splitter_common import AbstractSplitterCommon
-from pacman.exceptions import (
-    PacmanAlreadyExistsException, PacmanPartitionException)
-
-
-class AbstractDependentSplitter(AbstractSplitterCommon):
-    """ splitter that defines it needs to be run after another splitter.
-    """
-
-    __slots__ = [
-        "_other_splitter"
-    ]
-
-    CIRCULAR_ERROR_MESSAGE = (
-        "Circular dependency found when setting splitter {} to be "
-        "dependent on splitter {}")
-
-    def __init__(self, other_splitter, splitter_name):
-        """ Creates a splitter that must be done after the other unless None.
-
-        :param other_splitter: the other splitter to depend upon
-        :type other_splitter:
-            ~pacman.model.partitioner_interfaces.AbstractSplitterCommon
-            or None
-        :param str splitter_name:
-        """
-        super().__init__(splitter_name)
-        self._other_splitter = other_splitter
-
-    @property
-    def other_splitter(self):
-        """ the other splitter
-
-        :rtype:
-            ~pacman.model.partitioner_interfaces.AbstractSplitterCommon
-            or None
-        """
-        return self._other_splitter
-
-    def check_circular(self, upstream):
-        if upstream == self:
-            return True
-        if not isinstance(upstream, AbstractDependentSplitter):
-            return False
-        return self.check_circular(upstream.other_splitter)
-
-    @other_splitter.setter
-    def other_splitter(self, new_value):
-        """ Supports the delayed setting of the other to depend on
-
-        :param new_value: other splitter
-        :type new_value: AbstractSplitterCommon or None
-        :raise PacmanAlreadyExistsException:
-            If there is already a different other set
-        :raise PacmanPartitionException:
-            If a circular dependency is detected
-        """
-        if (self._other_splitter is not None and
-                self._other_splitter != new_value):
-            raise PacmanAlreadyExistsException(
-                "other_splitter", self._other_splitter)
-        if self.check_circular(new_value):
-            raise PacmanPartitionException(
-                self.CIRCULAR_ERROR_MESSAGE.format(self, new_value))
-        self._other_splitter = new_value
diff --git a/pacman/model/partitioner_splitters/abstract_splitters/abstract_splitter_common.py b/pacman/model/partitioner_splitters/abstract_splitters/abstract_splitter_common.py
index f9145a210..82173d74f 100644
--- a/pacman/model/partitioner_splitters/abstract_splitters/abstract_splitter_common.py
+++ b/pacman/model/partitioner_splitters/abstract_splitters/abstract_splitter_common.py
@@ -31,12 +31,6 @@ class AbstractSplitterCommon(object, metaclass=AbstractBase):
     ]

-    SETTING_SPLITTER_ERROR_MSG = (
-        "The app vertex {} is already governed by this {}. "
-        "And so cannot govern app vertex {}. Please fix and try again.")
-
-    STR_MESSAGE = "{} governing app vertex {}"
-
     FIX_ATOMS_RESET = (
         "Illegal attempt to set fixed atoms per core to {} "
         "as it was already set to {}")
@@ -61,24 +55,13 @@ def __init__(self, splitter_name=None):
         self._governed_app_vertex = None

     def __str__(self):
-        return self.STR_MESSAGE.format(
-            self._splitter_name, self._governed_app_vertex)
+        return (
+            f"{self._splitter_name} governing app vertex"
+            f" {self._governed_app_vertex}")

     def __repr__(self):
         return self.__str__()

-    def _get_map(self, edge_types):
-        """ builds map of machine vertex to edge type
-
-        :param edge_types: the type of edges to add to the dict.
-        :return: dict of vertex as key, edge types as list in value
-        :rtype: dict(MachineVertex, EdgeType)
-        """
-        result = dict()
-        for vertex in self._governed_app_vertex.machine_vertices:
-            result[vertex] = edge_types
-        return result
-
     @property
     def governed_app_vertex(self):
         """
@@ -98,9 +81,8 @@ def set_governed_app_vertex(self, app_vertex):
             return
         if self._governed_app_vertex is not None:
             raise PacmanConfigurationException(
-                self.SETTING_SPLITTER_ERROR_MSG.format(
-                    self._governed_app_vertex, self._splitter_name,
-                    app_vertex))
+                f"The app vertex {self._governed_app_vertex} is already"
+                f" governed by this splitter.")
         self._governed_app_vertex = app_vertex
         self.check_supported_constraints()
         app_vertex.splitter = self
@@ -115,112 +97,74 @@ def check_supported_constraints(self):
                 supported_constraints=[],
                 abstract_constraint_type=AbstractPartitionerConstraint)

-    def split(self, resource_tracker, machine_graph):
-        """ executes splitting
-
-        :param ~pacman.utilities.utility_objs.ResourceTracker resource_tracker:
-            machine resources
-        :param ~pacman.model.graphs.machine.MachineGraph machine_graph:
-            machine graph
-        :return: true if successful, false otherwise
-        :rtype: bool
-        """
-        return self.create_machine_vertices(resource_tracker, machine_graph)
-
     @abstractmethod
-    def create_machine_vertices(self, resource_tracker, machine_graph):
-        """ method for specific splitter objects to use.
+    def create_machine_vertices(self, chip_counter):
+        """ Method for specific splitter objects to override.

-        :param ~pacman.utilities.utility_objs.ResourceTracker resource_tracker:
-            machine resources
-        :param ~pacman.model.graphs.machine.MachineGraph machine_graph:
-            machine graph
-        :return: true if successful, false otherwise
-        :rtype: bool
+        :param ChipCounter chip_counter: counter of used chips
         """

     @abstractmethod
     def get_out_going_slices(self):
-        """ A best effort prediction of the slices of the output vertices.
-
-        If this method is called after create_machine_vertices the splitter
-        should return the actual slices of the output vertices.
-        The second value returned is then always ``True``
+        """ The slices of the output vertices.

-        If this method is called before create_machine_vertices the splitter
-        will have to make an estimate unless the actual slices it will use are
-        already known. The second value returned is ``True`` if and only if
-        the slices will not be changed.
-
-        The output vertices are the ones that will serve as source vertices
-        for external edges. If more than one set of vertices match this
-        description the splitter should use the ones used by the most general
-        edge type/down-stream splitter.
-
-        :return: list of Slices and bool of estimate or not
-        :rtype: tuple(list(~pacman.model.graphs.common.Slice), bool)
+        :return: list of Slices
+        :rtype: list(~pacman.model.graphs.common.Slice)
         """

     @abstractmethod
     def get_in_coming_slices(self):
-        """ A best effort prediction of the slices of the input vertices.
+        """ The slices of the input vertices.

-        If this method is called after create_machine_vertices the splitter
-        should return the actual slices of the input vertices.
-        The second value returned is then always ``True``
+        :return: list of Slices
+        :rtype: list(~pacman.model.graphs.common.Slice)
+        """

-        If this method is called before create_machine_vertices the splitter
-        will have to make an estimate unless the actual slices it will use are
-        already known. The second value returned is ``True`` if and only if
-        the slices will not be changed.
+    @abstractmethod
+    def get_out_going_vertices(self, partition_id):
+        """ Get machine pre vertices.

         The output vertices are the ones that will serve as source vertices
-        for external edges. If more than one set of vertices match this
-        description the splitter should use the ones used by the most general
-        edge type/ down stream splitter.
+        for external edges.

-        :return: the slices incoming to this vertex, bool if estimate or exact
-        :rtype: tuple(list(~pacman.model.graphs.common.Slice), bool)
+        :param str partition_id: The identifier of the outgoing partition
+        :rtype: list(MachineVertex)
         """

     @abstractmethod
-    def get_out_going_vertices(self, edge, outgoing_edge_partition):
-        """ gets pre vertices and their acceptable edge types
+    def get_in_coming_vertices(self, partition_id):
+        """ Get machine post vertices for a given partition.

-        The output vertices are the ones that will serve as source vertices
-        for external edges. If more than one set of vertices match this
-        description the splitter should use the ones used by the most general
-        edge type/ down stream splitter.
+        The input vertices are the ones that will serve as target vertices
+        for external edges. Note this method returns all that could be used
+        for any source machine vertex in the given partition.

-        :param ~pacman.model.graphs.application.ApplicationEdge edge: app edge
-        :param outgoing_edge_partition: outgoing edge partition
-        :type outgoing_edge_partition:
-            ~pacman.model.graphs.OutgoingEdgePartition
-        :return: dict of keys being machine vertices and values are a list
-            of acceptable edge types.
-        :rtype: dict(~pacman.model.graphs.machine.MachineVertex,list(class))
+        :param str partition_id: The identifier of the incoming partition
+        :rtype: list(MachineVertex)
         """

-    @abstractmethod
-    def get_in_coming_vertices(self, edge, outgoing_edge_partition,
-                               src_machine_vertex):
-        """ gets incoming vertices and their acceptable edge types
-
-        The input vertices are the ones that will serve as dest vertices
-        for external edges. If more than one set of vertices match this
-        description the splitter should use the ones used by the most general
-        edge type/ down stream splitter.
-
-        :param ~pacman.model.graphs.application.ApplicationEdge edge: app edge
-        :param outgoing_edge_partition: outgoing edge partition
-        :type outgoing_edge_partition:
-            ~pacman.model.graphs.OutgoingEdgePartition
-        :param ~pacman.model.graphs.machine.MachineVertex src_machine_vertex:
-            the src machine vertex
-        :return: dict of keys being machine vertices and values are a list
-            of acceptable edge types.
-        :rtype: dict(~pacman.model.graphs.machine.MachineVertex,list(class))
+    def get_source_specific_in_coming_vertices(
+            self, source_vertex, partition_id):
+        """ Get machine post vertices for a given source.
+
+        The input vertices are the ones that will serve as target vertices
+        for external edges. Note this method allows filtering of the targets
+        for a specific source machine vertex.
+
+        This default method makes every machine vertex a target for the
+        source. This should be overridden if there are specific machine
+        vertices for any given source vertex.
+
+        :param ApplicationVertex source_vertex:
+            The source to get incoming vertices for
+        :param str partition_id: The identifier of the incoming partition
+        :return: A list of tuples of (target machine vertex, list of source
+            machine or application vertices that should hit the target)
+        :rtype: list(tuple(MachineVertex,
+            list(MachineVertex or ApplicationVertex)))
         """
+        return [(m_vertex, [source_vertex])
+                for m_vertex in self.get_in_coming_vertices(partition_id)]

     @abstractmethod
     def machine_vertices_for_recording(self, variable_to_record):
@@ -235,3 +179,32 @@ def machine_vertices_for_recording(self, variable_to_record):
     def reset_called(self):
         """ reset the splitter to be as if it has not operated a splitting yet.
         """
+
+    def get_same_chip_groups(self):
+        """ Get a list of lists of vertices and SDRAM which must be
+        allocated on the same chip. By default this returns a list of each
+        machine vertex and its SDRAM; override if there are groups of
+        machine vertices on the same chip.
+
+        :rtype: list(tuple(list(MachineVertex), AbstractSDRAM))
+        """
+        return [([v], v.resources_required.sdram)
+                for v in self._governed_app_vertex.machine_vertices]
+
+    def get_internal_multicast_partitions(self):
+        """ Get edge partitions between machine vertices that are to be
+        handled by multicast. Returns an empty list by default; override
+        if there are multicast connections between internal vertices.
+
+        :rtype: list(MulticastEdgePartition)
+        """
+        return []
+
+    def get_internal_sdram_partitions(self):
+        """ Get edge partitions between machine vertices that are to be
+        handled by SDRAM. Returns an empty list by default; override if
+        there are SDRAM connections between internal vertices.
+
+        :rtype: list(AbstractSDRAMPartition)
+        """
+        return []
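A hedged sketch of a minimal concrete splitter against the new
identifier-keyed API above (one machine vertex per application vertex; the
vertex-creation body is elided and all names are illustrative):

    from pacman.model.partitioner_splitters.abstract_splitters import (
        AbstractSplitterCommon)

    class WholeAppVertexSplitter(AbstractSplitterCommon):
        def create_machine_vertices(self, chip_counter):
            ...  # build one machine vertex, remember it on the app vertex

        def get_out_going_slices(self):
            return [v.vertex_slice
                    for v in self._governed_app_vertex.machine_vertices]

        def get_in_coming_slices(self):
            return self.get_out_going_slices()

        def get_out_going_vertices(self, partition_id):
            return list(self._governed_app_vertex.machine_vertices)

        def get_in_coming_vertices(self, partition_id):
            return list(self._governed_app_vertex.machine_vertices)

        def machine_vertices_for_recording(self, variable_to_record):
            return list(self._governed_app_vertex.machine_vertices)

        def reset_called(self):
            pass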
""" + + def get_same_chip_groups(self): + """ Get a list of lists of vertices and sdram which must be + allocated on the same chip. By default this returns a list of each + machine vertex and its SDRAM; override if there are groups of + machine vertices on the same chip. + + :rtype: list(list(MachineVertex), AbstractSDRAM) + """ + return [([v], v.resources_required.sdram) + for v in self._governed_app_vertex.machine_vertices] + + def get_internal_multicast_partitions(self): + """ Get edge partitions between machine vertices that are to be + handled by Multicast. Returns empty by default, override if there + are Multicast connections between internal vertices + + :rtype: list(MulticastEdgePartition) + """ + return [] + + def get_internal_sdram_partitions(self): + """ Get edge partitions between machine vertices that are to be + handled by SDRAM. Returns empty by default, override if there + are SDRAM connections between internal vertices + + :rtype: list(AbstractSDRAMPartition) + """ + return [] diff --git a/pacman/model/partitioner_splitters/abstract_splitters/abstract_splitter_slice.py b/pacman/model/partitioner_splitters/abstract_splitters/abstract_splitter_slice.py deleted file mode 100644 index 397fbfa47..000000000 --- a/pacman/model/partitioner_splitters/abstract_splitters/abstract_splitter_slice.py +++ /dev/null @@ -1,399 +0,0 @@ -# Copyright (c) 2020-2021 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -from spinn_utilities.overrides import overrides -from spinn_utilities.abstract_base import AbstractBase, abstractmethod -from pacman.model.graphs.machine import MachineEdge -from pacman.utilities.algorithm_utilities.\ - partition_algorithm_utilities import ( - get_remaining_constraints) -from pacman.exceptions import PacmanPartitionException, PacmanValueError -from pacman.model.graphs import AbstractVirtual -from pacman.model.graphs.common import Slice -from .abstract_splitter_common import AbstractSplitterCommon - - -class AbstractSplitterSlice(AbstractSplitterCommon, metaclass=AbstractBase): - """ Contains default logic for splitting by slice. - """ - - __slots__ = ["_called"] - - NOT_SUITABLE_VERTEX_ERROR = ( - "The vertex {} cannot be supported by the {} as " - "the vertex does not support the required API of " - "LegacyPartitionerAPI. 
Please inherit from the class in " - "pacman.model.partitioner_interfaces.legacy_partitioner_api and try " - "again.") - - NO_MORE_RESOURCE_AVAILABLE_ERROR = ( - "No more of vertex '{}' would fit on the board:\n" - " Allocated so far: {} atoms\n" - " Request for SDRAM: {}\n" - " Largest SDRAM space: {}") - - FAIL_TO_ALLOCATE_RESOURCES = ( - "Unable to allocate requested resources available to vertex " - "'{}':\n{}") - - MACHINE_LABEL = "{}:{}:{}" - - def __init__(self, splitter_name): - super().__init__(splitter_name) - self._called = False - - @overrides(AbstractSplitterCommon.get_out_going_vertices) - def get_out_going_vertices(self, edge, outgoing_edge_partition): - return self._get_map([MachineEdge]) - - @overrides(AbstractSplitterCommon.get_in_coming_vertices) - def get_in_coming_vertices( - self, edge, outgoing_edge_partition, src_machine_vertex): - return self._get_map([MachineEdge]) - - @overrides(AbstractSplitterCommon.get_out_going_slices) - def get_out_going_slices(self): - if self._called: - return self._governed_app_vertex.vertex_slices, True - else: - return self._estimate_slices() - - @overrides(AbstractSplitterCommon.get_in_coming_slices) - def get_in_coming_slices(self): - if self._called: - return self._governed_app_vertex.vertex_slices, True - else: - return self._estimate_slices() - - @overrides(AbstractSplitterCommon.machine_vertices_for_recording) - def machine_vertices_for_recording(self, variable_to_record): - return list(self._governed_app_vertex.machine_vertices) - - def __split(self, resource_tracker): - """ breaks a app vertex into its machine vertex bits. - - :param ResourceTracker resource_tracker: res tracker. - :return: map of slices to resources. for easier usage later. - :rtype: dict(Slice, ResourceContainer) - """ - slice_resource_map = dict() - n_atoms_placed = 0 - n_atoms = self._governed_app_vertex.n_atoms - max_atoms = self._governed_app_vertex.get_max_atoms_per_core() - while n_atoms_placed < n_atoms: - lo_atom = n_atoms_placed - hi_atom = lo_atom + max_atoms - 1 - if hi_atom >= n_atoms: - hi_atom = n_atoms - 1 - - # Scale down the number of atoms to fit the available resources - used_placements, hi_atom = self._scale_down_resources( - lo_atom, hi_atom, resource_tracker) - - # Update where we are - n_atoms_placed = hi_atom + 1 - - # Create the vertices - for used_resources in used_placements: - slice_resource_map[Slice(lo_atom, hi_atom)] = used_resources - return slice_resource_map - - def _scale_down_resources(self, lo_atom, hi_atom, resource_tracker): - """ Reduce the number of atoms on a core so that it fits within the\ - resources available. 
- - :param int lo_atom: the number of atoms already partitioned - :param int hi_atom: the total number of atoms to place for this vertex - :param ResourceTracker resource_tracker: Tracker of used resources - :return: the list of placements made by this method and the new amount - of atoms partitioned - :rtype: tuple(list(ResourceContainer), int) - :raise PacmanPartitionException: when the vertex cannot be partitioned - """ - - used_placements = list() - - # Find the number of atoms that will fit in each vertex given the - # resources available - min_hi_atom = hi_atom - - # get resources used by vertex - vertex_slice = Slice(lo_atom, hi_atom) - used_resources = self.get_resources_used_by_atoms(vertex_slice) - - x = None - y = None - p = None - ip_tags = None - reverse_ip_tags = None - if not isinstance(self._governed_app_vertex, AbstractVirtual): - - # get max resources_available on machine - resources_available = resource_tracker.\ - get_maximum_constrained_resources_available( - used_resources, self._governed_app_vertex.constraints) - - # Work out the ratio of used to available resources - ratio = self._find_max_ratio( - used_resources, resources_available, - resource_tracker.plan_n_time_steps) - - while ratio > 1.0 and hi_atom >= lo_atom: - # Scale the resources available by the ratio - old_n_atoms = (hi_atom - lo_atom) + 1 - new_n_atoms = int(old_n_atoms / (ratio * 1.1)) - - # Avoid infinite looping - if old_n_atoms == new_n_atoms: - new_n_atoms -= 1 - - # Find the new resource usage - hi_atom = lo_atom + new_n_atoms - 1 - if hi_atom >= lo_atom: - vertex_slice = Slice(lo_atom, hi_atom) - used_resources = ( - self.get_resources_used_by_atoms(vertex_slice)) - ratio = self._find_max_ratio( - used_resources, resources_available, - resource_tracker.plan_n_time_steps) - - # If we couldn't partition, raise an exception - if hi_atom < lo_atom: - raise PacmanPartitionException( - self.NO_MORE_RESOURCE_AVAILABLE_ERROR.format( - self._governed_app_vertex, lo_atom - 1, - used_resources.sdram.get_total_sdram( - resource_tracker.plan_n_time_steps), - resources_available.sdram.get_total_sdram( - resource_tracker.plan_n_time_steps))) - - # Try to scale up until just below the resource usage - used_resources, hi_atom = self._scale_up_resource_usage( - used_resources, hi_atom, lo_atom, resources_available, ratio, - resource_tracker.plan_n_time_steps) - - # If this hi_atom is smaller than the current minimum, update - # the other placements to use (hopefully) less - # resources available - if hi_atom < min_hi_atom: - min_hi_atom = hi_atom - used_placements = self._reallocate_resources( - used_placements, resource_tracker, lo_atom, - hi_atom) - - # Attempt to allocate the resources available for this vertex - # on the machine - try: - (x, y, p, ip_tags, reverse_ip_tags) = \ - resource_tracker.allocate_constrained_resources( - used_resources, self._governed_app_vertex.constraints) - except PacmanValueError as e: - raise PacmanValueError( - self.FAIL_TO_ALLOCATE_RESOURCES.format( - self._governed_app_vertex, e)) from e - - used_placements.append( - (x, y, p, used_resources, ip_tags, reverse_ip_tags)) - - # reduce data to what the parent requires - final_placements = list() - for (_, _, _, used_resources, _, _) in used_placements: - final_placements.append(used_resources) - - return final_placements, min_hi_atom - - def _scale_up_resource_usage( - self, used_resources, hi_atom, lo_atom, resources, ratio, - plan_n_time_steps): - """ Try to push up the number of atoms in a vertex to be as close\ - to the 
available resources as possible - - :param ResourceContainer used_resources: - the resources used by the machine so far - :param int hi_atom: the total number of atoms to place for this vertex - :param int lo_atom: the number of atoms already partitioned - :param int plan_n_time_steps: number of time steps to plan for - :param ResourceContainer resources: - the resource estimate for the vertex for a given number of atoms - :param float ratio: the ratio between max atoms and available resources - :return: the new resources used and the new hi_atom - :rtype: tuple(ResourceContainer, int) - """ - previous_used_resources = used_resources - previous_hi_atom = hi_atom - - # Keep searching while the ratio is still in range, - # the next hi_atom value is still less than the number of atoms, - # and the number of atoms is less than the constrained number of atoms - max_atoms = self._governed_app_vertex.get_max_atoms_per_core() - while ((ratio < 1.0) and ( - hi_atom + 1 < self._governed_app_vertex.n_atoms) and - (hi_atom - lo_atom + 2 < max_atoms)): - - # Update the hi_atom, keeping track of the last hi_atom which - # resulted in a ratio < 1.0 - previous_hi_atom = hi_atom - hi_atom += 1 - - # Find the new resource usage, keeping track of the last usage - # which resulted in a ratio < 1.0 - previous_used_resources = used_resources - vertex_slice = Slice(lo_atom, hi_atom) - used_resources = self.get_resources_used_by_atoms(vertex_slice) - ratio = self._find_max_ratio( - used_resources, resources, plan_n_time_steps) - - # If we have managed to fit everything exactly (unlikely but possible), - # return the matched resources and high atom count - if ratio == 1.0: - return used_resources, hi_atom - - # At this point, the ratio > 1.0, so pick the last allocation of - # resources, which will be < 1.0 - return previous_used_resources, previous_hi_atom - - def _reallocate_resources( - self, used_placements, resource_tracker, lo_atom, hi_atom): - """ Readjusts resource allocation and updates the placement list to\ - take into account the new layout of the atoms - - :param used_placements: - the original list of tuples containing placement data - :type used_placements: list(tuple( - ApplicationVertex, int, int, int, ResourceContainer, - list(tuple(int, int)), list(tuple(int, int)))) - :param ResourceTracker resource_tracker: the tracker of resources - :param int lo_atom: the low atom of a slice to be considered - :param int hi_atom: the high atom of a slice to be considered - :return: the new list of tuples containing placement data - :rtype: list(tuple( - ApplicationVertex, int, int, int, ResourceContainer, - list(tuple(int, int)), list(tuple(int, int)))) - """ - - new_used_placements = list() - for (x, y, p, placed_resources, ip_tags, reverse_ip_tags) in \ - used_placements: - - if not isinstance(self._governed_app_vertex, AbstractVirtual): - # Deallocate the existing resources - resource_tracker.unallocate_resources( - x, y, p, placed_resources, ip_tags, reverse_ip_tags) - - # Get the new resource usage - vertex_slice = Slice(lo_atom, hi_atom) - new_resources = self.get_resources_used_by_atoms(vertex_slice) - - if not isinstance(self._governed_app_vertex, AbstractVirtual): - # Re-allocate the existing resources - (x, y, p, ip_tags, reverse_ip_tags) = ( - resource_tracker.allocate_constrained_resources( - new_resources, self._governed_app_vertex.constraints)) - new_used_placements.append( - (x, y, p, new_resources, ip_tags, reverse_ip_tags)) - return new_used_placements - - @staticmethod - def 
_ratio(numerator, denominator): - """ Get the ratio between two values, with special handling for when\ - the denominator is zero. - - :param int numerator: - :param int denominator: - :rtype: float - """ - if denominator == 0: - return 0.0 - return numerator / denominator - - @classmethod - def _find_max_ratio(cls, required, available, plan_n_time_steps): - """ Find the max ratio between the resources. - - :param ResourceContainer required: the resources used by the vertex - :param ResourceContainer available: - the max resources available from the machine - :param int plan_n_time_steps: number of time steps to plan for - :return: the largest ratio of resources - :rtype: float - """ - cpu_ratio = cls._ratio( - required.cpu_cycles.get_value(), - available.cpu_cycles.get_value()) - dtcm_ratio = cls._ratio( - required.dtcm.get_value(), available.dtcm.get_value()) - sdram_ratio = cls._ratio( - required.sdram.get_total_sdram(plan_n_time_steps), - available.sdram.get_total_sdram(plan_n_time_steps)) - return max((cpu_ratio, dtcm_ratio, sdram_ratio)) - - @overrides(AbstractSplitterCommon.create_machine_vertices) - def create_machine_vertices(self, resource_tracker, machine_graph): - slices_resources_map = self.__split(resource_tracker) - for vertex_slice in slices_resources_map: - machine_vertex = self.create_machine_vertex( - vertex_slice, slices_resources_map[vertex_slice], - self.MACHINE_LABEL.format( - self._governed_app_vertex.label, vertex_slice.lo_atom, - vertex_slice.hi_atom), - get_remaining_constraints(self._governed_app_vertex)) - machine_graph.add_vertex(machine_vertex) - self._called = True - return True - - @abstractmethod - def create_machine_vertex( - self, vertex_slice, resources, label, remaining_constraints): - """ creates a machine vertex - - :param ~pacman.model.graphs.common.Slice vertex_slice: vertex slice - :param ~pacman.utilities.utility_objs.ResourceTracker resources: - resources - :param str label: human readable label for machine vertex. - :param remaining_constraints: none partitioner constraints. - :type remaining_constraints: - iterable(~pacman.model.constraints.AbstractConstraint) - :return: machine vertex - :rtype: ~pacman.model.graphs.machine.MachineVertex - """ - - @abstractmethod - def get_resources_used_by_atoms(self, vertex_slice): - """ gets the resources of a slice of atoms from a given app vertex. - - :param ~pacman.model.graphs.common.Slice vertex_slice: - the slice to find the resources of. - :return: Resource container. - :rtype: ~pacman.model.resources.ResourceContainer - """ - - def _estimate_slices(self): - """ Estimates the slices for when the governed_app_vertex has not\ - already been split. 
- - :return: The slices of this vertex, bool if estimate or exact - :rtype: tuple(list(~pacman.model.graphs.common.Slice), bool - - """ - n_atoms = self._governed_app_vertex.n_atoms - per_core = self._governed_app_vertex.get_max_atoms_per_core() - - return ( - [Slice(lo, min(lo + per_core - 1, n_atoms)) - for lo in range(0, n_atoms, per_core)], False) - - @overrides(AbstractSplitterCommon.reset_called) - def reset_called(self): - self._called = False diff --git a/pacman/model/partitioner_splitters/splitter_fixed_legacy.py b/pacman/model/partitioner_splitters/splitter_fixed_legacy.py new file mode 100644 index 000000000..79616b128 --- /dev/null +++ b/pacman/model/partitioner_splitters/splitter_fixed_legacy.py @@ -0,0 +1,109 @@ +# Copyright (c) 2017-2019 The University of Manchester +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +import logging +from pacman.exceptions import PacmanConfigurationException +from pacman.model.partitioner_interfaces import LegacyPartitionerAPI +from pacman.model.partitioner_splitters.abstract_splitters import ( + AbstractSplitterCommon) +from spinn_utilities.overrides import overrides +from spinn_utilities.log import FormatAdapter +from pacman.utilities.algorithm_utilities\ + .partition_algorithm_utilities import get_remaining_constraints +from pacman.model.graphs.common.slice import Slice + +logger = FormatAdapter(logging.getLogger(__name__)) + + +class SplitterFixedLegacy(AbstractSplitterCommon): + + __slots__ = ["__slices", "__vertex_map"] + + NOT_API_WARNING = ( + "Your vertex is deprecated. Please add a Splitter or " + "inherit from the class in " + "pacman.model.partitioner_interfaces.legacy_partitioner_api") + + NOT_SUITABLE_VERTEX_ERROR = ( + "The vertex {} cannot be supported by the {} as" + " the vertex does not support the required method {} of " + "LegacyPartitionerAPI. 
Please inherit from the class in " + "pacman.model.partitioner_interfaces.legacy_partitioner_api and try " + "again.") + + SPLITTER_NAME = "SplitterFixedLegacy" + + def __init__(self, splitter_name=None): + if splitter_name is None: + splitter_name = self.SPLITTER_NAME + super().__init__(splitter_name) + self.__slices = None + + @overrides(AbstractSplitterCommon.set_governed_app_vertex) + def set_governed_app_vertex(self, app_vertex): + super().set_governed_app_vertex(app_vertex) + if not isinstance(app_vertex, LegacyPartitionerAPI): + for abstractmethod in LegacyPartitionerAPI.abstract_methods(): + check = getattr(app_vertex, abstractmethod, None) + if not check: + raise PacmanConfigurationException( + self.NOT_SUITABLE_VERTEX_ERROR.format( + app_vertex.label, self._splitter_name, + abstractmethod)) + logger.warning(self.NOT_API_WARNING) + + @overrides(AbstractSplitterCommon.get_out_going_vertices) + def get_out_going_vertices(self, partition_id): + return list(self._governed_app_vertex.machine_vertices) + + @overrides(AbstractSplitterCommon.get_in_coming_vertices) + def get_in_coming_vertices(self, partition_id): + return list(self._governed_app_vertex.machine_vertices) + + @overrides(AbstractSplitterCommon.machine_vertices_for_recording) + def machine_vertices_for_recording(self, variable_to_record): + return list(self._governed_app_vertex.machine_vertices) + + @overrides(AbstractSplitterCommon.get_out_going_slices) + def get_out_going_slices(self): + return self.__fixed_slices + + @overrides(AbstractSplitterCommon.get_in_coming_slices) + def get_in_coming_slices(self): + return self.__fixed_slices + + @property + def __fixed_slices(self): + if self.__slices is None: + n_atoms = self._governed_app_vertex.n_atoms + per_core = self._governed_app_vertex.get_max_atoms_per_core() + self.__slices = [Slice(i, min(i + per_core - 1, n_atoms - 1)) + for i in range(0, n_atoms, per_core)] + return self.__slices + + @overrides(AbstractSplitterCommon.create_machine_vertices) + def create_machine_vertices(self, chip_counter): + app_vertex = self._governed_app_vertex + remaining_constraints = get_remaining_constraints(app_vertex) + for vertex_slice in self.__fixed_slices: + resources = app_vertex.get_resources_used_by_atoms(vertex_slice) + chip_counter.add_core(resources) + label = f"MachineVertex for {vertex_slice} of {app_vertex.label}" + machine_vertex = app_vertex.create_machine_vertex( + vertex_slice, resources, label, remaining_constraints) + app_vertex.remember_machine_vertex(machine_vertex) + + @overrides(AbstractSplitterCommon.reset_called) + def reset_called(self): + self.__slices = None diff --git a/pacman/model/partitioner_splitters/splitter_one_app_one_machine.py b/pacman/model/partitioner_splitters/splitter_one_app_one_machine.py index 63d094c3d..60a5782b6 100644 --- a/pacman/model/partitioner_splitters/splitter_one_app_one_machine.py +++ b/pacman/model/partitioner_splitters/splitter_one_app_one_machine.py @@ -17,7 +17,6 @@ from spinn_utilities.overrides import overrides from spinn_utilities.log import FormatAdapter from pacman.exceptions import PacmanConfigurationException -from pacman.model.graphs.machine import MachineEdge from pacman.model.partitioner_splitters.abstract_splitters import ( AbstractSplitterCommon) from pacman.model.graphs.application.abstract import ( @@ -51,29 +50,25 @@ def set_governed_app_vertex(self, app_vertex): super().set_governed_app_vertex(app_vertex) @overrides(AbstractSplitterCommon.create_machine_vertices) - def create_machine_vertices(self, 
resource_tracker, machine_graph): - machine_vertex = self._governed_app_vertex.machine_vertex - resource_tracker.allocate_constrained_resources( - machine_vertex.resources_required, machine_vertex.constraints) - machine_graph.add_vertex(machine_vertex) - return machine_vertex + def create_machine_vertices(self, chip_counter): + chip_counter.add_core( + self._governed_app_vertex.machine_vertex.resources_required) @overrides(AbstractSplitterCommon.get_out_going_slices) def get_out_going_slices(self): - return [self._governed_app_vertex.machine_vertex.vertex_slice], True + return [self._governed_app_vertex.machine_vertex.vertex_slice] @overrides(AbstractSplitterCommon.get_in_coming_slices) def get_in_coming_slices(self): - return [self._governed_app_vertex.machine_vertex.vertex_slice], True + return [self._governed_app_vertex.machine_vertex.vertex_slice] @overrides(AbstractSplitterCommon.get_out_going_vertices) - def get_out_going_vertices(self, edge, outgoing_edge_partition): - return {self._governed_app_vertex.machine_vertex: [MachineEdge]} + def get_out_going_vertices(self, partition_id): + return [self._governed_app_vertex.machine_vertex] @overrides(AbstractSplitterCommon.get_in_coming_vertices) - def get_in_coming_vertices( - self, edge, outgoing_edge_partition, src_machine_vertex): - return {self._governed_app_vertex.machine_vertex: [MachineEdge]} + def get_in_coming_vertices(self, partition_id): + return [self._governed_app_vertex.machine_vertex] @overrides(AbstractSplitterCommon.machine_vertices_for_recording) def machine_vertices_for_recording(self, variable_to_record): diff --git a/pacman/model/partitioner_splitters/splitter_one_to_one_legacy.py b/pacman/model/partitioner_splitters/splitter_one_to_one_legacy.py index cd6ba36e8..c472050be 100644 --- a/pacman/model/partitioner_splitters/splitter_one_to_one_legacy.py +++ b/pacman/model/partitioner_splitters/splitter_one_to_one_legacy.py @@ -18,7 +18,6 @@ from spinn_utilities.log import FormatAdapter from pacman.exceptions import PacmanConfigurationException from pacman.model.graphs.common import Slice -from pacman.model.graphs.machine import MachineEdge from pacman.model.partitioner_splitters.abstract_splitters import ( AbstractSplitterCommon) from pacman.model.partitioner_interfaces import LegacyPartitionerAPI @@ -52,7 +51,7 @@ def __init__(self): self._resources_required = None def __str__(self): - return self.STR_MESSAGE.format(self._governed_app_vertex) + return f"SplitterOneToOneLegacy for {self._governed_app_vertex}" def __repr__(self): return self.__str__() @@ -69,6 +68,7 @@ def set_governed_app_vertex(self, app_vertex): vertex_slice=self._vertex_slice, resources_required=self._resources_required, label=None, constraints=None)) + self._governed_app_vertex.remember_machine_vertex(self._machine_vertex) if not isinstance(app_vertex, LegacyPartitionerAPI): for abstractmethod in LegacyPartitionerAPI.abstract_methods(): check = getattr(app_vertex, abstractmethod, None) @@ -80,28 +80,24 @@ def set_governed_app_vertex(self, app_vertex): logger.warning(self.NOT_API_WARNING) @overrides(AbstractSplitterCommon.create_machine_vertices) - def create_machine_vertices(self, resource_tracker, machine_graph): - resource_tracker.allocate_constrained_resources( - self._resources_required, self._governed_app_vertex.constraints) - machine_graph.add_vertex(self._machine_vertex) - return self._machine_vertex + def create_machine_vertices(self, chip_counter): + chip_counter.add_core(self._resources_required) 
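+
+    # A minimal usage sketch (the vertex, counter and partition names here
+    # are hypothetical, not part of this change): the single machine vertex
+    # is built eagerly in set_governed_app_vertex(), so by the time
+    # create_machine_vertices() runs it only has to record the cost of that
+    # one core with the chip counter:
+    #
+    #     splitter = SplitterOneToOneLegacy()
+    #     splitter.set_governed_app_vertex(my_legacy_app_vertex)
+    #     splitter.create_machine_vertices(my_chip_counter)
+    #     # exactly one machine vertex serves every partition
+    #     assert len(splitter.get_out_going_vertices("my_partition")) == 1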
@overrides(AbstractSplitterCommon.get_out_going_slices) def get_out_going_slices(self): - return [self._vertex_slice], True + return [self._vertex_slice] @overrides(AbstractSplitterCommon.get_in_coming_slices) def get_in_coming_slices(self): - return [self._vertex_slice], True + return [self._vertex_slice] @overrides(AbstractSplitterCommon.get_out_going_vertices) - def get_out_going_vertices(self, edge, outgoing_edge_partition): - return {self._machine_vertex: [MachineEdge]} + def get_out_going_vertices(self, partition_id): + return [self._machine_vertex] @overrides(AbstractSplitterCommon.get_in_coming_vertices) - def get_in_coming_vertices(self, edge, outgoing_edge_partition, - src_machine_vertex): - return {self._machine_vertex: [MachineEdge]} + def get_in_coming_vertices(self, partition_id): + return [self._machine_vertex] @overrides(AbstractSplitterCommon.machine_vertices_for_recording) def machine_vertices_for_recording(self, variable_to_record): diff --git a/pacman/model/partitioner_splitters/splitter_slice_legacy.py b/pacman/model/partitioner_splitters/splitter_slice_legacy.py deleted file mode 100644 index d3ad338df..000000000 --- a/pacman/model/partitioner_splitters/splitter_slice_legacy.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright (c) 2017-2019 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -import logging -from pacman.exceptions import PacmanConfigurationException -from pacman.model.partitioner_interfaces import LegacyPartitionerAPI -from pacman.model.partitioner_splitters.abstract_splitters import ( - AbstractSplitterSlice) -from spinn_utilities.overrides import overrides -from spinn_utilities.log import FormatAdapter - -logger = FormatAdapter(logging.getLogger(__name__)) - - -class SplitterSliceLegacy(AbstractSplitterSlice): - - __slots__ = [] - - NOT_API_WARNING = ( - "Your vertex is deprecated. Please add a Splitter or " - "inherit from the class in " - "pacman.model.partitioner_interfaces.legacy_partitioner_api") - - NOT_SUITABLE_VERTEX_ERROR = ( - "The vertex {} cannot be supported by the {} as" - " the vertex does not support the required method {} of " - "LegacyPartitionerAPI. 
Please inherit from the class in " - "pacman.model.partitioner_interfaces.legacy_partitioner_api and try " - "again.") - - SPLITTER_NAME = "SplitterSliceLegacy" - - def __init__(self, splitter_name=None): - if splitter_name is None: - splitter_name = self.SPLITTER_NAME - super().__init__(splitter_name) - - @overrides(AbstractSplitterSlice.set_governed_app_vertex) - def set_governed_app_vertex(self, app_vertex): - super().set_governed_app_vertex(app_vertex) - if not isinstance(app_vertex, LegacyPartitionerAPI): - for abstractmethod in LegacyPartitionerAPI.abstract_methods(): - check = getattr(app_vertex, abstractmethod, None) - if not check: - raise PacmanConfigurationException( - self.NOT_SUITABLE_VERTEX_ERROR.format( - app_vertex.label, self._splitter_name, - abstractmethod)) - logger.warning(self.NOT_API_WARNING) - - @overrides(AbstractSplitterSlice.create_machine_vertex) - def create_machine_vertex( - self, vertex_slice, resources, label, remaining_constraints): - return self._governed_app_vertex.create_machine_vertex( - vertex_slice, resources, label, remaining_constraints) - - def get_resources_used_by_atoms(self, vertex_slice): - return self._governed_app_vertex.get_resources_used_by_atoms( - vertex_slice) diff --git a/pacman/model/placements/placement.py b/pacman/model/placements/placement.py index 22a1f14c1..daba9de1d 100644 --- a/pacman/model/placements/placement.py +++ b/pacman/model/placements/placement.py @@ -90,6 +90,14 @@ def location(self): """ return (self._x, self._y, self._p) + @property + def xy(self): + """ The (x,y) tuple that represents the chip of this placement. + + :rtype: tuple(int,int) + """ + return (self._x, self._y) + def __eq__(self, other): if not isinstance(other, Placement): return False diff --git a/pacman/model/placements/placements.py b/pacman/model/placements/placements.py index ce303e1d2..7d0d2afb9 100644 --- a/pacman/model/placements/placements.py +++ b/pacman/model/placements/placements.py @@ -1,4 +1,4 @@ -# Copyright (c) 2017-2019 The University of Manchester +# Copyright (c) 2017-2022 The University of Manchester # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -13,6 +13,7 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . +from collections import defaultdict from pacman.exceptions import ( PacmanAlreadyPlacedError, PacmanNotPlacedError, PacmanProcessorAlreadyOccupiedError, PacmanProcessorNotOccupiedError) @@ -23,8 +24,8 @@ class Placements(object): """ __slots__ = [ - # dict of [(x,y,p)] -> placement object. used for fast lookup of a - # vertex given a set of coordinates + # dict of [(x,y)] -> dict of p->placement object. used for fast lookup + # of a vertex given a set of coordinates "_placements", # dict of [machine_vertex] -> placement object. used for fast lookup of @@ -40,7 +41,7 @@ def __init__(self, placements=None): :raise PacmanProcessorAlreadyOccupiedError: If two placements are made to the same processor. 
""" - self._placements = dict() + self._placements = defaultdict(dict) self._machine_vertices = dict() if placements is not None: self.add_placements(placements) @@ -51,7 +52,7 @@ def n_placements(self): :rtype: int """ - return len(self._placements) + return len(self._machine_vertices) def add_placements(self, placements): """ Add some placements @@ -70,13 +71,14 @@ def add_placement(self, placement): :raise PacmanProcessorAlreadyOccupiedError: If two placements are made to the same processor. """ - placement_id = placement.location - if placement_id in self._placements: - raise PacmanProcessorAlreadyOccupiedError(placement_id) + x, y, p = placement.location + if (x, y) in self._placements: + if p in self._placements[(x, y)]: + raise PacmanProcessorAlreadyOccupiedError((x, y, p)) if placement.vertex in self._machine_vertices: raise PacmanAlreadyPlacedError(placement.vertex) - self._placements[placement_id] = placement + self._placements[x, y][p] = placement self._machine_vertices[placement.vertex] = placement def get_vertex_on_processor(self, x, y, p): @@ -91,11 +93,10 @@ def get_vertex_on_processor(self, x, y, p): :raise PacmanProcessorNotOccupiedError: If the processor is not occupied """ - placement_id = (x, y, p) try: - return self._placements[placement_id].vertex + return self._placements[x, y][p].vertex except KeyError as e: - raise PacmanProcessorNotOccupiedError(placement_id) from e + raise PacmanProcessorNotOccupiedError((x, y, p)) from e def get_placement_on_processor(self, x, y, p): """ Return the placement on a specific processor or raises an exception @@ -109,11 +110,18 @@ def get_placement_on_processor(self, x, y, p): :raise PacmanProcessorNotOccupiedError: If the processor is not occupied """ - placement_id = (x, y, p) try: - return self._placements[placement_id] + return self._placements[x, y][p] except KeyError as e: - raise PacmanProcessorNotOccupiedError(placement_id) from e + raise PacmanProcessorNotOccupiedError((x, y, p)) from e + + def is_vertex_placed(self, vertex): + """ Determine if a vertex has been placed + + :param MachineVertex vertex: The vertex to determine the status of + :rtype: bool + """ + return vertex in self._machine_vertices def get_placement_of_vertex(self, vertex): """ Return the placement information for a vertex @@ -128,14 +136,6 @@ def get_placement_of_vertex(self, vertex): except KeyError as e: raise PacmanNotPlacedError(vertex) from e - def get_placed_processors(self): - """ Return an iterable of processors with assigned vertices. - - :return: Iterable of (x, y, p) tuples - :rtype: iterable(tuple(int, int, int)) - """ - return iter(self._placements.keys()) - def is_processor_occupied(self, x, y, p): """ Determine if a processor has a vertex on it @@ -144,7 +144,16 @@ def is_processor_occupied(self, x, y, p): :param int p: Index of processor. :return bool: Whether the processor has an assigned vertex. """ - return (x, y, p) in self._placements + return (x, y) in self._placements and p in self._placements[x, y] + + def n_placements_on_chip(self, x, y): + """ The number of placements on the given chip + :param int x: x coordinate of chip. + :param int y: y coordinate of chip. 
+ """ + if (x, y) not in self._placements: + return 0 + return len(self._placements[x, y]) @property def placements(self): @@ -153,7 +162,24 @@ def placements(self): :return: iterable of placements :rtype: iterable(Placement) """ - return iter(self._placements.values()) + return iter(self._machine_vertices.values()) + + def placements_on_chip(self, x, y): + """ Get the placements on a specific chip + + :param int x: The x-coordinate of the chip + :param int y: The y-coordinate of the chip + :rtype: iterable(Placement) + """ + return self._placements[x, y].values() + + @property + def chips_with_placements(self): + """ Get the chips with placements on them + + :rtype: iterable(tuple(int,int)) + """ + return self._placements.keys() def __repr__(self): output = "" @@ -164,7 +190,7 @@ def __repr__(self): def __iter__(self): """ An iterator for the placements object within """ - return iter(self.placements) + return iter(self._machine_vertices.values()) def __len__(self): - return len(self._placements) + return len(self._machine_vertices) diff --git a/pacman/model/resources/__init__.py b/pacman/model/resources/__init__.py index bd095c584..b81fb8b3d 100644 --- a/pacman/model/resources/__init__.py +++ b/pacman/model/resources/__init__.py @@ -14,7 +14,6 @@ # along with this program. If not, see . from .abstract_sdram import AbstractSDRAM -from .core_tracker import CoreTracker from .constant_sdram import ConstantSDRAM from .core_resource import CoreResource from .cpu_cycles_per_tick_resource import CPUCyclesPerTickResource @@ -22,14 +21,11 @@ from .element_free_space import ElementFreeSpace from .iptag_resource import IPtagResource from .multi_region_sdram import MultiRegionSDRAM -from .pre_allocated_resource_container import \ - PreAllocatedResourceContainer from .resource_container import ResourceContainer from .reverse_iptag_resource import ReverseIPtagResource from .variable_sdram import VariableSDRAM -__all__ = ["AbstractSDRAM", "CoreTracker", "ConstantSDRAM", "CoreResource", +__all__ = ["AbstractSDRAM", "ConstantSDRAM", "CoreResource", "CPUCyclesPerTickResource", "DTCMResource", "ElementFreeSpace", "IPtagResource", "MultiRegionSDRAM", - "PreAllocatedResourceContainer", "ResourceContainer", - "ReverseIPtagResource", "VariableSDRAM"] + "ResourceContainer", "ReverseIPtagResource", "VariableSDRAM"] diff --git a/pacman/model/resources/core_tracker.py b/pacman/model/resources/core_tracker.py deleted file mode 100644 index 055e8c0ac..000000000 --- a/pacman/model/resources/core_tracker.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright (c) 2017-2019 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
- -from spinn_utilities.ordered_set import OrderedSet - - -class CoreTracker(object): - """ Represents the number of cores and sdram left to allocate - """ - - __slots__ = [ - - # The number of cores available after preallocation - "_n_cores", - - # cores available including ones needed for preallocation - "_cores", - - # keep list of counts of the cores per n_cores_available - "_cores_counter", - ] - - def __init__(self, chip, preallocated_resources, cores_counter): - """ - :param ~spinn_machine.Chip chip: - chip whose resources can be allocated - :param preallocated_resources: - :type preallocated_resources: PreAllocatedResourceContainer or None - """ - self._cores = OrderedSet() - for processor in chip.processors: - if not processor.is_monitor: - self._cores.add(processor.processor_id) - self._n_cores = len(self._cores) - if preallocated_resources: - if chip.ip_address: - self._n_cores -= preallocated_resources.cores_ethernet - else: - self._n_cores -= preallocated_resources.cores_all - if chip.virtual: - self._cores_counter = None - else: - self._cores_counter = cores_counter - if self._cores_counter: - self._cores_counter[self._n_cores] += 1 - - @property - def n_cores_available(self): - return self._n_cores - - def is_core_available(self, p): - if p is None: - return self.is_available - else: - return p in self._cores - - def available_core(self): - return self._cores.peek() - - @property - def is_available(self): - return self._n_cores > 0 - - def allocate(self, p): - if p is None: - p = self._cores.pop() - else: - self._cores.remove(p) - if self._cores_counter: - self._cores_counter[self._n_cores] -= 1 - self._n_cores -= 1 - if self._cores_counter: - self._cores_counter[self._n_cores] += 1 - - if self._n_cores <= 0: - self._cores = OrderedSet() - return p - - def deallocate(self, p): - self._cores.add(p) - if self._cores_counter: - self._cores_counter[self._n_cores] -= 1 - self._n_cores += 1 - if self._cores_counter: - self._cores_counter[self._n_cores] += 1 diff --git a/pacman/model/resources/pre_allocated_resource_container.py b/pacman/model/resources/pre_allocated_resource_container.py deleted file mode 100644 index 76d66d8db..000000000 --- a/pacman/model/resources/pre_allocated_resource_container.py +++ /dev/null @@ -1,107 +0,0 @@ -# Copyright (c) 2017-2019 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
- -from .constant_sdram import ConstantSDRAM - - -class PreAllocatedResourceContainer(object): - """ Container object for preallocated resources - """ - - __slots__ = [ - # Sdram to preallocate on all none ethernet Chips - "_sdram_all", - - # sdram to preallocate for Ethernet Chips - # This includes the values from _sdram_all - "_sdram_ethernet", - - # Number of cores to preallocate on all none ethernet chips - "_cores_all", - - # Number of cores to preallocate on ethernet chips - # This includes the cores in cores_all - "_cores_ethernet", - - # iptag resources to be perallocated on all everynets - "_iptag_resources"] - - def __init__(self): - self._sdram_all = ConstantSDRAM(0) - self._sdram_ethernet = ConstantSDRAM(0) - self._cores_all = 0 - self._cores_ethernet = 0 - self._iptag_resources = [] - - @property - def sdram_all(self): - return self._sdram_all - - def add_sdram_all(self, extra): - """ - Add extra sdram to preallocate on all chips including ethernets - - :param AbstractSDRAM extra: Additioanal sdram required - """ - self._sdram_all += extra - self._sdram_ethernet += extra - - @property - def sdram_ethernet(self): - return self._sdram_ethernet - - def add_sdram_ethernet(self, extra): - """ - Add extra sdram to preallocate on ethernet chips - - :param AbstractSDRAM extra: Additioanal sdram required - """ - self._sdram_ethernet += extra - - @property - def cores_all(self): - return self._cores_all - - def add_cores_all(self, extra): - """ - Add extra core requirement for all cores including ethernets - - :param int extra: number of extra cores - """ - self._cores_all += extra - self._cores_ethernet += extra - - @property - def cores_ethernet(self): - return self._cores_ethernet - - def add_cores_ethernet(self, extra): - """ - Add extra core requirement for all cores including ethernets - - :param int extra: number of extra cores - """ - self._cores_ethernet += extra - - @property - def iptag_resources(self): - return self._iptag_resources - - def add_iptag_resource(self, extra): - """ - Adds an additional iptag resource to be reserved on all ethernet chips - :param IPtagResource extraa: - """ - self._iptag_resources.append(extra) diff --git a/pacman/model/routing_info/__init__.py b/pacman/model/routing_info/__init__.py index 3d3f0b81b..96235d3bd 100644 --- a/pacman/model/routing_info/__init__.py +++ b/pacman/model/routing_info/__init__.py @@ -18,9 +18,10 @@ from .base_key_and_mask import BaseKeyAndMask from .dict_based_machine_partition_n_keys_map import ( DictBasedMachinePartitionNKeysMap) -from .partition_routing_info import PartitionRoutingInfo from .routing_info import RoutingInfo +from .machine_vertex_routing_info import MachineVertexRoutingInfo +from .app_vertex_routing_info import AppVertexRoutingInfo __all__ = ["AbstractMachinePartitionNKeysMap", "BaseKeyAndMask", - "DictBasedMachinePartitionNKeysMap", "PartitionRoutingInfo", - "RoutingInfo"] + "DictBasedMachinePartitionNKeysMap", "MachineVertexRoutingInfo", + "RoutingInfo", "AppVertexRoutingInfo"] diff --git a/pacman/model/routing_info/app_vertex_routing_info.py b/pacman/model/routing_info/app_vertex_routing_info.py new file mode 100644 index 000000000..522029d68 --- /dev/null +++ b/pacman/model/routing_info/app_vertex_routing_info.py @@ -0,0 +1,108 @@ +# Copyright (c) 2021 The University of Manchester +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at 
your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +from .vertex_routing_info import VertexRoutingInfo +from spinn_machine.multicast_routing_entry import MulticastRoutingEntry +from spinn_utilities.overrides import overrides + +import math +import logging + +logger = logging.getLogger(__name__) + + +class AppVertexRoutingInfo(VertexRoutingInfo): + + __slots__ = [ + "__app_vertex", + "__machine_mask", + "__n_bits_atoms", + "__max_machine_index"] + + def __init__( + self, keys_and_masks, partition_id, app_vertex, machine_mask, + n_bits_atoms, max_machine_index): + super(AppVertexRoutingInfo, self).__init__( + keys_and_masks, partition_id) + self.__app_vertex = app_vertex + self.__machine_mask = machine_mask + self.__n_bits_atoms = n_bits_atoms + self.__max_machine_index = max_machine_index + + def merge_machine_entries(self, entries): + n_entries = len(entries) + (_, _, _, last_r_info) = entries[-1] + is_last = last_r_info.index == self.__max_machine_index + i = 0 + while i < n_entries: + # The maximum number of next entries + (_, _, entry, r_info) = entries[i] + next_entries = self.__n_sequential_entries(r_info.index, n_entries) + + # If that is OK, we can just use them + if next_entries <= (n_entries - i) or is_last: + mask = self.__group_mask(next_entries) + yield MulticastRoutingEntry( + r_info.first_key, mask, defaultable=entry.defaultable, + spinnaker_route=entry.spinnaker_route) + i += next_entries + + # Otherwise, we have to break down into powers of two + else: + entries_to_go = n_entries - i + while entries_to_go > 0: + next_entries = 2 ** int(math.log2(entries_to_go)) + mask = self.__group_mask(next_entries) + (_, _, entry, r_info) = entries[i] + yield MulticastRoutingEntry( + r_info.first_key, mask, + defaultable=entry.defaultable, + spinnaker_route=entry.spinnaker_route) + entries_to_go -= next_entries + i += next_entries + + def __group_mask(self, n_entries): + return self.__machine_mask - ((n_entries - 1) << self.__n_bits_atoms) + + def __n_sequential_entries(self, i, n_entries): + # This finds the maximum number of entries that can be joined following + # the starting entry index. This is calculated by finding how many + # zero bits are in the least significant position in the index. These + # can then be masked out to merge entries. 
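+        # For example, i = 12 (0b1100) gives i & -i = 4 (0b100), so the
+        # four entries at indices 12 to 15 can be covered by one entry
+        # with the bottom two index bits masked out.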
+ # Works because -v == not v + 1 + if i > 0: + return i & -i + return 2 ** int(math.ceil(math.log2(n_entries))) + + @property + @overrides(VertexRoutingInfo.vertex) + def vertex(self): + return self.__app_vertex + + @property + def machine_mask(self): + """ The mask that covers a specific machine vertex + + :rtype: int + """ + + return self.__machine_mask + + @property + def n_bits_atoms(self): + """ The number of bits for the atoms + + :rtype: int + """ + return self.__n_bits_atoms diff --git a/pacman/model/routing_info/machine_vertex_routing_info.py b/pacman/model/routing_info/machine_vertex_routing_info.py new file mode 100644 index 000000000..8e742505c --- /dev/null +++ b/pacman/model/routing_info/machine_vertex_routing_info.py @@ -0,0 +1,64 @@ +# Copyright (c) 2021 The University of Manchester +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +from .vertex_routing_info import VertexRoutingInfo +from spinn_utilities.overrides import overrides + + +class MachineVertexRoutingInfo(VertexRoutingInfo): + """ Associates a machine vertex and partition identifier to its routing + information (keys and masks). + """ + + __slots__ = [ + + # The machine vertex that the keys are allocated to + "__machine_vertex", + + # The index of the machine vertex within the range of the application + # vertex + "__index" + ] + + def __init__(self, keys_and_masks, partition_id, machine_vertex, index): + """ + :param iterable(BaseKeyAndMask) keys_and_masks:\ + The keys allocated to the machine partition + :param str partition_id: The partition to set the keys for + :param MachineVertex machine_vertex: The vertex to set the keys for + :param int index: The index of the machine vertex + """ + super(MachineVertexRoutingInfo, self).__init__( + keys_and_masks, partition_id) + self.__machine_vertex = machine_vertex + self.__index = index + + @property + def machine_vertex(self): + """ The machine vertex + + :rtype: MachineVertex + """ + return self.__machine_vertex + + @property + @overrides(VertexRoutingInfo.vertex) + def vertex(self): + return self.__machine_vertex + + @property + def index(self): + """ The index of the vertex + """ + return self.__index diff --git a/pacman/model/routing_info/routing_info.py b/pacman/model/routing_info/routing_info.py index 5b7e7f1c4..b30981080 100644 --- a/pacman/model/routing_info/routing_info.py +++ b/pacman/model/routing_info/routing_info.py @@ -17,107 +17,50 @@ class RoutingInfo(object): - """ An association of a set of edges to a non-overlapping set of keys\ + """ An association of machine vertices to a non-overlapping set of keys\ and masks. 
""" __slots__ = [ - # Partition information indexed by partition - "_info_by_partition", # Partition information indexed by edge pre vertex and partition ID\ # name - "_info_by_prevertex", - - # Partition information by edge - "_info_by_edge" + "_info" ] - def __init__(self, partition_info_items=None): - """ - :param partition_info_items: The partition information items to add - :type partition_info_items: iterable(PartitionRoutingInfo) or None - :raise PacmanAlreadyExistsException: If there are - two partition information objects with the same partition - """ - - # Partition information indexed by partition - self._info_by_partition = dict() + def __init__(self): # Partition information indexed by edge pre vertex and partition ID # name - self._info_by_prevertex = dict() + self._info = dict() - # Partition information by edge - self._info_by_edge = dict() + def add_routing_info(self, info): + """ Add a routing information item - if partition_info_items is not None: - for partition_info_item in partition_info_items: - self.add_partition_info(partition_info_item) - - def add_partition_info(self, partition_info): - """ Add a partition information item - - :param PartitionRoutingInfo partition_info:\ - The partition information item to add + :param VertexRoutingInfo info: + The routing information item to add :rtype: None - :raise PacmanAlreadyExistsException:\ + :raise PacmanAlreadyExistsException: If the partition is already in the set of edges """ - p = partition_info.partition - - if p in self._info_by_partition: + key = (info.vertex, info.partition_id) + if key in self._info: raise PacmanAlreadyExistsException( - "Partition", str(partition_info)) - if (p.pre_vertex, p.identifier) in self._info_by_prevertex: - raise PacmanAlreadyExistsException( - "Partition", str(partition_info)) - - self._info_by_partition[p] = partition_info - self._info_by_prevertex[p.pre_vertex, p.identifier] = partition_info - - for edge in p.edges: - self._info_by_edge[edge] = partition_info + "Routing information", str(info)) - def get_first_key_from_partition(self, partition): - """ Get the first key associated with a particular partition - - :param AbstractSingleSourcePartition partition:\ - The partition to get the first key of - :return: The routing key, or None if the partition does not exist - :rtype: int or None - """ - if partition in self._info_by_partition: - return self._info_by_partition[ - partition].keys_and_masks[0].key - return None - - def get_routing_info_from_partition(self, partition): - """ Get the routing information for a given partition. - - :param AbstractSingleSourcePartition partition:\ - The partition to obtain routing information about. 
-        :return: the partition_routing_info for the partition, if any exists
-        :rtype: PartitionRoutingInfo or None
-        """
-        if partition in self._info_by_partition:
-            return self._info_by_partition[partition]
-        return None
+        self._info[key] = info
 
     def get_routing_info_from_pre_vertex(self, vertex, partition_id):
-        """ Get routing information for edges with a given partition_id from\
-            a prevertex
+        """ Get routing information for a given partition_id from a vertex
 
-        :param AbstractVertex vertex: The prevertex to search for
+        :param AbstractVertex vertex: The vertex to search for
         :param str partition_id:\
             The ID of the partition for which to get the routing information
         """
-        if (vertex, partition_id) in self._info_by_prevertex:
-            return self._info_by_prevertex[vertex, partition_id]
-        return None
+        return self._info.get((vertex, partition_id))
 
     def get_first_key_from_pre_vertex(self, vertex, partition_id):
-        """ Get the first key for the partition starting at a (pre)vertex
+        """ Get the first key for the partition starting at a vertex
 
         :param AbstractVertex vertex: The vertex which the partition starts at
         :param str partition_id:\
@@ -125,30 +68,14 @@
         :return: The routing key of the partition
         :rtype: int
         """
-        if (vertex, partition_id) in self._info_by_prevertex:
-            return self._info_by_prevertex[
-                vertex, partition_id].keys_and_masks[0].key
-        return None
-
-    def get_routing_info_for_edge(self, edge):
-        """ Get routing information for an edge
-
-        :param AbstractEdge edge: The edge to search for
-        """
-        return self._info_by_edge.get(edge, None)
-
-    def get_first_key_for_edge(self, edge):
-        """ Get routing key for an edge
-
-        :param AbstractEdge edge: The edge to search for
-        """
-        if edge in self._info_by_edge:
-            return self._info_by_edge[edge].keys_and_masks[0].key
+        key = (vertex, partition_id)
+        if key in self._info:
+            return self._info[key].keys_and_masks[0].key
         return None
 
     def __iter__(self):
-        """ Gets an iterator for the partition routing information
+        """ Gets an iterator for the routing information
 
-        :return: a iterator of partition routing information
+        :return: an iterator of routing information
         """
-        return iter(self._info_by_partition.values())
+        return iter(self._info.values())
diff --git a/pacman/model/routing_info/partition_routing_info.py b/pacman/model/routing_info/vertex_routing_info.py
similarity index 64%
rename from pacman/model/routing_info/partition_routing_info.py
rename to pacman/model/routing_info/vertex_routing_info.py
index b7b664ca9..557a64ce2 100644
--- a/pacman/model/routing_info/partition_routing_info.py
+++ b/pacman/model/routing_info/vertex_routing_info.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2017-2019 The University of Manchester
+# Copyright (c) 2021 The University of Manchester
 #
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -15,29 +15,32 @@
 import numpy
 from pacman.exceptions import PacmanConfigurationException
+from spinn_utilities.abstract_base import abstractproperty, AbstractBase
 
 
-class PartitionRoutingInfo(object):
-    """ Associates a partition to its routing information (keys and masks).
+class VertexRoutingInfo(object, metaclass=AbstractBase):
+    """ Associates a partition identifier to its routing information
+        (keys and masks).
""" __slots__ = [ # The keys allocated to the machine partition - "_keys_and_masks", + "__keys_and_masks", - # The partition to set the number of keys for - "_partition" + # The partition identifier of the allocation + "__partition_id" ] - def __init__(self, keys_and_masks, partition): + def __init__(self, keys_and_masks, partition_id): """ :param iterable(BaseKeyAndMask) keys_and_masks:\ The keys allocated to the machine partition - :param AbstractSingleSourcePartition partition:\ - The partition to set the number of keys for + :param str partition_id: The partition to set the keys for + :param MachineVertex machine_vertex: The vertex to set the keys for + :param int index: The index of the machine vertex """ - self._keys_and_masks = keys_and_masks - self._partition = partition + self.__keys_and_masks = keys_and_masks + self.__partition_id = partition_id def get_keys(self, n_keys=None): """ Get the ordered list of individual keys allocated to the edge @@ -47,7 +50,7 @@ def get_keys(self, n_keys=None): :rtype: ~numpy.ndarray """ - max_n_keys = sum(km.n_keys for km in self._keys_and_masks) + max_n_keys = sum(km.n_keys for km in self.__keys_and_masks) if n_keys is None: n_keys = max_n_keys @@ -58,7 +61,7 @@ def get_keys(self, n_keys=None): key_array = numpy.zeros(n_keys, dtype=">u4") offset = 0 - for key_and_mask in self._keys_and_masks: + for key_and_mask in self.__keys_and_masks: _, offset = key_and_mask.get_keys( key_array=key_array, offset=offset, n_keys=(n_keys - offset)) return key_array @@ -68,7 +71,7 @@ def keys_and_masks(self): """ :rtype: iterable(BaseKeyAndMask) """ - return self._keys_and_masks + return self.__keys_and_masks @property def first_key_and_mask(self): @@ -76,7 +79,7 @@ def first_key_and_mask(self): :rtype: BaseKeyAndMask """ - return self._keys_and_masks[0] + return self.__keys_and_masks[0] @property def first_key(self): @@ -84,7 +87,7 @@ def first_key(self): :rtype: int """ - return self._keys_and_masks[0].key + return self.__keys_and_masks[0].key @property def first_mask(self): @@ -92,15 +95,19 @@ def first_mask(self): :rtype: int """ - return self._keys_and_masks[0].mask + return self.__keys_and_masks[0].mask @property - def partition(self): - """ - :rtype: AbstractSingleSourcePartition + def partition_id(self): + """ The identifier of the partition + + :rtype: str """ - return self._partition + return self.__partition_id - def __repr__(self): - return "partition:{}, keys_and_masks:{}".format( - self._partition, self._keys_and_masks) + @abstractproperty + def vertex(self): + """ The vertex of the information + + :rtype: ApplicationVertex or MachineVertex + """ diff --git a/pacman/model/routing_table_by_partition/multicast_routing_table_by_partition.py b/pacman/model/routing_table_by_partition/multicast_routing_table_by_partition.py index f49755ca0..62fe92b4b 100644 --- a/pacman/model/routing_table_by_partition/multicast_routing_table_by_partition.py +++ b/pacman/model/routing_table_by_partition/multicast_routing_table_by_partition.py @@ -13,39 +13,68 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
+from pacman.model.graphs.application import ApplicationVertex
+from pacman.exceptions import PacmanInvalidParameterException
+import logging
+
+log = logging.getLogger(__name__)
+
 
 class MulticastRoutingTableByPartition(object):
     """ A set of multicast routing path objects
     """
 
     __slots__ = [
-        # dict mapping (x,y) -> dict mapping (partition) -> routing table entry
+        # dict mapping (x,y) -> dict mapping (source_vertex, partition_id)
+        # -> routing table entry
         "_router_to_entries_map"
     ]
 
     def __init__(self):
         self._router_to_entries_map = dict()
 
-    def add_path_entry(self, entry, router_x, router_y, partition):
+    def add_path_entry(
+            self, entry, router_x, router_y, source_vertex, partition_id):
        """ Adds a multicast routing path entry
 
         :param MulticastRoutingTableByPartitionEntry entry: the entry to add
         :param int router_x: the x coord of the router
         :param int router_y: the y coord of the router
-        :param AbstractSingleSourcePartition partition:\
-            the partition containing the machine edge
+        :param source_vertex: The source that will send via this entry
+        :type source_vertex: ApplicationVertex or MachineVertex
+        :param str partition_id: The identifier of the partition being sent
         """
 
         # update router_to_entries_map
         key = (router_x, router_y)
-        if key not in self._router_to_entries_map:
-            self._router_to_entries_map[key] = dict()
+        entries = self._router_to_entries_map.get(key)
+        if entries is None:
+            entries = dict()
+            self._router_to_entries_map[key] = entries
+
+        if isinstance(source_vertex, ApplicationVertex):
+            for m_vert in source_vertex.machine_vertices:
+                if (m_vert, partition_id) in entries:
+                    raise PacmanInvalidParameterException(
+                        "source_vertex", source_vertex,
+                        f"Route for Machine vertex {m_vert}, "
+                        f"partition {partition_id} already in table")
+        else:
+            if (source_vertex.app_vertex, partition_id) in entries:
+                raise PacmanInvalidParameterException(
+                    "source_vertex", source_vertex,
+                    f"Route for Application vertex {source_vertex.app_vertex}"
+                    f" partition {partition_id} already in table")
 
-        if partition not in self._router_to_entries_map[key]:
-            self._router_to_entries_map[key][partition] = entry
+        source_key = (source_vertex, partition_id)
+        if source_key not in entries:
+            entries[source_key] = entry
         else:
-            self._router_to_entries_map[key][partition] = entry.merge_entry(
-                self._router_to_entries_map[key][partition])
+            try:
+                entries[source_key] = entry.merge_entry(entries[source_key])
+            except PacmanInvalidParameterException:
+                log.error(f"Error merging entries on {key} for {source_key}")
+                raise
 
     def get_routers(self):
         """ Get the coordinates of all stored routers
@@ -54,29 +83,38 @@
         """
         return iter(self._router_to_entries_map.keys())
 
+    @property
+    def n_routers(self):
+        """ Get the number of routers stored
+
+        :rtype: int
+        """
+        return len(self._router_to_entries_map)
+
     def get_entries_for_router(self, router_x, router_y):
         """ Get the set of multicast path entries assigned to this router
 
         :param int router_x: the x coord of the router
         :param int router_y: the y coord of the router
         :return: all router_path_entries for the router.
-        :rtype: dict(AbstractSingleSourcePartition,\
+        :rtype: dict(tuple(ApplicationVertex or MachineVertex, str),\
             MulticastRoutingTableByPartitionEntry)
         """
         key = (router_x, router_y)
-        if key not in self._router_to_entries_map:
-            return ()
-        return self._router_to_entries_map[key]
+        return self._router_to_entries_map.get(key)

-    def get_entry_on_coords_for_edge(self, partition, router_x, router_y):
+    def get_entry_on_coords_for_edge(
+            self, source_vertex, partition_id, router_x, router_y):
         """ Get an entry from a specific coordinate

-        :param AbstractSingleSourcePartition partition:
+        :param source_vertex:
+        :type source_vertex: ApplicationVertex or MachineVertex
+        :param str partition_id:
         :param int router_x: the x coord of the router
         :param int router_y: the y coord of the router
         :rtype: MulticastRoutingTableByPartitionEntry or None
         """
         entries = self.get_entries_for_router(router_x, router_y)
-        if partition in entries:
-            return entries[partition]
-        return None
+        if entries is None:
+            return None
+        return entries.get((source_vertex, partition_id))
diff --git a/pacman/model/routing_table_by_partition/multicast_routing_table_by_partition_entry.py b/pacman/model/routing_table_by_partition/multicast_routing_table_by_partition_entry.py
index 149da79ea..7502b9e22 100644
--- a/pacman/model/routing_table_by_partition/multicast_routing_table_by_partition_entry.py
+++ b/pacman/model/routing_table_by_partition/multicast_routing_table_by_partition_entry.py
@@ -13,7 +13,24 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.

+from spinn_utilities.log import FormatAdapter
 from pacman.exceptions import PacmanInvalidParameterException
+import logging
+
+log = FormatAdapter(logging.getLogger(__name__))
+
+_INCOMING_LINK_MASK = 0x07000000
+_INCOMING_LINK_SHIFT = 24
+_INCOMING_PROC_MASK = 0xF8000000
+_INCOMING_PROC_SHIFT = 27
+_OUTGOING_LINKS_MASK = 0x0000003F
+_OUTGOING_LINK_1 = 0x00000001
+_OUTGOING_PROCS_MASK = 0x00FFFFC0
+_OUTGOING_PROC_1 = 0x00000040
+_SPINNAKER_ROUTE_MASK = _OUTGOING_LINKS_MASK | _OUTGOING_PROCS_MASK
+_COMPARE_MASK = _INCOMING_LINK_MASK | _SPINNAKER_ROUTE_MASK
+_N_PROCS = 18
+_N_LINKS = 6


 class MulticastRoutingTableByPartitionEntry(object):
@@ -21,17 +38,13 @@ class MulticastRoutingTableByPartitionEntry(object):
     """

     __slots__ = [
-        # the edges this path entry goes down
-        "_out_going_links",
-
-        # the processors this path entry goes to
-        "_out_going_processors",
-
-        # the direction this entry came from in link
-        "_incoming_link",
-
-        # the direction this entry came from
-        "_incoming_processor"
+        # Entry made up of bits as follows (high to low):
+        # | IP = 5 bits | IL = 3 bits | OP = 18 bits | OL = 6 bits |
+        # IP = incoming processor id, plus one (0 means not set)
+        # IL = incoming link id, plus one (0 means not set)
+        # OP = outgoing processors, one bit per processor
+        # OL = outgoing links, one bit per link
+        "_links_and_procs"
     ]

     def __init__(self, out_going_links, outgoing_processors,
@@ -49,43 +62,50 @@ def __init__(self, out_going_links, outgoing_processors,
             the direction this entry came from in link (between 0 and 5)
         :raises PacmanInvalidParameterException:
         """
+        self._links_and_procs = 0
         if isinstance(out_going_links, int):
-            self._out_going_links = set()
-            self._out_going_links.add(out_going_links)
+            self.__set_outgoing_links([out_going_links])
         elif out_going_links is not None:
-            self._out_going_links = set(int(link) for link in out_going_links)
-        else:
-            self._out_going_links = set()
+            self.__set_outgoing_links(out_going_links)

         if isinstance(outgoing_processors, int):
-            self._out_going_processors = set()
-
self._out_going_processors.add(outgoing_processors) + self.__set_outgoing_procs([outgoing_processors]) elif outgoing_processors is not None: - self._out_going_processors = set( - int(p) for p in outgoing_processors) - else: - self._out_going_processors = set() + self.__set_outgoing_procs(outgoing_processors) if incoming_link is not None and incoming_processor is not None: raise PacmanInvalidParameterException( "The incoming direction for a path can only be from either " "one link or one processors, not both", str(incoming_link), str(incoming_processor)) - if (incoming_processor is not None - and not isinstance(incoming_processor, int)): - raise PacmanInvalidParameterException( - "The incoming direction for a path can only be from either " - "one link or one processors, not both", - str(incoming_link), str(incoming_processor)) - if incoming_link is not None and not isinstance(incoming_link, int): - raise PacmanInvalidParameterException( - "The incoming direction for a path can only be from either " - "one link or one processors, not both", - str(incoming_link), str(incoming_processor)) - self._incoming_processor = ( - None if incoming_processor is None else int(incoming_processor)) - self._incoming_link = ( - None if incoming_link is None else int(incoming_link)) + if incoming_processor is not None: + self.__set_incoming_proc(incoming_processor) + elif incoming_link is not None: + self.__set_incoming_link(incoming_link) + + def __set_incoming_link(self, link): + if link > _N_LINKS: + raise ValueError(f"Link {link} > {_N_LINKS}") + # Add one so that 0 means not set + self._links_and_procs |= (link + 1) << _INCOMING_LINK_SHIFT + + def __set_incoming_proc(self, proc): + if proc > _N_PROCS: + raise ValueError(f"Processor {proc} > {_N_PROCS}") + # Add one so that 0 means not set + self._links_and_procs |= (proc + 1) << _INCOMING_PROC_SHIFT + + def __set_outgoing_links(self, links): + for link in links: + if link > _N_LINKS: + raise ValueError(f"Link {link} > {_N_LINKS}") + self._links_and_procs |= _OUTGOING_LINK_1 << link + + def __set_outgoing_procs(self, procs): + for proc in procs: + if proc > _N_PROCS: + raise ValueError(f"Processor {proc} > {_N_PROCS}") + self._links_and_procs |= _OUTGOING_PROC_1 << proc @property def processor_ids(self): @@ -93,7 +113,8 @@ def processor_ids(self): :rtype: set(int) """ - return self._out_going_processors + return set(i for i in range(_N_PROCS) + if self._links_and_procs & (_OUTGOING_PROC_1 << i)) @property def link_ids(self): @@ -101,7 +122,8 @@ def link_ids(self): :rtype: set(int) """ - return self._out_going_links + return set(i for i in range(_N_LINKS) + if self._links_and_procs & (_OUTGOING_LINK_1 << i)) @property def incoming_link(self): @@ -109,20 +131,24 @@ def incoming_link(self): :rtype: int or None """ - return self._incoming_link + link = ((self._links_and_procs & _INCOMING_LINK_MASK) >> + _INCOMING_LINK_SHIFT) + if link == 0: + return None + # Subtract 1 as 0 means not set + return link - 1 @incoming_link.setter def incoming_link(self, incoming_link): - if self._incoming_processor is not None: + if self.incoming_processor is not None: raise Exception( "Entry already has an incoming processor {}".format( - self._incoming_processor)) - if (self._incoming_link is not None and - self._incoming_link != incoming_link): + self.incoming_processor)) + self_link = self.incoming_link + if self_link is not None and self_link != incoming_link: raise Exception( - "Entry already has an incoming link {}".format( - self._incoming_link)) - self._incoming_link = 
int(incoming_link) + "Entry already has an incoming link {}".format(self_link)) + self.__set_incoming_link(incoming_link) @property def incoming_processor(self): @@ -130,43 +156,53 @@ def incoming_processor(self): :rtype: int or None """ - return self._incoming_processor + proc = ((self._links_and_procs & _INCOMING_PROC_MASK) >> + _INCOMING_PROC_SHIFT) + if proc == 0: + return None + # Subtract 1 as 0 means not set + return proc - 1 @incoming_processor.setter def incoming_processor(self, incoming_processor): - if (self._incoming_processor is not None and - self._incoming_processor != incoming_processor): - raise Exception( - "Entry already has an incoming processor {}".format( - self._incoming_processor)) - if self._incoming_link is not None: + if self.incoming_link is not None: raise Exception( "Entry already has an incoming link {}".format( - self._incoming_link)) - self._incoming_processor = int(incoming_processor) + self.incoming_link)) + self_proc = self.incoming_processor + if self_proc is not None and self_proc != incoming_processor: + raise Exception( + "Entry already has an incoming processor {}".format( + self_proc)) + self.__set_incoming_proc(incoming_processor) @property def defaultable(self): """ The defaultable status of the entry """ - if (self._incoming_link is None - or self._incoming_processor is not None - or len(self._out_going_links) != 1 - or self._out_going_processors): + if self.incoming_processor is not None: return False - outgoing_link = next(iter(self._out_going_links)) - return (self._incoming_link + 3) % 6 == outgoing_link + in_link = self.incoming_link + if in_link is None: + return False + out_links = self.link_ids + if len(out_links) != 1: + return False + if self.processor_ids: + return False + out_link = next(iter(out_links)) + return ((in_link + 3) % 6) == out_link @staticmethod - def __merge_noneables(p1, p2, name): + def __merge_none_or_equal(p1, p2, name): if p1 is None: return p2 - if p2 is None or p1 == p2: + if p2 is None or p2 == p1: return p1 raise PacmanInvalidParameterException( name, "invalid merge", - "The two MulticastRoutingTableByPartitionEntry have different " + - name + "s, and so can't be merged") + "The two MulticastRoutingTableByPartitionEntry have " + "different " + name + "s, and so can't be merged") def merge_entry(self, other): """ Merges the another entry with this one and returns a new\ @@ -184,24 +220,38 @@ def merge_entry(self, other): "MulticastRoutingTableByPartitionEntry, and therefore cannot " "be merged.") - # validate and merge - valid_incoming_processor = self.__merge_noneables( - self._incoming_processor, other.incoming_processor, - "incoming_processor") - valid_incoming_link = self.__merge_noneables( - self._incoming_link, other.incoming_link, "incoming_link") - merged_outgoing_processors = self._out_going_processors.union( - other.processor_ids) - merged_outgoing_links = self._out_going_links.union( - other.link_ids) - - return MulticastRoutingTableByPartitionEntry( - merged_outgoing_links, merged_outgoing_processors, - valid_incoming_processor, valid_incoming_link) + # validate incoming + try: + in_proc = self.__merge_none_or_equal( + self.incoming_processor, other.incoming_processor, + "incoming_processor") + in_link = self.__merge_none_or_equal( + self.incoming_link, other.incoming_link, "incoming_link") + if in_proc is not None and in_link is not None: + raise PacmanInvalidParameterException( + "other", "merge error", + f"Cannot merge {other} and {self}: both incoming processor" + " and link are set") + except 
PacmanInvalidParameterException as e: + log.error("Error merging entry {} into {}", other, self) + raise e + + # Set the value directly as faster + entry = MulticastRoutingTableByPartitionEntry(None, None) + entry._links_and_procs = self._links_and_procs | other._links_and_procs + return entry def __repr__(self): return "{}:{}:{}:{{{}}}:{{{}}}".format( - self._incoming_link, self._incoming_processor, + self.incoming_link, self.incoming_processor, self.defaultable, - ", ".join(map(str, self._out_going_links)), - ", ".join(map(str, self._out_going_processors))) + ", ".join(map(str, self.link_ids)), + ", ".join(map(str, self.processor_ids))) + + def has_same_route(self, entry): + return ((self._links_and_procs & _COMPARE_MASK) == + (entry._links_and_procs & _COMPARE_MASK)) + + @property + def spinnaker_route(self): + return self._links_and_procs & _SPINNAKER_ROUTE_MASK diff --git a/pacman/model/routing_tables/multicast_routing_tables.py b/pacman/model/routing_tables/multicast_routing_tables.py index 1d6eaf98a..7c67cee3b 100644 --- a/pacman/model/routing_tables/multicast_routing_tables.py +++ b/pacman/model/routing_tables/multicast_routing_tables.py @@ -26,8 +26,6 @@ class MulticastRoutingTables(object): """ __slots__ = [ - # set that holds routing tables - "_routing_tables", # dict of (x,y) -> routing table "_routing_tables_by_chip", # maximum value for number_of_entries in all tables @@ -41,7 +39,6 @@ def __init__(self, routing_tables=None): :raise PacmanAlreadyExistsException: If any two routing tables are for the same chip """ - self._routing_tables = set() self._routing_tables_by_chip = dict() self._max_number_of_entries = 0 @@ -57,12 +54,6 @@ def add_routing_table(self, routing_table): :raise PacmanAlreadyExistsException: If a routing table already exists for the chip """ - if routing_table in self._routing_tables: - raise PacmanAlreadyExistsException( - "The Routing table {} has already been added to the collection" - " before and therefore already exists".format(routing_table), - str(routing_table)) - if (routing_table.x, routing_table.y) in self._routing_tables_by_chip: raise PacmanAlreadyExistsException( "The Routing table for chip {}:{} already exists in this " @@ -70,7 +61,6 @@ def add_routing_table(self, routing_table): .format(routing_table.x, routing_table.y), str(routing_table)) self._routing_tables_by_chip[(routing_table.x, routing_table.y)] = \ routing_table - self._routing_tables.add(routing_table) self._max_number_of_entries = max( self._max_number_of_entries, routing_table.number_of_entries) @@ -82,7 +72,7 @@ def routing_tables(self): :rtype: iterable(MulticastRoutingTable) :raise None: does not raise any known exceptions """ - return self._routing_tables + return self._routing_tables_by_chip.values() @property def max_number_of_entries(self): @@ -112,7 +102,7 @@ def __iter__(self): :return: iterator of multicast_routing_table """ - return iter(self._routing_tables) + return iter(self._routing_tables_by_chip.values()) def to_json(router_table): diff --git a/pacman/model/routing_tables/uncompressed_multicast_routing_table.py b/pacman/model/routing_tables/uncompressed_multicast_routing_table.py index 0de49293a..279666e3b 100644 --- a/pacman/model/routing_tables/uncompressed_multicast_routing_table.py +++ b/pacman/model/routing_tables/uncompressed_multicast_routing_table.py @@ -18,8 +18,7 @@ import logging from spinn_utilities.log import FormatAdapter from spinn_machine import MulticastRoutingEntry -from pacman.exceptions import ( - PacmanAlreadyExistsException, 
PacmanRoutingException) +from pacman.exceptions import PacmanAlreadyExistsException from pacman.model.routing_tables import AbstractMulticastRoutingTable from spinn_utilities.overrides import overrides @@ -37,19 +36,13 @@ class UnCompressedMulticastRoutingTable(AbstractMulticastRoutingTable): # The y-coordinate of the chip for which this is the routing tables "_y", - # An iterable of routing entries to add to the table - "_multicast_routing_entries", - # dict of multicast routing entries. # (key, mask) -> multicast_routing_entry "_entries_by_key_mask", # counter of how many entries in their multicast routing table are # defaultable - "_number_of_defaulted_routing_entries", - - # dict of multicast routing entries. (key) -> entry - "_entries_by_key" + "_number_of_defaulted_routing_entries" ] def __init__(self, x, y, multicast_routing_entries=None): @@ -68,9 +61,7 @@ def __init__(self, x, y, multicast_routing_entries=None): self._x = x self._y = y self._number_of_defaulted_routing_entries = 0 - self._multicast_routing_entries = list() self._entries_by_key_mask = dict() - self._entries_by_key = dict() if multicast_routing_entries is not None: for multicast_routing_entry in multicast_routing_entries: @@ -94,10 +85,7 @@ def add_multicast_routing_entry(self, multicast_routing_entry): raise PacmanAlreadyExistsException( "Multicast_routing_entry", str(multicast_routing_entry)) - self._entries_by_key_mask[tuple_key] =\ - multicast_routing_entry - self._entries_by_key[routing_entry_key] = multicast_routing_entry - self._multicast_routing_entries.append(multicast_routing_entry) + self._entries_by_key_mask[tuple_key] = multicast_routing_entry # update default routed counter if required if multicast_routing_entry.defaultable: @@ -129,7 +117,7 @@ def multicast_routing_entries(self): :rtype: iterable(~spinn_machine.MulticastRoutingEntry) :raise None: does not raise any known exceptions """ - return self._multicast_routing_entries + return self._entries_by_key_mask.values() @property @overrides(AbstractMulticastRoutingTable.number_of_entries) @@ -139,7 +127,7 @@ def number_of_entries(self): :rtype: int """ - return len(self._multicast_routing_entries) + return len(self._entries_by_key_mask) @property @overrides(AbstractMulticastRoutingTable.number_of_defaultable_entries) @@ -151,51 +139,13 @@ def number_of_defaultable_entries(self): """ return self._number_of_defaulted_routing_entries - def get_entry_by_routing_entry_key(self, routing_entry_key): - """ Get the routing entry associated with the specified key \ - or ``None`` if the routing table does not match the key - - :param int routing_entry_key: the routing key to be searched - :return: the routing entry associated with the routing key_combo or - ``None`` if no such entry exists - :rtype: ~spinn_machine.MulticastRoutingEntry or None - """ - if routing_entry_key in self._entries_by_key: - return self._entries_by_key[routing_entry_key] - return None - - def get_multicast_routing_entry_by_routing_entry_key( - self, routing_entry_key, mask): - """ Get the routing entry associated with the specified key_combo-mask\ - combination or ``None`` if the routing table does not match the\ - key_combo - - :param int routing_entry_key: the routing key to be searched - :param int mask: the routing mask to be searched - :return: the routing entry associated with the routing key_combo or - ``None`` if no such entry exists - :rtype: ~spinn_machine.MulticastRoutingEntry or None - """ - if (routing_entry_key & mask) != routing_entry_key: - raise PacmanRoutingException( - 
"The key {} is changed when masked with the mask {}." - " This is determined to be an error in the tool chain. Please " - "correct this and try again.".format(routing_entry_key, mask)) - - tuple_key = (routing_entry_key, mask) - if tuple_key in self._entries_by_key_mask: - return self._entries_by_key_mask[ - tuple_key] - return None - @overrides(AbstractMulticastRoutingTable.__eq__) def __eq__(self, other): if not isinstance(other, UnCompressedMulticastRoutingTable): return False if self._x != other.x and self._y != other.y: return False - return self._multicast_routing_entries == \ - other.multicast_routing_entries + return self._entries_by_key_mask == other._entries_by_key_mask @overrides(AbstractMulticastRoutingTable.__ne__) def __ne__(self, other): @@ -204,7 +154,7 @@ def __ne__(self, other): @overrides(AbstractMulticastRoutingTable.__repr__) def __repr__(self): entry_string = "" - for entry in self._multicast_routing_entries: + for entry in self.multicast_routing_entries: entry_string += "{}\n".format(entry) return "{}:{}\n\n{}".format(self._x, self._y, entry_string) diff --git a/pacman/model/tags/tags.py b/pacman/model/tags/tags.py index 96598dadd..b9a6ab0b9 100644 --- a/pacman/model/tags/tags.py +++ b/pacman/model/tags/tags.py @@ -134,6 +134,16 @@ def add_reverse_ip_tag(self, reverse_ip_tag, vertex): self._ports_assigned.add( (reverse_ip_tag.board_address, reverse_ip_tag.port)) + @property + def ip_tags_vertices(self): + """ List the (IPTag, vertex) pairs stored + + :rtype: iterable(tuple(IPTag, MachineVertex)) + """ + yield from [(tag, vert) + for vert, tags in self._ip_tags_by_vertex.items() + for tag in tags] + @property def ip_tags(self): """ The IP tags assigned diff --git a/pacman/operations/chip_id_allocator_algorithms/__init__.py b/pacman/operations/chip_id_allocator_algorithms/__init__.py deleted file mode 100644 index e243038c5..000000000 --- a/pacman/operations/chip_id_allocator_algorithms/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (c) 2017-2019 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -from .malloc_based_chip_id_allocator import malloc_based_chip_id_allocator - -__all__ = ['malloc_based_chip_id_allocator', ] diff --git a/pacman/operations/chip_id_allocator_algorithms/malloc_based_chip_id_allocator.py b/pacman/operations/chip_id_allocator_algorithms/malloc_based_chip_id_allocator.py deleted file mode 100644 index 03612b18b..000000000 --- a/pacman/operations/chip_id_allocator_algorithms/malloc_based_chip_id_allocator.py +++ /dev/null @@ -1,133 +0,0 @@ -# Copyright (c) 2017-2019 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -import logging -from spinn_utilities.log import FormatAdapter -from spinn_utilities.progress_bar import ProgressBar -from pacman.exceptions import PacmanConfigurationException -from pacman.model.graphs import ( - AbstractFPGA, AbstractSpiNNakerLink, AbstractVirtual) -from pacman.utilities.algorithm_utilities import machine_algorithm_utilities - -logger = FormatAdapter(logging.getLogger(__name__)) -_LOWER_16_BITS = 0xFFFF - - -class NoFPGALink(PacmanConfigurationException): - def __init__(self, vertex): - super().__init__( - "No FPGA Link {} on FPGA {} found on board {}. This would be " - "true if another chip was found connected at this point".format( - vertex.fpga_link_id, vertex.fpga_id, vertex.board_address)) - - -class NoSpiNNakerLink(PacmanConfigurationException): - def __init__(self, vertex): - super().__init__( - "No SpiNNaker Link {} found on board {}. This would be true if " - "another chip was found connected at this point".format( - vertex.spinnaker_link_id, vertex.board_address)) - - -def malloc_based_chip_id_allocator(machine, graph): - """ - :param ~spinn_machine.Machine machine: - :param graph: - :type graph: Graph - :rtype: ~spinn_machine.Machine - :raises PacmanConfigurationException: - If a virtual chip is in an impossible position. - """ - allocator = _MallocBasedChipIdAllocator() - # pylint:disable=protected-access - return allocator._run(machine, graph) - - -class _MallocBasedChipIdAllocator(object): - """ A Chip ID Allocation Allocator algorithm that keeps track of\ - chip IDs and attempts to allocate them as requested - """ - - __slots__ = [ - # dict of [virtual chip data] = (x,y) - "_virtual_chips" - ] - - def __init__(self): - # we only want one virtual chip per 'link' - self._virtual_chips = dict() - - def _run(self, machine, graph=None): - """ - :param ~spinn_machine.Machine machine: - :param graph: - :type graph: Graph or None - :rtype: ~spinn_machine.Machine - :raises PacmanConfigurationException: - If a virtual chip is in an impossible position. - """ - if graph is not None: - self.allocate_chip_ids(machine, graph) - return machine - - def allocate_chip_ids(self, machine, graph): - """ Go through the chips (real and virtual) and allocate keys for each - - :param ~spinn_machine.Machine machine: - :param Graph graph: - :raises PacmanConfigurationException: - If a virtual chip is in an impossible position. 
- """ - progress = ProgressBar( - graph.n_vertices + machine.n_chips, - "Allocating virtual identifiers") - - # allocate IDs for virtual chips - for vertex in progress.over(graph.vertices): - if isinstance(vertex, AbstractVirtual): - x, y = self._assign_virtual_chip_info( - machine, self._get_link_data(machine, vertex)) - vertex.set_virtual_chip_coordinates(x, y) - - @staticmethod - def _get_link_data(machine, vertex): - if isinstance(vertex, AbstractFPGA): - link_data = machine.get_fpga_link_with_id( - vertex.fpga_id, vertex.fpga_link_id, vertex.board_address) - if link_data is None: - raise NoFPGALink(vertex) - return link_data - elif isinstance(vertex, AbstractSpiNNakerLink): - link_data = machine.get_spinnaker_link_with_id( - vertex.spinnaker_link_id, vertex.board_address) - if link_data is None: - raise NoSpiNNakerLink(vertex) - return link_data - else: - # Ugh; this means we can't handle link data for arbitrary classes - raise PacmanConfigurationException( - "Unknown virtual vertex type {}".format(vertex.__class__)) - - def _assign_virtual_chip_info(self, machine, link_data): - # If we've seen the link data before, return the allocated ID we have - if link_data in self._virtual_chips: - return self._virtual_chips[link_data] - - # Allocate a new ID and cache it for later - chip_id_x, chip_id_y = machine.get_unused_xy() - machine_algorithm_utilities.create_virtual_chip( - machine, link_data, chip_id_x, chip_id_y) - self._virtual_chips[link_data] = (chip_id_x, chip_id_y) - return chip_id_x, chip_id_y diff --git a/pacman/operations/multi_cast_router_check_functionality/valid_routes_checker.py b/pacman/operations/multi_cast_router_check_functionality/valid_routes_checker.py index 5e0f44a4b..d6b28804c 100644 --- a/pacman/operations/multi_cast_router_check_functionality/valid_routes_checker.py +++ b/pacman/operations/multi_cast_router_check_functionality/valid_routes_checker.py @@ -12,10 +12,9 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see . - """ Collection of functions which together validate routes. """ -from collections import namedtuple +from collections import namedtuple, defaultdict import logging from spinn_utilities.ordered_set import OrderedSet from spinn_utilities.progress_bar import ProgressBar @@ -23,9 +22,12 @@ from pacman.exceptions import PacmanRoutingException from pacman.model.constraints.key_allocator_constraints import ( ContiguousKeyRangeContraint) -from pacman.model.graphs.common import EdgeTrafficType +from pacman.model.graphs.application import ApplicationVertex from pacman.utilities.utility_calls import locate_constraints_of_type from pacman.utilities.constants import FULL_MASK +from pacman.utilities.algorithm_utilities.routing_algorithm_utilities import ( + get_app_partitions) + logger = FormatAdapter(logging.getLogger(__name__)) range_masks = {FULL_MASK - ((2 ** i) - 1) for i in range(33)} @@ -36,13 +38,13 @@ _Failure = namedtuple('_Failure', 'router_x router_y keys source_mask') -def validate_routes(machine_graph, placements, routing_infos, +def validate_routes(graph, placements, routing_infos, routing_tables, machine): """ Go though the placements given and check that the routing entries\ within the routing tables support reach the correction destinations\ as well as not producing any cycles. 
- :param MachineGraph machine_graph: the graph + :param ApplicationGraph graph: the graph :param Placements placements: the placements container :param RoutingInfo routing_infos: the routing info container :param MulticastRoutingTables routing_tables: @@ -52,55 +54,60 @@ def validate_routes(machine_graph, placements, routing_infos, found by the search on a given router, or a cycle is detected """ - def traffic_multicast(edge): - return edge.traffic_type == EdgeTrafficType.MULTICAST - - progress = ProgressBar( - placements.placements, - "Verifying the routes from each core travel to the correct locations") - for placement in progress.over(placements.placements): + # Find all partitions that need to be dealt with + partitions = get_app_partitions(graph) + + # Now go through the app edges and route app vertex by app vertex + progress = ProgressBar(len(partitions), "Checking Routes") + for partition in progress.over(partitions): + source = partition.pre_vertex + + # Destination cores by source machine vertices + destinations = defaultdict(OrderedSet) + + for edge in partition.edges: + target = edge.post_vertex + target_vertices = \ + target.splitter.get_source_specific_in_coming_vertices( + source, partition.identifier) + + for tgt, srcs in target_vertices: + place = placements.get_placement_of_vertex(tgt) + for src in srcs: + if isinstance(src, ApplicationVertex): + for s in src.splitter.get_out_going_vertices( + partition.identifier): + destinations[s].add(PlacementTuple( + x=place.x, y=place.y, p=place.p)) + else: + destinations[src].add(PlacementTuple( + x=place.x, y=place.y, p=place.p)) + + outgoing = OrderedSet(source.splitter.get_out_going_vertices( + partition.identifier)) + internal = source.splitter.get_internal_multicast_partitions() + for in_part in internal: + if in_part.partition_id == partition.identifier: + outgoing.add(in_part.pre_vertex) + for edge in in_part.edges: + place = placements.get_placement_of_vertex( + edge.post_vertex) + destinations[in_part.pre_vertex].add(PlacementTuple( + x=place.x, y=place.y, p=place.p)) # locate all placements to which this placement/vertex will # communicate with for a given key_and_mask and search its # determined destinations - - # gather keys and masks per partition - partitions = machine_graph.\ - get_multicast_edge_partitions_starting_at_vertex(placement.vertex) - - n_atoms = placement.vertex.vertex_slice.n_atoms - - for partition in partitions: - r_info = routing_infos.get_routing_info_from_partition( - partition) - is_continuous = _check_if_partition_has_continuous_keys(partition) - if not is_continuous: - logger.warning( - "Due to the none continuous nature of the keys in this " - "partition {}, we cannot check all atoms will be routed " - "correctly, but will check the base key instead", - partition) - - destination_placements = OrderedSet() - - # filter for just multicast edges, we don't check other types of - # edges here. - out_going_edges = filter(traffic_multicast, partition.edges) - - # for every outgoing edge, locate its destination and store it. 
- for outgoing_edge in out_going_edges: - dest_placement = placements.get_placement_of_vertex( - outgoing_edge.post_vertex) - destination_placements.add( - PlacementTuple(x=dest_placement.x, - y=dest_placement.y, - p=dest_placement.p)) + for m_vertex in outgoing: + placement = placements.get_placement_of_vertex(m_vertex) + r_info = routing_infos.get_routing_info_from_pre_vertex( + m_vertex, partition.identifier) # search for these destinations for key_and_mask in r_info.keys_and_masks: _search_route( - placement, destination_placements, key_and_mask, - routing_tables, machine, n_atoms, is_continuous) + placement, destinations[m_vertex], key_and_mask, + routing_tables, machine, m_vertex.vertex_slice.n_atoms) def _check_if_partition_has_continuous_keys(partition): @@ -115,13 +122,13 @@ def _check_if_partition_has_continuous_keys(partition): def _search_route(source_placement, dest_placements, key_and_mask, - routing_tables, machine, n_atoms, is_continuous): + routing_tables, machine, n_atoms): """ Locate if the routing tables work for the source to desks as\ defined :param Placement source_placement: the placement from which the search started - :param iterable(PlacementTuple) dest_placements: + :param iterable(Placement) dest_placements: the placements to which this trace should visit only once :param BaseKeyAndMask key_and_mask: the key and mask associated with this set of edges @@ -143,9 +150,8 @@ def _search_route(source_placement, dest_placements, key_and_mask, failed_to_cover_all_keys_routers = list() _start_trace_via_routing_tables( - source_placement, key_and_mask, located_destinations, - routing_tables, machine, n_atoms, is_continuous, - failed_to_cover_all_keys_routers) + source_placement, key_and_mask, located_destinations, routing_tables, + machine, n_atoms, failed_to_cover_all_keys_routers) # start removing from located_destinations and check if destinations not # reached @@ -209,7 +215,7 @@ def _search_route(source_placement, dest_placements, key_and_mask, def _start_trace_via_routing_tables( source_placement, key_and_mask, reached_placements, routing_tables, - machine, n_atoms, is_continuous, failed_to_cover_all_keys_routers): + machine, n_atoms, failed_to_cover_all_keys_routers): """ Start the trace, by using the source placement's router and tracing\ from the route. @@ -221,7 +227,6 @@ def _start_trace_via_routing_tables( :param MulticastRoutingTables routing_tables: :param ~spinn_machine.Machine machine: :param int n_atoms: the number of atoms going through this path - :param bool is_continuous: if the keys and atoms mapping is continuous :param list(_Failure) failed_to_cover_all_keys_routers: list of failed routers for all keys :rtype: None @@ -238,7 +243,7 @@ def _start_trace_via_routing_tables( _recursive_trace_to_destinations( entry, current_router_table, source_placement.x, source_placement.y, key_and_mask, visited_routers, - reached_placements, machine, routing_tables, is_continuous, n_atoms, + reached_placements, machine, routing_tables, n_atoms, failed_to_cover_all_keys_routers) @@ -262,7 +267,7 @@ def _check_all_keys_hit_entry(entry, n_atoms, base_key): # locates the next dest position to check def _recursive_trace_to_destinations( entry, current_router, chip_x, chip_y, key_and_mask, visited_routers, - reached_placements, machine, routing_tables, is_continuous, n_atoms, + reached_placements, machine, routing_tables, n_atoms, failed_to_cover_all_keys_routers): """ Recursively search though routing tables until no more entries are\ registered with this key. 
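The key-coverage test applied below now runs unconditionally: an entry covers an atom when masking that atom's key yields the entry's key. A self-contained sketch of the check, as an illustrative stand-in for the actual _check_all_keys_hit_entry code (keys are assumed contiguous from the base key, as the validator requires):

    def uncovered_keys(entry_key, entry_mask, base_key, n_atoms):
        # An atom's key is base_key + atom; it hits the entry when
        # masking it with the entry's mask gives back the entry's key.
        return [base_key + atom for atom in range(n_atoms)
                if (base_key + atom) & entry_mask != entry_key]

    # e.g. a key of 0x1000 under mask 0xFFFFFF00 covers exactly 256 atoms:
    assert uncovered_keys(0x1000, 0xFFFFFF00, 0x1000, 256) == []
    assert uncovered_keys(0x1000, 0xFFFFFF00, 0x1000, 257) == [0x1100]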
@@ -317,20 +322,18 @@ def _recursive_trace_to_destinations( entry = _locate_routing_entry( next_router, key_and_mask.key, n_atoms) - if is_continuous: - bad_entries = _check_all_keys_hit_entry( - entry, n_atoms, key_and_mask.key) - if bad_entries: - failed_to_cover_all_keys_routers.append( - _Failure(next_router.x, next_router.y, - bad_entries, key_and_mask.mask)) + bad_entries = _check_all_keys_hit_entry( + entry, n_atoms, key_and_mask.key) + if bad_entries: + failed_to_cover_all_keys_routers.append( + _Failure(next_router.x, next_router.y, + bad_entries, key_and_mask.mask)) # get next route value from the new router _recursive_trace_to_destinations( entry, next_router, link.destination_x, link.destination_y, key_and_mask, visited_routers, reached_placements, machine, - routing_tables, is_continuous, n_atoms, - failed_to_cover_all_keys_routers) + routing_tables, n_atoms, failed_to_cover_all_keys_routers) # only goes to a processor elif processor_values: diff --git a/pacman/operations/partition_algorithms/splitter_partitioner.py b/pacman/operations/partition_algorithms/splitter_partitioner.py index 68f2beccd..eb28940a0 100644 --- a/pacman/operations/partition_algorithms/splitter_partitioner.py +++ b/pacman/operations/partition_algorithms/splitter_partitioner.py @@ -13,43 +13,26 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -from pacman.exceptions import (PacmanConfigurationException) -from pacman.model.graphs.machine import MachineGraph -from pacman.model.partitioner_interfaces import ( - AbstractSplitterPartitioner, AbstractSlicesConnect) -from pacman.model.partitioner_splitters.abstract_splitters\ - .abstract_dependent_splitter import AbstractDependentSplitter -from pacman.utilities.algorithm_utilities.placer_algorithm_utilities import ( - sort_vertices_by_known_constraints) -from pacman.utilities.utility_objs import ResourceTracker -from spinn_utilities.overrides import overrides +from pacman.model.partitioner_interfaces import AbstractSplitterPartitioner from spinn_utilities.progress_bar import ProgressBar +from pacman.utilities.utility_objs.chip_counter import ChipCounter -def splitter_partitioner( - app_graph, machine, plan_n_time_steps, pre_allocated_resources=None): +def splitter_partitioner(app_graph, plan_n_time_steps): """ :param ApplicationGraph app_graph: The application_graph to partition - :param ~spinn_machine.Machine machine: - The machine with respect to which to partition the application - graph :param plan_n_time_steps: the number of time steps to plan to run for :type plan_n_time_steps: int or None - :param pre_allocated_resources: - res needed to be preallocated before making new machine vertices - :type pre_allocated_resources: PreAllocatedResourceContainer or None :return: - A machine_graph of partitioned vertices and partitioned edges, - and the number of chips needed to satisfy this partitioning. - :rtype: tuple(MachineGraph, int) + The number of chips needed to satisfy this partitioning. + :rtype: int :raise PacmanPartitionException: If something goes wrong with the partitioning """ partitioner = _SplitterPartitioner() # pylint:disable=protected-access - return partitioner._run( - app_graph, machine, plan_n_time_steps, pre_allocated_resources) + return partitioner._run(app_graph, plan_n_time_steps) class _SplitterPartitioner(AbstractSplitterPartitioner): @@ -57,243 +40,26 @@ class _SplitterPartitioner(AbstractSplitterPartitioner): splitter objects. 
""" - MACHINE_EDGE_LABEL = "machine_edge_for_{}" - - __PROGRESS_BAR_VERTICES = "Partitioning graph vertices" - __PROGRESS_BAR_EDGES = "Partitioning graph edges" - - __ERROR_MESSAGE_OF_NO_COMMON_EDGE_TYPE = ( - "There was no common edge type between vertex {} and {}. " - "This means there is no agreed way these 2 vertices can " - "communicate with each and therefore a machine edge cannot " - "be created. Please fix and try again") - - __ERROR_MESSAGE_CONFLICT_FIXED_ATOM = ( - "Vertex has multiple contradictory fixed atom " - "constraints - cannot be both {} and {}") - - __ERROR_MESSAGE_CONFLICT_MAX_ATOMS = ( - "Max size of {} is incompatible with fixed size of {}") - - __ERROR_MESSAGE_FAILED_DIVISION = ( - "Vertex of {} atoms cannot be divided into units of {}") - __slots__ = [] # inherited from AbstractPartitionAlgorithm - def _run( - self, app_graph, machine, plan_n_time_steps, - pre_allocated_resources=None): + def _run(self, app_graph, plan_n_time_steps): """ :param ApplicationGraph app_graph: The application_graph to partition - :param ~spinn_machine.Machine machine: - The machine with respect to which to partition the application - graph - :param plan_n_time_steps: - the number of time steps to plan to run for - :type plan_n_time_steps: int or None - :param pre_allocated_resources: - res needed to be preallocated before making new machine vertices - :type pre_allocated_resources: PreAllocatedResourceContainer or None + :param plan_n_time_steps: the number of time steps to plan to run for :return: - A machine_graph of partitioned vertices and partitioned edges, - and the number of chips needed to satisfy this partitioning. - :rtype: tuple(MachineGraph, int) + the estimated number of chips needed to satisfy this partitioning. + :rtype: int :raise PacmanPartitionException: If something goes wrong with the partitioning """ - # check resource tracker can handle constraints - ResourceTracker.check_constraints(app_graph.vertices) - - # get the setup objects - (machine_graph, resource_tracker, vertices, progress) = ( - self.__setup_objects( - app_graph, machine, plan_n_time_steps, - pre_allocated_resources)) + vertices = app_graph.vertices + progress = ProgressBar(len(vertices), "Partitioning Graph") # Partition one vertex at a time + chip_counter = ChipCounter(plan_n_time_steps) for vertex in progress.over(vertices): - vertex.splitter.split(resource_tracker, machine_graph) - - # process edges - self.__process_machine_edges( - app_graph, machine_graph, resource_tracker) - - # return the accepted things - return machine_graph, resource_tracker.chips_used - - def __make_dependent_after(self, vertices, dependent_vertices, dependent): - """ orders the vertices so that dependents are split after the\ - things they depend upon. - - :param list(MachineVertex) vertices: machine vertices - :param list(ApplicationVertex) dependent_vertices: - list of dependent vertices - :param ApplicationVertex dependent: - the vertex that's dependent on things. 
- """ - if dependent in dependent_vertices: - other_app_vertex = dependent_vertices[dependent] - # check the other is not also dependent - self.__make_dependent_after( - vertices, dependent_vertices, other_app_vertex) - old_index = vertices.index(dependent) - other_index = vertices.index(other_app_vertex) - if old_index < other_index: - vertices.insert(other_index + 1, vertices.pop(old_index)) - - def order_vertices_for_dependent_splitters(self, vertices): - """ orders the list so that dependent splitters are next to their \ - other splitter in terms of vertex ordering. - - :param iterable(ApplicationVertex) vertices: - the list of application vertices - :return: vertices in list with new ordering - :rtype: iterable(ApplicationVertex) - """ - dependent_vertices = dict() - other_vertices = set() - for vertex in vertices: - if isinstance(vertex.splitter, AbstractDependentSplitter): - other_splitter = vertex.splitter.other_splitter - if other_splitter: - other_app_vertex = other_splitter.governed_app_vertex - other_vertices.add(other_app_vertex) - dependent_vertices[vertex] = other_app_vertex - - for vertex in dependent_vertices: - # As we do the whole dependency chain only start at the bottom - if vertex not in other_vertices: - self.__make_dependent_after( - vertices, dependent_vertices, vertex) - - def __setup_objects( - self, app_graph, machine, plan_n_time_steps, - pre_allocated_resources): - """ sets up the machine_graph, resource_tracker, vertices, \ - progress bar. - - :param ApplicationGraph app_graph: app graph - :param ~spinn_machine.Machine machine: machine - :param int plan_n_time_steps: the number of time steps to run for. - :param pre_allocated_resources: pre allocated res from other systems. - :type PreAllocatedResourceContainer or None - :return: (machine graph, res tracker, verts, progress bar) - :rtype: tuple(MachineGraph, ResourceTracker, list(ApplicationVertex), - ~.ProgressBar) - """ - # Load the vertices and create the machine_graph to fill - machine_graph = MachineGraph( - label="partitioned graph for {}".format(app_graph.label), - application_graph=app_graph) - - resource_tracker = ResourceTracker( - machine, plan_n_time_steps, - preallocated_resources=pre_allocated_resources) - - # sort out vertex's by placement constraints - vertices = sort_vertices_by_known_constraints(app_graph.vertices) - - # Group vertices that are supposed to be the same size - self.order_vertices_for_dependent_splitters(vertices) - - # Set up the progress - progress = ProgressBar( - len(app_graph.vertices), self.__PROGRESS_BAR_VERTICES) - - return machine_graph, resource_tracker, vertices, progress - - def __locate_common_edge_type( - self, pre_edge_types, post_edge_types, src_machine_vertex, - dest_machine_vertex): - """ searches the sets of edge types and finds the common one. if more\ - than one common, is biased towards the destination common and the\ - order of the list. - - :param pre_edge_types: - the edge types the pre vertex can support for transmission - :param post_edge_types: - the edge types the post vertex can support for reception. - :param MachineVertex src_machine_vertex: used for error message - :param MachineVertex dest_machine_vertex: used for error message - :return: MachineEdge class - :rtype: type - :raises PacmanConfigurationException: - If we can't find a workable class - """ - for post_edge_type in post_edge_types: - if post_edge_type in pre_edge_types: - return post_edge_type - - # if iterated over the post edge types and not found a common type. 
- # Blow up coz no way these two can communicate with each other. - raise PacmanConfigurationException( - self.__ERROR_MESSAGE_OF_NO_COMMON_EDGE_TYPE.format( - src_machine_vertex, dest_machine_vertex)) - - def __process_machine_edges( - self, app_graph, machine_graph, resource_tracker): - """ generate the machine edges for the machine graph - - :param ApplicationGraph app_graph: app graph - :param MachineGraph machine_graph: machine graph - :param ResourceTracker resource_tracker: resource tracker - """ - # process edges - progress = ProgressBar( - app_graph.n_outgoing_edge_partitions, self.__PROGRESS_BAR_EDGES) - - # go over outgoing partitions - for app_outgoing_edge_partition in progress.over( - app_graph.outgoing_edge_partitions): - - # go through each edge - for app_edge in app_outgoing_edge_partition.edges: - src_vertices_edge_type_map = ( - app_edge.pre_vertex.splitter.get_out_going_vertices( - app_edge, app_outgoing_edge_partition)) - - # go through each pre vertices - for src_machine_vertex in src_vertices_edge_type_map: - splitter = app_edge.post_vertex.splitter - dest_vertices_edge_type_map = ( - splitter.get_in_coming_vertices( - app_edge, app_outgoing_edge_partition, - src_machine_vertex)) - - # go through the post vertices - for dest_machine_vertex in dest_vertices_edge_type_map: - # get the accepted edge types for each vertex - pre_edge_types = ( - src_vertices_edge_type_map[src_machine_vertex]) - post_edge_types = ( - dest_vertices_edge_type_map[dest_machine_vertex]) - - # locate the common edge type - common_edge_type = self.__locate_common_edge_type( - pre_edge_types, post_edge_types, - src_machine_vertex, dest_machine_vertex) - - self.create_machine_edge( - src_machine_vertex, dest_machine_vertex, - common_edge_type, app_edge, machine_graph, - app_outgoing_edge_partition, resource_tracker) - - @overrides(AbstractSplitterPartitioner.create_machine_edge) - def create_machine_edge( - self, src_machine_vertex, dest_machine_vertex, - common_edge_type, app_edge, machine_graph, - app_outgoing_edge_partition, resource_tracker): - - if (isinstance(app_edge, AbstractSlicesConnect) and not - app_edge.could_connect( - src_machine_vertex, dest_machine_vertex)): - return + vertex.splitter.create_machine_vertices(chip_counter) - # build edge and add to machine graph - machine_edge = common_edge_type( - src_machine_vertex, dest_machine_vertex, app_edge=app_edge, - label=self.MACHINE_EDGE_LABEL.format(app_edge.label)) - machine_graph.add_edge( - machine_edge, app_outgoing_edge_partition.identifier) + return chip_counter.n_chips diff --git a/pacman/operations/placer_algorithms/__init__.py b/pacman/operations/placer_algorithms/__init__.py index b5f0b1f56..ac018accd 100644 --- a/pacman/operations/placer_algorithms/__init__.py +++ b/pacman/operations/placer_algorithms/__init__.py @@ -12,11 +12,6 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
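The separate placer algorithms are replaced below by a single application-level placer. A hedged sketch of how a caller might now invoke it; machine and app_graph are assumed to already exist in the calling context, and the argument names follow the definition of place_application_graph further down:

    from pacman.model.placements import Placements
    from pacman.operations.placer_algorithms import place_application_graph

    # machine: a spinn_machine.Machine; app_graph: an already-partitioned
    # ApplicationGraph; system placements may be pre-filled by the caller
    placements = place_application_graph(
        machine, app_graph, plan_n_timesteps=1000,
        system_placements=Placements())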
+from .application_placer import place_application_graph -from .radial_placer import radial_placer -from .one_to_one_placer import one_to_one_placer -from .spreader_placer import spreader_placer -from .connective_based_placer import connective_based_placer - -__all__ = ['radial_placer', 'one_to_one_placer', "spreader_placer", - 'connective_based_placer'] +__all__ = ['place_application_graph'] diff --git a/pacman/operations/placer_algorithms/application_placer.py b/pacman/operations/placer_algorithms/application_placer.py new file mode 100644 index 000000000..1ec4a5266 --- /dev/null +++ b/pacman/operations/placer_algorithms/application_placer.py @@ -0,0 +1,563 @@ +# Copyright (c) 2021 The University of Manchester +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +from pacman.model.placements import Placements, Placement +from pacman.model.graphs import AbstractVirtual +from pacman.exceptions import ( + PacmanPlaceException, PacmanConfigurationException) +from pacman.utilities.utility_calls import locate_constraints_of_type +from pacman.model.constraints.placer_constraints import ChipAndCoreConstraint +from spinn_utilities.ordered_set import OrderedSet +from spinn_utilities.progress_bar import ProgressBar +from spinn_utilities.log import FormatAdapter +from spinn_utilities.config_holder import get_config_bool +import os +import numpy +import logging + +logger = FormatAdapter(logging.getLogger(__name__)) + + +def place_application_graph( + machine, app_graph, plan_n_timesteps, system_placements, + report_folder=None): + """ Perform placement of an application graph on the machine. 
+ NOTE: app_graph must have been partitioned + """ + + # Track the placements and space + placements = Placements(system_placements) + # board_colours = dict() + spaces = _Spaces(machine, placements, plan_n_timesteps) + + # Go through the application graph by application vertex + progress = ProgressBar(app_graph.n_vertices, "Placing Vertices") + for app_vertex in progress.over(app_graph.vertices): + spaces.restore_chips() + + # Try placements from the next chip, but try again if fails + placed = False + while not placed: + chips_attempted = list() + try: + + same_chip_groups = app_vertex.splitter.get_same_chip_groups() + + if not same_chip_groups: + placed = True + break + + # Start a new space + try: + next_chip_space, space = spaces.get_next_chip_and_space() + except PacmanPlaceException as e: + _place_error( + app_graph, placements, system_placements, e, + plan_n_timesteps, machine, report_folder) + logger.debug(f"Starting placement from {next_chip_space}") + + placements_to_make = list() + + # Go through the groups + last_chip_space = None + for vertices, sdram in same_chip_groups: + vertices_to_place = list() + for vertex in vertices: + # No need to place virtual vertices + if isinstance(vertex, AbstractVirtual): + continue + if not placements.is_vertex_placed(vertex): + vertices_to_place.append(vertex) + sdram = sdram.get_total_sdram(plan_n_timesteps) + n_cores = len(vertices_to_place) + + if _do_constraints(vertices_to_place, sdram, placements, + machine, next_chip_space): + continue + + # Try to find a chip with space; this might result in a + # SpaceExceededException + while not next_chip_space.is_space(n_cores, sdram): + next_chip_space = spaces.get_next_chip_space( + space, last_chip_space) + last_chip_space = None + + # If this worked, store placements to be made + last_chip_space = next_chip_space + chips_attempted.append(next_chip_space.chip) + _store_on_chip( + placements_to_make, vertices_to_place, sdram, + next_chip_space) + + # Now make the placements having confirmed all can be done + placements.add_placements(placements_to_make) + placed = True + logger.debug(f"Used {chips_attempted}") + except _SpaceExceededException: + # This might happen while exploring a space; this may not be + # fatal since the last space might have just been bound by + # existing placements, and there might be bigger spaces out + # there to use + logger.debug(f"Failed, saving {chips_attempted}") + spaces.save_chips(chips_attempted) + chips_attempted.clear() + + if (get_config_bool("Reports", "draw_placements") and + report_folder is not None): + report_file = os.path.join(report_folder, "placements.png") + _draw_placements(machine, report_file, placements, system_placements) + + return placements + + +def _place_error( + app_graph, placements, system_placements, exception, plan_n_timesteps, + machine, report_folder): + unplaceable = list() + vertex_count = 0 + n_vertices = 0 + for app_vertex in app_graph.vertices: + same_chip_groups = app_vertex.splitter.get_same_chip_groups() + app_vertex_placed = True + found_placed_cores = False + for vertices, _sdram in same_chip_groups: + if placements.is_vertex_placed(vertices[0]): + found_placed_cores = True + elif found_placed_cores: + vertex_count += len(vertices) + n_vertices = len(same_chip_groups) + app_vertex_placed = False + break + else: + app_vertex_placed = False + break + if not app_vertex_placed: + unplaceable.append(app_vertex) + + report_file = os.path.join(report_folder, "placements_error.txt") + with open(report_file, 'w') as f: + 
f.write(f"Could not place {len(unplaceable)} of {app_graph.n_vertices}" + " application vertices.\n") + f.write(f" Could not place {vertex_count} of {n_vertices} in the" + " last app vertex\n\n") + for x, y in placements.chips_with_placements: + first = True + for placement in placements.placements_on_chip(x, y): + if system_placements.is_vertex_placed(placement.vertex): + continue + if first: + f.write(f"Chip ({x}, {y}):\n") + first = False + f.write(f" Processor {placement.p}:" + f" Vertex {placement.vertex}\n") + if not first: + f.write("\n") + f.write("\n") + f.write("Not placed:\n") + for app_vertex in unplaceable: + f.write(f"Vertex: {app_vertex}\n") + same_chip_groups = app_vertex.splitter.get_same_chip_groups() + for vertices, sdram in same_chip_groups: + f.write(f" Group of {len(vertices)} vertices uses " + f"{sdram.get_total_sdram(plan_n_timesteps)} " + "bytes of SDRAM:\n") + for vertex in vertices: + f.write(f" Vertex {vertex}") + if placements.is_vertex_placed(vertex): + plce = placements.get_placement_of_vertex(vertex) + f.write(f" (placed at {plce.x}, {plce.y}, {plce.p})") + f.write("\n") + + f.write("\n") + f.write("Unused chips:\n") + for x, y in machine.chip_coordinates: + n_placed = placements.n_placements_on_chip(x, y) + system_placed = system_placements.n_placements_on_chip(x, y) + if n_placed - system_placed == 0: + n_procs = machine.get_chip_at(x, y).n_user_processors + f.write(f" {x}, {y} ({n_procs - system_placed}" + " free cores)\n") + + if (get_config_bool("Reports", "draw_placements_on_error") and + report_folder is not None): + report_file = os.path.join(report_folder, "placements_error.png") + _draw_placements(machine, report_file, placements, system_placements) + + raise PacmanPlaceException( + f" {exception}." + f" Report written to {report_file}.") + + +def _next_colour(): + """ Get the next (random) RGB colour to use for a vertex for placement + drawings + + :rtype: tuple(int, int, int) + """ + return tuple(numpy.concatenate( + (numpy.random.choice(range(256), size=3) / 256, [1.0]))) + + +def _draw_placements(machine, report_file, placements, system_placements): + # pylint: disable=import-error + from spinner.scripts.contexts import PNGContextManager + from spinner.diagrams.machine_map import ( + get_machine_map_aspect_ratio, draw_machine_map) + from spinner import board + from collections import defaultdict + import math + + # Colour the boards by placements + unused = (0.5, 0.5, 0.5, 1.0) + vertex_colours = defaultdict(_next_colour) + board_colours = dict() + for x, y in machine.chip_coordinates: + if (placements.n_placements_on_chip(x, y) == + system_placements.n_placements_on_chip(x, y)): + board_colours[x, y] = unused + else: + vertex = None + for placement in placements.placements_on_chip(x, y): + if not system_placements.is_vertex_placed(placement.vertex): + vertex = placement.vertex + break + if vertex is not None: + board_colours[x, y] = vertex_colours[vertex.app_vertex] + + include_boards = [ + (chip.x, chip.y) for chip in machine.ethernet_connected_chips] + w = math.ceil(machine.width / 12) + h = math.ceil(machine.height / 12) + aspect_ratio = get_machine_map_aspect_ratio(w, h) + image_width = 10000 + image_height = int(image_width * aspect_ratio) + output_filename = report_file + hex_boards = board.create_torus(w, h) + with PNGContextManager( + output_filename, image_width, image_height) as ctx: + draw_machine_map(ctx, image_width, image_height, w, h, hex_boards, + dict(), board_colours, include_boards) + + +class 
_SpaceExceededException(Exception): + pass + + +def _do_constraints(vertices, sdram, placements, machine, next_chip_space): + """ + + :param vertices: + :param sdram: + :param placements: + :param machine: + :param _ChipWithSpace next_chip_space: + :return: + """ + x = None + y = None + constrained = False + for vertex in vertices: + constraints = locate_constraints_of_type( + vertex.constraints, ChipAndCoreConstraint) + for constraint in constraints: + if constrained and (constraint.x != x or constraint.y != y): + raise PacmanConfigurationException( + f"Multiple conflicting constraints: Vertices {vertices}" + " are on the same chip, but constraints say different") + x = constraint.x + y = constraint.y + constrained = True + if constrained: + chip = machine.get_chip_at(x, y) + if chip is None: + raise PacmanConfigurationException( + f"Constrained to chip {x, y} but no such chip") + on_chip = placements.placements_on_chip(x, y) + cores_used = {p.p for p in on_chip} + cores = set(p.processor_id for p in chip.processors + if not p.is_monitor) - cores_used + next_cores = iter(cores) + for vertex in vertices: + next_core = None + constraints = locate_constraints_of_type( + vertex.constraints, ChipAndCoreConstraint) + for constraint in constraints: + if constraint.p is not None: + if next_core is not None and next_core != constraint.p: + raise PacmanConfigurationException( + f"Vertex {vertex} constrained to more than one" + " core") + next_core = constraint.p + if next_core is not None and next_core not in next_cores: + raise PacmanConfigurationException( + f"Core {next_core} on {x}, {y} not available to place" + f" {vertex} on") + if next_core is None: + try: + next_core = next(next_cores) + except StopIteration: + raise PacmanConfigurationException( + f"No more cores available on {x}, {y}: {on_chip}") + placements.add_placement(Placement(vertex, x, y, next_core)) + if next_chip_space.x == x and next_chip_space.y == y: + next_chip_space.cores.remove(next_core) + next_chip_space.use_sdram(sdram) + return True + return False + + +def _store_on_chip(placements_to_make, vertices, sdram, next_chip_space): + """ + + :param placements_to_make: + :param vertices: + :param sdram: + :param _ChipWithSpace next_chip_space: + """ + for vertex in vertices: + core = next_chip_space.use_next_core() + placements_to_make.append(Placement( + vertex, next_chip_space.x, next_chip_space.y, core)) + next_chip_space.use_sdram(sdram) + + +class _Spaces(object): + + __slots__ = ["__machine", "__chips", "__next_chip", "__used_chips", + "__system_placements", "__placements", "__plan_n_timesteps", + "__last_chip_space", "__saved_chips", "__restored_chips"] + + def __init__(self, machine, placements, plan_n_timesteps): + self.__machine = machine + self.__placements = placements + self.__plan_n_timesteps = plan_n_timesteps + self.__chips = iter(_chip_order(machine)) + self.__next_chip = next(self.__chips) + self.__used_chips = set() + self.__last_chip_space = None + self.__saved_chips = OrderedSet() + self.__restored_chips = OrderedSet() + + def __cores_and_sdram(self, chip): + """ + + :param Chip chip: + :rtype: (int, int) + :return: + """ + on_chip = self.__placements.placements_on_chip(chip.x, chip.y) + cores_used = {p.p for p in on_chip} + sdram_used = sum( + p.vertex.resources_required.sdram.get_total_sdram( + self.__plan_n_timesteps) for p in on_chip) + return cores_used, sdram_used + + def get_next_chip_and_space(self): + """ + + :rtype: (_ChipWithSpace, _Space) + """ + try: + if self.__last_chip_space is None: + chip 
= self.__get_next_chip() + cores_used, sdram_used = self.__cores_and_sdram(chip) + self.__last_chip_space = _ChipWithSpace( + chip, cores_used, sdram_used) + self.__used_chips.add(chip) + + # Start a new space by finding all the chips that can be reached + # from the start chip but have not been used + return (self.__last_chip_space, + _Space(self.__last_chip_space.chip)) + + except StopIteration: + raise PacmanPlaceException( + f"No more chips to place on; {self.n_chips_used} of " + f"{self.__machine.n_chips} used") + + def __get_next_chip(self): + """ Get the next unused chip in order, preferring restored chips. + + :rtype: Chip + """ + while self.__restored_chips: + chip = self.__restored_chips.pop(last=False) + if chip not in self.__used_chips: + return chip + while (self.__next_chip in self.__used_chips): + self.__next_chip = next(self.__chips) + return self.__next_chip + + def get_next_chip_space(self, space, last_chip_space): + """ Get the next chip with space from within the given space, first + growing the space with the chips reachable from the last chip used. + + :param _Space space: The space to get the next chip from + :param _ChipWithSpace last_chip_space: The last chip used, or None + :rtype: _ChipWithSpace + """ + # If we are reporting a used chip, update with reachable chips + if last_chip_space is not None: + last_chip = last_chip_space.chip + space.update(self.__usable_from_chip(last_chip)) + + # If no space, error + if not space: + self.__last_chip_space = None + raise _SpaceExceededException( + "No more chips to place on in this space; " + f"{self.n_chips_used} of {self.__machine.n_chips} used") + chip = space.pop() + self.__used_chips.add(chip) + self.__restored_chips.discard(chip) + cores_used, sdram_used = self.__cores_and_sdram(chip) + self.__last_chip_space = _ChipWithSpace(chip, cores_used, sdram_used) + return self.__last_chip_space + + @property + def n_chips_used(self): + """ The number of chips used so far. + + :rtype: int + """ + return len(self.__used_chips) + + def __usable_from_chip(self, chip): + """ Get the chips connected to the given chip that are not yet used. + + :param Chip chip: + :rtype: set(Chip) + """ + chips = OrderedSet() + for link in chip.router.links: + chip_coords = (link.destination_x, link.destination_y) + target_chip = self.__machine.get_chip_at(*chip_coords) + if target_chip not in self.__used_chips: + # Don't place on virtual chips + if not target_chip.virtual: + chips.add(target_chip) + return chips + + def save_chips(self, chips): + """ Remember the given chips so that they can be restored later. + + :param iter(Chip) chips: + """ + self.__saved_chips.update(chips) + + def restore_chips(self): + """ Mark all saved chips as available for placement again. + """ + for chip in self.__saved_chips: + self.__used_chips.remove(chip) + self.__restored_chips.add(chip) + self.__saved_chips.clear() + + +class _Space(object): + __slots__ = ["__same_board_chips", "__remaining_chips", + "__board_x", "__board_y", "__first_chip"] + + def __init__(self, chip): + self.__board_x = chip.nearest_ethernet_x + self.__board_y = chip.nearest_ethernet_y + self.__same_board_chips = OrderedSet() + self.__remaining_chips = OrderedSet() + + def __len__(self): + return len(self.__same_board_chips) + len(self.__remaining_chips) + + def __on_same_board(self, chip): + return (chip.nearest_ethernet_x == self.__board_x and + chip.nearest_ethernet_y == self.__board_y) + + def pop(self): + """ Get the next chip in the space, preferring chips on the same + board as the previous one. + + :rtype: Chip + """ + if self.__same_board_chips: + return self.__same_board_chips.pop(last=False) + if self.__remaining_chips: + next_chip = self.__remaining_chips.pop(last=False) + self.__board_x = next_chip.nearest_ethernet_x + self.__board_y = next_chip.nearest_ethernet_y + to_remove = list() + for chip in self.__remaining_chips: + if self.__on_same_board(chip): + to_remove.append(chip) + self.__same_board_chips.add(chip) + for chip in to_remove: + self.__remaining_chips.remove(chip) + return next_chip + raise StopIteration
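+ + # Chips are handed out board by board: pop() drains the current board + # first, while update() files incoming chips by board + def 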
update(self, chips): + """ Add the given chips to the space, grouping them by board. + + :param iter(Chip) chips: + """ + for chip in chips: + if self.__on_same_board(chip): + self.__same_board_chips.add(chip) + else: + self.__remaining_chips.add(chip) + + +class _ChipWithSpace(object): + """ A chip with space for placement; tracks the unused cores and the + remaining SDRAM of the chip + """ + + __slots__ = ["chip", "cores", "sdram"] + + def __init__(self, chip, used_processors, used_sdram): + self.chip = chip + self.cores = set(p.processor_id for p in chip.processors + if not p.is_monitor) + self.cores -= used_processors + self.sdram = chip.sdram.size - used_sdram + + @property + def x(self): + return self.chip.x + + @property + def y(self): + return self.chip.y + + def is_space(self, n_cores, sdram): + """ True if there are at least n_cores cores and sdram bytes free """ + return len(self.cores) >= n_cores and self.sdram >= sdram + + def use_sdram(self, sdram): + """ Account for the use of the given amount of SDRAM """ + self.sdram -= sdram + + def use_next_core(self): + """ Allocate and return the next free core """ + core = next(iter(self.cores)) + self.cores.remove(core) + return core + + def __repr__(self): + return f"({self.x}, {self.y})" + + +def _chip_order(machine): + """ Iterate the chips of the machine in x-major order. + + :param ~spinn_machine.Machine machine: + :rtype: iterable(Chip) + """ + for x in range(machine.max_chip_x + 1): + for y in range(machine.max_chip_y + 1): + chip = machine.get_chip_at(x, y) + if chip: + yield chip
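
Taken together, these helpers support a greedy sequential placer: _chip_order fixes the order in which chips are tried, _Spaces hands out a _ChipWithSpace to fill plus a _Space of chips reachable from it, and _do_constraints/_store_on_chip commit a group of vertices to a chip. A minimal sketch of the intended drive loop (hypothetical code, not part of this change; it assumes a `groups` iterable of (vertices, sdram) pairs and glosses over the _SpaceExceededException recovery a full placer needs):

def _place_groups(machine, placements, plan_n_timesteps, groups):
    # Hypothetical driver, for illustration only
    spaces = _Spaces(machine, placements, plan_n_timesteps)
    next_chip_space, space = spaces.get_next_chip_and_space()
    for vertices, sdram in groups:
        # Honour any ChipAndCoreConstraints on the group first
        if _do_constraints(vertices, sdram, placements, machine,
                           next_chip_space):
            continue
        # Otherwise walk the space until a chip with room is found
        while not next_chip_space.is_space(len(vertices), sdram):
            next_chip_space = spaces.get_next_chip_space(
                space, next_chip_space)
        placements_to_make = list()
        _store_on_chip(placements_to_make, vertices, sdram, next_chip_space)
        for placement in placements_to_make:
            placements.add_placement(placement)

diff --git a/pacman/operations/placer_algorithms/connective_based_placer.py b/pacman/operations/placer_algorithms/connective_based_placer.py deleted file mode 100644 index d667c7675..000000000 --- a/pacman/operations/placer_algorithms/connective_based_placer.py +++ /dev/null @@ -1,164 +0,0 @@ -# Copyright (c) 2017-2019 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 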
- -import logging -from spinn_utilities.log import FormatAdapter -from spinn_utilities.progress_bar import ProgressBar -from pacman.model.constraints.placer_constraints import ( - AbstractPlacerConstraint) -from pacman.model.placements import Placements -from pacman.operations.placer_algorithms.radial_placer import _RadialPlacer -from pacman.utilities.algorithm_utilities.placer_algorithm_utilities import ( - sort_vertices_by_known_constraints, get_same_chip_vertex_groups) -from pacman.utilities.utility_calls import locate_constraints_of_type -from pacman.utilities.utility_objs import ResourceTracker - -logger = FormatAdapter(logging.getLogger(__name__)) - - -def connective_based_placer(machine_graph, machine, plan_n_timesteps): - """ - Runs a placer that considers connectivity - - A radial algorithm that can place a machine graph onto a\ - machine using a circle out behaviour from a Ethernet at a given point\ - and which will place things that are most connected closest to each\ - other - - :param MachineGraph machine_graph: The machine_graph to place - :param ~spinn_machine.Machine machine: - The machine with respect to which to partition the application - graph - :param int plan_n_timesteps: number of timesteps to plan for - :return: A set of placements - :rtype: ~pacman.model.placements.Placements - :raise PacmanPlaceException: - If something goes wrong with the placement - """ - placer = _ConnectiveBasedPlacer() - # pylint:disable=protected-access - return placer._run(machine_graph, machine, plan_n_timesteps) - - -class _ConnectiveBasedPlacer(_RadialPlacer): - """ A radial algorithm that can place a machine graph onto a\ - machine using a circle out behaviour from a Ethernet at a given point\ - and which will place things that are most connected closest to each\ - other - """ - - __slots__ = [] - - def _run(self, machine_graph, machine, plan_n_timesteps): - """ - :param MachineGraph machine_graph: The machine_graph to place - :param ~spinn_machine.Machine machine: - The machine with respect to which to partition the application - graph - :param int plan_n_timesteps: number of timesteps to plan for - :return: A set of placements - :rtype: ~pacman.model.placements.Placements - :raise PacmanPlaceException: - If something goes wrong with the placement - """ - # check that the algorithm can handle the constraints - self._check_constraints(machine_graph.vertices) - - # Sort the vertices into those with and those without - # placement constraints - placements = Placements() - constrained = list() - unconstrained = set() - for vertex in machine_graph.vertices: - if locate_constraints_of_type( - vertex.constraints, AbstractPlacerConstraint): - constrained.append(vertex) - else: - unconstrained.add(vertex) - - # Iterate over constrained vertices and generate placements - progress = ProgressBar( - machine_graph.n_vertices, "Placing graph vertices") - resource_tracker = ResourceTracker( - machine, plan_n_timesteps, self._generate_radial_chips(machine)) - constrained = sort_vertices_by_known_constraints(constrained) - vertices_on_same_chip = get_same_chip_vertex_groups(machine_graph) - for vertex in progress.over(constrained, False): - self._place_vertex( - vertex, resource_tracker, machine, placements, - vertices_on_same_chip, machine_graph) - - while unconstrained: - # Place the subgraph with the overall most connected vertex - max_connected_vertex = self._find_max_connected_vertex( - unconstrained, machine_graph) - self._place_unconstrained_subgraph( - max_connected_vertex, machine_graph, 
unconstrained, - machine, placements, resource_tracker, progress, - vertices_on_same_chip) - - # finished, so stop progress bar and return placements - progress.end() - return placements - - def _place_unconstrained_subgraph( - self, starting_vertex, machine_graph, unplaced_vertices, - machine, placements, resource_tracker, progress, - vertices_on_same_chip): - # pylint: disable=too-many-arguments - # Keep track of all unplaced_vertices connected to the currently - # placed ones - to_do = set() - to_do.add(starting_vertex) - - while to_do: - # Find the vertex most connected of the currently-to-be-placed ones - vertex = self._find_max_connected_vertex(to_do, machine_graph) - - # Place the vertex - self._place_vertex( - vertex, resource_tracker, machine, placements, - vertices_on_same_chip, machine_graph) - progress.update() - - # Remove from collections of unplaced_vertices to work on - unplaced_vertices.remove(vertex) - to_do.remove(vertex) - - # Add all unplaced_vertices connected to this one to the set - for edge in machine_graph.get_edges_ending_at_vertex(vertex): - if edge.pre_vertex in unplaced_vertices: - to_do.add(edge.pre_vertex) - for edge in machine_graph.get_edges_starting_at_vertex(vertex): - if edge.post_vertex in unplaced_vertices: - to_do.add(edge.post_vertex) - - @staticmethod - def _find_max_connected_vertex(vertices, graph): - max_connected_vertex = None - max_weight = 0 - for vertex in vertices: - in_weight = sum( - edge.pre_vertex.vertex_slice.n_atoms - for edge in graph.get_edges_starting_at_vertex(vertex)) - out_weight = sum( - edge.pre_vertex.vertex_slice.n_atoms - for edge in graph.get_edges_ending_at_vertex(vertex)) - weight = in_weight + out_weight - - if max_connected_vertex is None or weight > max_weight: - max_connected_vertex = vertex - max_weight = weight - return max_connected_vertex diff --git a/pacman/operations/placer_algorithms/one_to_one_placer.py b/pacman/operations/placer_algorithms/one_to_one_placer.py deleted file mode 100644 index 07efddd57..000000000 --- a/pacman/operations/placer_algorithms/one_to_one_placer.py +++ /dev/null @@ -1,302 +0,0 @@ -# Copyright (c) 2017-2019 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
- -from collections import deque -import functools -from spinn_utilities.progress_bar import ProgressBar -from spinn_utilities.ordered_set import OrderedSet -from pacman.exceptions import ( - PacmanException, PacmanInvalidParameterException, PacmanValueError, - PacmanPlaceException) -from pacman.model.placements import Placement, Placements -from pacman.operations.placer_algorithms.radial_placer import _RadialPlacer -from pacman.utilities.utility_objs import ResourceTracker -from pacman.utilities.algorithm_utilities.placer_algorithm_utilities import ( - create_vertices_groups, get_same_chip_vertex_groups, - get_vertices_on_same_chip, create_requirement_collections) -from pacman.model.constraints.placer_constraints import ( - SameChipAsConstraint, ChipAndCoreConstraint, - RadialPlacementFromChipConstraint) -from pacman.utilities.utility_calls import ( - is_single, locate_constraints_of_type) -from pacman.model.graphs import AbstractVirtual - - -def _conflict(x, y, post_x, post_y): - if x is not None and post_x is not None and x != post_x: - return True - if y is not None and post_y is not None and y != post_y: - return True - return False - - -def one_to_one_placer(machine_graph, machine, plan_n_timesteps): - """ Placer that puts vertices which are directly connected to only its\ - destination on the same chip - - :param MachineGraph machine_graph: The machine_graph to place - :param ~spinn_machine.Machine machine: - The machine with respect to which to partition the application - graph - :param int plan_n_timesteps: number of timesteps to plan for - :return: A set of placements - :rtype: Placements - :raise PacmanPlaceException: - If something goes wrong with the placement - """ - placer = _OneToOnePlacer() - # pylint:disable=protected-access - return placer._run(machine_graph, machine, plan_n_timesteps) - - -class _OneToOnePlacer(_RadialPlacer): - """ Placer that puts vertices which are directly connected to only its\ - destination on the same chip - """ - - __slots__ = [] - - def _run(self, machine_graph, machine, plan_n_timesteps): - """ - :param MachineGraph machine_graph: The machine_graph to place - :param ~spinn_machine.Machine machine: - The machine with respect to which to partition the application - graph - :param int plan_n_timesteps: number of timesteps to plan for - :return: A set of placements - :rtype: Placements - :raise PacmanPlaceException: - If something goes wrong with the placement - """ - # Iterate over vertices and generate placements - # +3 covers check_constraints, get_same_chip_vertex_groups and - # create_vertices_groups - progress = ProgressBar( - machine_graph.n_vertices + 3, "Placing graph vertices") - # check that the algorithm can handle the constraints - self._check_constraints( - machine_graph.vertices, - additional_placement_constraints={SameChipAsConstraint}) - progress.update() - - # Get which vertices must be placed on the same chip as another vertex - same_chip_vertex_groups = get_same_chip_vertex_groups(machine_graph) - progress.update() - - # Work out the vertices that should be on the same chip by one-to-one - # connectivity - one_to_one_groups = create_vertices_groups( - machine_graph.vertices, - functools.partial( - self._find_one_to_one_vertices, graph=machine_graph)) - progress.update() - - return self._do_allocation( - one_to_one_groups, same_chip_vertex_groups, machine, - plan_n_timesteps, machine_graph, progress) - - @staticmethod - def _find_one_to_one_vertices(vertex, graph): - """ Find vertices which have one to one connections with the 
given\ - vertex, and where their constraints don't force them onto\ - different chips. - - :param MachineGraph graph: - the graph to look for other one to one vertices - :param MachineVertex vertex: - the vertex to use as a basis for one to one connections - :return: set of one to one vertices - :rtype: set(MachineVertex) - """ - # Virtual vertices can't be forced on other chips - if isinstance(vertex, AbstractVirtual): - return [] - found_vertices = OrderedSet() - vertices_seen = {vertex} - - # look for one to ones leaving this vertex - outgoing = graph.get_edges_starting_at_vertex(vertex) - vertices_to_try = deque( - edge.post_vertex for edge in outgoing - if edge.post_vertex not in vertices_seen) - while vertices_to_try: - next_vertex = vertices_to_try.pop() - if next_vertex not in vertices_seen and \ - not isinstance(next_vertex, AbstractVirtual): - vertices_seen.add(next_vertex) - if is_single(graph.get_edges_ending_at_vertex(next_vertex)): - found_vertices.add(next_vertex) - outgoing = graph.get_edges_starting_at_vertex(next_vertex) - vertices_to_try.extend( - edge.post_vertex for edge in outgoing - if edge.post_vertex not in vertices_seen) - - # look for one to ones entering this vertex - incoming = graph.get_edges_ending_at_vertex(vertex) - vertices_to_try = deque( - edge.pre_vertex for edge in incoming - if edge.pre_vertex not in vertices_seen) - while vertices_to_try: - next_vertex = vertices_to_try.pop() - if next_vertex not in vertices_seen: - vertices_seen.add(next_vertex) - if is_single(graph.get_edges_starting_at_vertex(next_vertex)): - found_vertices.add(next_vertex) - incoming = graph.get_edges_ending_at_vertex(next_vertex) - vertices_to_try.extend( - edge.pre_vertex for edge in incoming - if edge.pre_vertex not in vertices_seen) - - found_vertices.update(get_vertices_on_same_chip(vertex, graph)) - return found_vertices - - def _do_allocation( - self, one_to_one_groups, same_chip_vertex_groups, - machine, plan_n_timesteps, machine_graph, progress): - """ - :param list(set(MachineVertex)) one_to_one_groups: - Groups of vertexes that would be nice on same chip - :param same_chip_vertex_groups: - Mapping of Vertex to the Vertex that must be on the same Chip - :type same_chip_vertex_groups: - dict(MachineVertex, collection(MachineVertex)) - :param ~spinn_machine.Machine machine: - The machine with respect to which to partition the application - graph - :param int plan_n_timesteps: number of timesteps to plan for - :param MachineGraph machine_graph: The machine_graph to place - :param ~spinn_utilities.progress_bar.ProgressBar progress: - :rtype: Placements - """ - - placements = Placements() - - resource_tracker = ResourceTracker( - machine, plan_n_timesteps, self._generate_radial_chips(machine)) - all_vertices_placed = set() - - # RadialPlacementFromChipConstraint won't work here - for vertex in machine_graph.vertices: - for constraint in vertex.constraints: - if isinstance(constraint, RadialPlacementFromChipConstraint): - raise PacmanPlaceException( - "A RadialPlacementFromChipConstraint will not work " - "with the OneToOnePlacer algorithm; use the " - "RadialPlacer algorithm instead") - - # Find and place vertices with hard constraints - for vertex in machine_graph.vertices: - if isinstance(vertex, AbstractVirtual): - virtual_p = 0 - while placements.is_processor_occupied( - vertex.virtual_chip_x, vertex.virtual_chip_y, - virtual_p): - virtual_p += 1 - placements.add_placement(Placement( - vertex, vertex.virtual_chip_x, vertex.virtual_chip_y, - virtual_p)) - 
all_vertices_placed.add(vertex) - elif locate_constraints_of_type( - vertex.constraints, ChipAndCoreConstraint): - self._allocate_same_chip_as_group( - vertex, placements, resource_tracker, - same_chip_vertex_groups, all_vertices_placed, progress, - machine_graph) - - for grouped_vertices in one_to_one_groups: - # Get unallocated vertices and placements of allocated vertices - unallocated = list() - chips = list() - for vert in grouped_vertices: - if vert in all_vertices_placed: - placement = placements.get_placement_of_vertex(vert) - chips.append((placement.x, placement.y)) - else: - unallocated.append(vert) - if not chips: - chips = None - - if 0 < len(unallocated) <=\ - resource_tracker.get_maximum_cores_available_on_a_chip(): - # Try to allocate all vertices to the same chip - self._allocate_one_to_one_group( - resource_tracker, unallocated, progress, placements, chips, - all_vertices_placed, machine_graph) - # if too big or failed go on to other groups first - - # check all have been allocated if not do so now. - for vertex in machine_graph.vertices: - if vertex not in all_vertices_placed: - self._allocate_same_chip_as_group( - vertex, placements, resource_tracker, - same_chip_vertex_groups, all_vertices_placed, - progress, machine_graph) - - progress.end() - return placements - - @staticmethod - def _allocate_one_to_one_group( - resource_tracker, vertices, progress, placements, chips, - all_vertices_placed, machine_graph): - """ - :param ResourceTracker resource_tracker: - :param list(MachineVertex) vertices: - :param ~spinn_utilities.progress_bar.ProgressBar progress: - :param Placements placements: - :param chips: - :type chips: iterable(tuple(int, int)) or None - :param MachineGraph machine_graph: machine graph - :param set(MachineVertex) all_vertices_placed: - :rtype: bool - """ - try: - allocs = resource_tracker.allocate_constrained_group_resources( - create_requirement_collections(vertices, machine_graph), - chips) - - # allocate cores to vertices - for vertex, (x, y, p, _, _) in progress.over( - zip(vertices, allocs), False): - placements.add_placement(Placement(vertex, x, y, p)) - all_vertices_placed.add(vertex) - return True - except (PacmanValueError, PacmanException, - PacmanInvalidParameterException): - return False - - @staticmethod - def _allocate_same_chip_as_group( - vertex, placements, tracker, same_chip_vertex_groups, - all_vertices_placed, progress, machine_graph): - """ - :param MachineVertex vertex: - :param Placements placements: - :param ResourceTracker tracker: - :param dict(MachineVertex,set(MachineVertex)) same_chip_vertex_groups: - :param ~spinn_utilities.progress_bar.ProgressBar progress: - :param MachineGraph machine_graph: - """ - if vertex not in all_vertices_placed: - # get vert's - vertices = same_chip_vertex_groups[vertex] - - resources = tracker.allocate_constrained_group_resources( - create_requirement_collections(vertices, machine_graph)) - - for (x, y, p, _, _), v in progress.over( - zip(resources, vertices), False): - placements.add_placement(Placement(v, x, y, p)) - all_vertices_placed.add(v) diff --git a/pacman/operations/placer_algorithms/radial_placer.py b/pacman/operations/placer_algorithms/radial_placer.py deleted file mode 100644 index 60bcf166b..000000000 --- a/pacman/operations/placer_algorithms/radial_placer.py +++ /dev/null @@ -1,201 +0,0 @@ -# Copyright (c) 2017-2019 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as 
published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -from collections import deque -import logging -from spinn_utilities.log import FormatAdapter -from spinn_utilities.progress_bar import ProgressBar -from pacman.model.constraints.placer_constraints import ( - RadialPlacementFromChipConstraint, SameChipAsConstraint) -from pacman.utilities.algorithm_utilities.placer_algorithm_utilities import ( - get_same_chip_vertex_groups, sort_vertices_by_known_constraints, - create_requirement_collections) -from pacman.model.placements import Placement, Placements -from pacman.utilities.utility_objs import ResourceTracker -from pacman.exceptions import PacmanPlaceException - -logger = FormatAdapter(logging.getLogger(__name__)) - - -def radial_placer(machine_graph, machine, plan_n_timesteps): - """ A placement algorithm that can place a machine graph onto a\ - machine choosing chips radiating in a circle from the boot chip - - :param MachineGraph machine_graph: The machine_graph to place - :param ~spinn_machine.Machine machine: - The machine with respect to which to partition the application - graph - :param int plan_n_timesteps: number of timesteps to plan for - :return: A set of placements - :rtype: Placements - :raise PacmanPlaceException: - If something goes wrong with the placement - """ - placer = _RadialPlacer() - # pylint:disable=protected-access - return placer._run(machine_graph, machine, plan_n_timesteps) - - -class _RadialPlacer(object): - """ A placement algorithm that can place a machine graph onto a\ - machine choosing chips radiating in a circle from the boot chip - """ - - def _run(self, machine_graph, machine, plan_n_timesteps): - """ - :param MachineGraph machine_graph: The machine_graph to place - :param ~spinn_machine.Machine machine: - The machine with respect to which to partition the application - graph - :param int plan_n_timesteps: number of timesteps to plan for - :return: A set of placements - :rtype: Placements - :raise PacmanPlaceException: - If something goes wrong with the placement - """ - # check that the algorithm can handle the constraints - self._check_constraints( - machine_graph.vertices, - additional_placement_constraints={SameChipAsConstraint}) - - placements = Placements() - vertices = sort_vertices_by_known_constraints(machine_graph.vertices) - - # Iterate over vertices and generate placements - progress = ProgressBar( - machine_graph.n_vertices, "Placing graph vertices") - resource_tracker = ResourceTracker( - machine, plan_n_timesteps, self._generate_radial_chips(machine)) - vertices_on_same_chip = get_same_chip_vertex_groups(machine_graph) - all_vertices_placed = set() - for vertex in progress.over(vertices): - if vertex not in all_vertices_placed: - vertices_placed = self._place_vertex( - vertex, resource_tracker, machine, placements, - vertices_on_same_chip, machine_graph) - all_vertices_placed.update(vertices_placed) - return placements - - def _check_constraints( - self, vertices, additional_placement_constraints=None): - if additional_placement_constraints is not None: - placement_constraints = additional_placement_constraints - 
else: - placement_constraints = {} - ResourceTracker.check_constraints( - vertices, additional_placement_constraints=placement_constraints) - - def _place_vertex( - self, vertex, resource_tracker, machine, placements, - vertices_on_same_chip, machine_graph): - """ - :param MachineVertex vertex: - :param ResourceTracker resource_tracker: - :param ~spinn_machine.Machine machine: - :param Placements placements: - :param vertices_on_same_chip: - :type vertices_on_same_chip: dict(MachineVertex, set(MachineVertex)) - :param MachineGraph machine_graph: - :rtype: set(MachineVertex) - """ - vertices = vertices_on_same_chip[vertex] - - # Check for the radial placement constraint - radial_constraints = [c for v in vertices for c in v.constraints if - isinstance(c, RadialPlacementFromChipConstraint)] - start_x, start_y = self._get_start(radial_constraints) - chips = None - if start_x is not None and start_y is not None: - chips = self._generate_radial_chips( - machine, resource_tracker, start_x, start_y) - - if len(vertices) > 1: - assigned_values = \ - resource_tracker.allocate_constrained_group_resources( - create_requirement_collections(vertices, machine_graph), - chips=chips) - for (x, y, p, _, _), vert in zip(assigned_values, vertices): - placement = Placement(vert, x, y, p) - placements.add_placement(placement) - else: - (x, y, p, _, _) = resource_tracker.allocate_constrained_resources( - vertex.resources_required, vertex.constraints, chips=chips) - placement = Placement(vertex, x, y, p) - placements.add_placement(placement) - - return vertices - - @staticmethod - def _get_start(radial_constraints): - """ - :param list(RadialPlacementFromChipConstraint) radial_constraints: - :rtype: tuple(int,int) or tuple(None,None) - """ - x = None - y = None - for constraint in radial_constraints: - if x is None: - x = constraint.x - elif x != constraint.x: - raise PacmanPlaceException("Non-matching constraints") - if y is None: - y = constraint.y - elif y != constraint.y: - raise PacmanPlaceException("Non-matching constraints") - return x, y - - @staticmethod - def _generate_radial_chips( - machine, resource_tracker=None, start_chip_x=None, - start_chip_y=None): - """ Generates the list of chips from a given starting point in a radial\ - format. - - :param ~spinn_machine.Machine machine: the SpiNNaker machine object - :param resource_tracker: - the resource tracker object which contains what resources of the - machine have currently been used - :type resource_tracker: ResourceTracker or None - :param start_chip_x: - The chip x coordinate to start with for radial iteration - :type start_chip_x: int or None - :param start_chip_y: - the chip y coordinate to start with for radial iteration - :type start_chip_y: int or None - :return: list of chips. 
- :rtype: iterable(tuple(int,int)) - """ - - if start_chip_x is None or start_chip_y is None: - first_chip = machine.boot_chip - else: - first_chip = machine.get_chip_at(start_chip_x, start_chip_y) - done_chips = {first_chip} - search = deque([first_chip]) - while search: - chip = search.pop() - if (resource_tracker is None or - resource_tracker.is_chip_available(chip.x, chip.y)): - yield chip.x, chip.y - - # Examine the links of the chip to find the next chips - for link in chip.router.links: - next_chip = machine.get_chip_at( - link.destination_x, link.destination_y) - - # Don't search done chips again - if next_chip not in done_chips: - search.appendleft(next_chip) - done_chips.add(next_chip) diff --git a/pacman/operations/placer_algorithms/spreader_placer.py b/pacman/operations/placer_algorithms/spreader_placer.py deleted file mode 100644 index 2b4fbb636..000000000 --- a/pacman/operations/placer_algorithms/spreader_placer.py +++ /dev/null @@ -1,457 +0,0 @@ -# Copyright (c) 2017-2019 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -from collections import defaultdict -import functools -import math -import sys -from spinn_utilities.progress_bar import ProgressBar -from pacman.model.placements import Placement, Placements -from pacman.operations.placer_algorithms.one_to_one_placer import ( - _OneToOnePlacer) -from pacman.utilities.algorithm_utilities.placer_algorithm_utilities import ( - create_vertices_groups, get_same_chip_vertex_groups, - create_requirement_collections) -from pacman.utilities.utility_objs import ResourceTracker -from pacman.model.constraints.placer_constraints import ( - SameChipAsConstraint, ChipAndCoreConstraint) - - -def spreader_placer(machine_graph, machine, n_keys_map, plan_n_timesteps): - """ Places vertices on as many chips as available with a effort to\ - reduce the number of packets being received by the router in total. - - :param MachineGraph machine_graph: the machine graph - :param ~spinn_machine.Machine machine: the SpiNNaker machine - :param AbstractMachinePartitionNKeysMap n_keys_map: - the n keys from partition map - :param int plan_n_timesteps: number of timesteps to plan for - :return: placements. - :rtype: Placements - """ - placer = _SpreaderPlacer() - # pylint:disable=protected-access - return placer._run(machine_graph, machine, n_keys_map, plan_n_timesteps) - - -class _SpreaderPlacer(_OneToOnePlacer): - """ Places vertices on as many chips as available with a effort to\ - reduce the number of packets being received by the router in total. - - :param MachineGraph machine_graph: the machine graph - :param ~spinn_machine.Machine machine: the SpiNNaker machine - :param AbstractMachinePartitionNKeysMap n_keys_map: - the n keys from partition map - :param int plan_n_timesteps: number of timesteps to plan for - :return: placements. - :rtype: Placements - """ - - # number of cycles over the machine graph ( - # 1. same chip, - # 2. 1 to 1, - # 3. 
sort left overs - # 4 left overs) - ITERATIONS = 4 - - # distinct steps ( - # 1. check constraints, - # 2. same chip sets, - # 3. 1 to 1 sets, - # 4. chip and core) - STEPS = 4 - - # pylint:disable=arguments-differ - def _run(self, machine_graph, machine, n_keys_map, plan_n_timesteps): - """ - :param MachineGraph machine_graph: the machine graph - :param ~spinn_machine.Machine machine: the SpiNNaker machine - :param AbstractMachinePartitionNKeysMap n_keys_map: - the n keys from partition map - :param int plan_n_timesteps: number of timesteps to plan for - :return: placements. - :rtype: Placements - """ - # create progress bar - progress_bar = ProgressBar( - (machine_graph.n_vertices * self.ITERATIONS) + self.STEPS, - "Placing graph vertices via spreading over an entire machine") - - # check that the algorithm can handle the constraints - self._check_constraints( - machine_graph.vertices, - additional_placement_constraints={SameChipAsConstraint}) - progress_bar.update() - - # get same chip groups - same_chip_vertex_groups = get_same_chip_vertex_groups(machine_graph) - progress_bar.update() - # get chip and core placed verts - hard_chip_constraints = self._locate_hard_placement_verts( - machine_graph) - progress_bar.update() - # get one to one groups - one_to_one_groups = create_vertices_groups( - machine_graph.vertices, - functools.partial( - self._find_one_to_one_vertices, graph=machine_graph)) - progress_bar.update() - - # sort chips so that they are radial from a given point and other - # init data structs - chips_in_order = self._determine_chip_list(machine) - resource_tracker = ResourceTracker( - machine, plan_n_timesteps, chips=chips_in_order) - placements = Placements() - placed_vertices = set() - cost_per_chip = defaultdict(int) - progress_bar.update() - - # allocate hard ones - for hard_vertex in hard_chip_constraints: - (x, y, p, _, _) = resource_tracker.allocate_constrained_resources( - hard_vertex.resources_required, hard_vertex.constraints) - placements.add_placement(Placement(hard_vertex, x, y, p)) - placed_vertices.add(hard_vertex) - cost_per_chip[x, y] += self._get_cost( - hard_vertex, machine_graph, n_keys_map) - - # place groups of verts that need the same chip on the same chip, - self._place_same_chip_verts( - same_chip_vertex_groups, chips_in_order, placements, - progress_bar, resource_tracker, placed_vertices, cost_per_chip, - machine_graph, n_keys_map) - - # place 1 group per chip if possible on same chip as any already - # placed verts. if not then radially from it. - self._place_one_to_one_verts( - one_to_one_groups, chips_in_order, placements, progress_bar, - resource_tracker, placed_vertices, cost_per_chip, machine_graph, - n_keys_map, machine) - - # place vertices which don't have annoying placement constraints. - # spread them over the chips so that they have minimal impact on the - # overall incoming packet cost per router. 
- self._place_left_over_verts( - machine_graph, chips_in_order, placements, progress_bar, - resource_tracker, placed_vertices, cost_per_chip, n_keys_map) - progress_bar.end() - - # return the built placements - return placements - - def _sort_left_over_verts_based_on_incoming_packets( - self, machine_graph, placed_vertices, n_keys_map): - """ sort left overs verts so that the ones with the most costly verts - are at the front of the list - - :param MachineGraph machine_graph: machine graph - :param set(MachineVertex) placed_vertices: the verts already placed - :param AbstractMachinePartitionNKeysMap n_keys_map: - map between partition to n keys. - :return: new list of verts to process. - :rtype: list(MachineVertex) - """ - - vert_list = list() - incoming_size_map = defaultdict(list) - for vertex in machine_graph.vertices: - if vertex not in placed_vertices: - incoming_size = self._get_cost( - vertex, machine_graph, n_keys_map) - incoming_size_map[incoming_size].append(vertex) - sorted_keys = sorted(incoming_size_map.keys(), reverse=True) - for key in sorted_keys: - vert_list.extend(incoming_size_map[key]) - return vert_list - - @staticmethod - def _sort_chips_based_off_incoming_cost(chips, cost_per_chip): - """ sorts chips out so that the chip in front has least incoming cost. - - :param list(tuple(int,int)) chips: iterable of chips to sort - :param cost_per_chip: the map of (x,y) and cost. - :type cost_per_chip: dict(tuple(int, int), int) - :return: iterable of chips in a sorted fashion. - :rtype: list(tuple(int,int)) - """ - - data = sorted(chips, key=lambda chip: cost_per_chip[chip[0], chip[1]]) - return data - - @staticmethod - def _get_cost(vertex, machine_graph, n_keys_map): - """ gets how many packets are to be processed by a given vertex. - - :param MachineVertex vertex: the vertex the get the cost of - :param MachineGraph machine_graph: the machine graph - :param AbstractMachinePartitionNKeysMap n_keys_map: - the map of outgoing partition and n keys down it. - :return: total keys to come into this vertex. - :rtype: int - """ - - # NOTE we going to assume as a worst case scenario that every key is - # sent every time step. but this is obviously not valid often - # handle incoming - total_incoming_keys = 0 - for incoming_partition in \ - machine_graph.get_multicast_edge_partitions_ending_at_vertex( - vertex): - total_incoming_keys += n_keys_map.n_keys_for_partition( - incoming_partition) - - # handle outgoing - out_going_partitions = \ - machine_graph.get_multicast_edge_partitions_starting_at_vertex( - vertex) - for partition in out_going_partitions: - total_incoming_keys += \ - n_keys_map.n_keys_for_partition(partition) - return total_incoming_keys - - @staticmethod - def _locate_hard_placement_verts(machine_graph): - """ locates the verts with hard constraints - - :param MachineGraph machine_graph: the machine graph - :return: list of verts to just place where they demand it - :rtype: list(MachineVertex) - """ - hard_verts = list() - for vertex in machine_graph.vertices: - for constraint in vertex.constraints: - if isinstance(constraint, ChipAndCoreConstraint): - hard_verts.append(vertex) - return hard_verts - - def _place_same_chip_verts( - self, same_chip_vertex_groups, chips_in_order, - placements, progress_bar, resource_tracker, placed_vertices, - cost_per_chip, machine_graph, n_keys_map): - """ places verts which have to be on the same chip on minimum chip. - - :param same_chip_vertex_groups: - groups of verts which want to be on the same chip. 
- :type same_chip_vertex_groups: dict(MachineVertex, set(MachineVertex)) - :param chips_in_order: chips in radial order from mid machine - :type chips_in_order: iterable(tuple(int,int)) - :param Placements placements: placements holder - :param ~spinn_utilities.progress_bar.ProgressBar progress_bar: - progress bar - :param ResourceTracker resource_tracker: resource tracker - :param set(MachineVertex) placed_vertices: - vertices which have already been placed - :param cost_per_chip: map between (x,y) and the cost of packets - :type cost_per_chip: dict(tuple(int, int), int) - :param MachineGraph machine_graph: - :param AbstractMachinePartitionNKeysMap n_keys_map: - :rtype: None - """ - for vertex in same_chip_vertex_groups.keys(): - if len(same_chip_vertex_groups[vertex]) != 1: - if vertex not in placed_vertices: - to_do_as_group = list() - for other_vert in same_chip_vertex_groups[vertex]: - if other_vert not in placed_vertices: - to_do_as_group.extend( - create_requirement_collections( - [other_vert], machine_graph)) - - # allocate as a group to sorted chips so that ones with - # least incoming packets are considered first - results = \ - resource_tracker.allocate_constrained_group_resources( - to_do_as_group, chips=chips_in_order) - - # create placements and add cost to the chip - for (x, y, p, _, _), placed_vertex in zip( - results, same_chip_vertex_groups[vertex]): - placements.add_placement( - Placement(placed_vertex, x, y, p)) - placed_vertices.add(placed_vertex) - cost_per_chip[x, y] += self._get_cost( - placed_vertex, machine_graph, n_keys_map) - - # resort the chips, as no idea where in the list the resource - # tracker selected - chips_in_order = self._sort_chips_based_off_incoming_cost( - chips_in_order, cost_per_chip) - - # update progress bar to cover one cycle of all the verts in the graph - progress_bar.update(len(machine_graph.vertices)) - - def _place_one_to_one_verts( - self, one_to_one_groups, chips_in_order, placements, progress_bar, - resource_tracker, placed_vertices, cost_per_chip, machine_graph, - n_keys_map, machine): - """ place 1 to 1 groups on the same chip if possible. else radially\ - from it - - :param one_to_one_groups: the 1 to 1 groups - :type one_to_one_groups: iterable(iterable(MachineVertex)) - :param chips_in_order: chips in sorted order of lowest cost - :type chips_in_order: iterable(tuple(int,int)) - :param Placements placements: placements holder - :param ~spinn_utilities.progress_bar.ProgressBar progress_bar: - the progress bar - :param ResourceTracker resource_tracker: the resource tracker - :param set(MachineVertex) placed_vertices: the verts already placed - :param cost_per_chip: map of (x,y) and the incoming packet cost - :type cost_per_chip: dict(tuple(int, int), int) - :param MachineGraph machine_graph: machine graph - :param AbstractMachinePartitionNKeysMap n_keys_map: - map between outgoing partition and n keys down it - :param ~spinn_machine.Machine machine: the SpiNNMachine instance. 
- """ - - # go through each 1 to 1 group separately - for group in one_to_one_groups: - - # find which cores have already been allocated or not - unallocated = list() - allocated = list() - for one_to_one_vertex in group: - if one_to_one_vertex not in placed_vertices: - unallocated.append(one_to_one_vertex) - else: - allocated.append(one_to_one_vertex) - - # if allocated, then locate which chip to start search at - chips = chips_in_order - if len(allocated) != 0: - x = None - y = None - all_matched = True - # determine if the placed ones are all in the same chip. else - # it doesnt matter. - for vertex in allocated: - placement = placements.get_placement_of_vertex(vertex) - if x is None and y is None: - x = placement.x - y = placement.y - else: - if x != placement.x or y != placement.y: - all_matched = False - - # order chips so that shared chip is first, and the rest are - # nearby it in order. or if not all same, just least first - if all_matched: - chips = list(self._generate_radial_chips( - machine, resource_tracker=None, start_chip_x=x, - start_chip_y=y)) - - # allocate verts. - for one_to_one_vertex in unallocated: - (x, y, p, _, _) = \ - resource_tracker.allocate_constrained_resources( - one_to_one_vertex.resources_required, - one_to_one_vertex.constraints, chips=chips) - - # add to placed tracker - placed_vertices.add(one_to_one_vertex) - - # make placement - placements.add_placement(Placement( - vertex=one_to_one_vertex, x=x, y=y, p=p)) - - # update cost - cost_per_chip[x, y] += self._get_cost( - one_to_one_vertex, machine_graph, n_keys_map) - - # sort chips for the next group cycle - chips_in_order = self._sort_chips_based_off_incoming_cost( - chips, cost_per_chip) - # update progress bar to cover one cycle of all the verts in the graph - progress_bar.update(len(machine_graph.vertices)) - - def _place_left_over_verts( - self, machine_graph, chips_in_order, placements, progress_bar, - resource_tracker, placed_vertices, cost_per_chip, n_keys_map): - """ places left over vertices in locations with least costs. - - :param MachineGraph machine_graph: machine graph - :param chips_in_order: chips in sorted order - :type chips_in_order: iterable(tuple(int,int)) - :param Placements placements: placements - :param ~spinn_utilities.progress_bar.ProgressBar progress_bar: - progress bar - :param ResourceTracker resource_tracker: resource tracker - :param set(MachineVertex) placed_vertices: - the verts which already been placed - :param cost_per_chip: map between (x,y) and the total packets going - through it currently. - :type cost_per_chip: dict(tuple(int, int), int) - :param AbstractMachinePartitionNKeysMap n_keys_map: - map between outgoing partition and n keys down it. 
- """ - - # locate whatever verts are left - sorted_verts = self._sort_left_over_verts_based_on_incoming_packets( - machine_graph, placed_vertices, n_keys_map) - - for vertex in sorted_verts: - (x, y, p, _, _) = resource_tracker.allocate_constrained_resources( - vertex.resources_required, - vertex.constraints, chips=chips_in_order) - placements.add_placement(Placement(vertex=vertex, x=x, y=y, p=p)) - cost_per_chip[x, y] += self._get_cost( - vertex, machine_graph, n_keys_map) - # sort chips for the next group cycle - chips_in_order = self._sort_chips_based_off_incoming_cost( - chips_in_order, cost_per_chip) - - progress_bar.update(len(machine_graph.vertices)) - - def _determine_chip_list(self, machine): - """ determines the radial list from a deduced middle of the machine - - :param ~spinn_machine.Machine machine: - the machine to find a middle from - :return: a list of chips radially from a deduced middle - :rtype: list(tuple(int,int)) - """ - # try the middle chip - middle_chip_x = math.ceil(machine.max_chip_x / 2) - middle_chip_y = math.ceil(machine.max_chip_y / 2) - chip = machine.get_chip_at(middle_chip_x, middle_chip_y) - - # if middle chip don't exist. search for the closest chip. - if chip is None: - distance_from_middle = sys.maxsize - closest_chip = None - - # compare each chip loc to the middle. don't need to be majorly - # precise, all we're looking for is a chip nearby. - for chip in machine.chips: - x_diff = abs(middle_chip_x - chip.x) - y_diff = abs(middle_chip_y - chip.y) - diff_total = x_diff + y_diff - if distance_from_middle > diff_total: - distance_from_middle = diff_total - closest_chip = chip - - # if you find a chip that's next door, then quit early - if distance_from_middle == 1: - break - - # set correct middle chip - middle_chip_x = closest_chip.x - middle_chip_y = closest_chip.y - - # return the radial list from this middle point - return list(self._generate_radial_chips( - machine, resource_tracker=None, start_chip_x=middle_chip_x, - start_chip_y=middle_chip_y)) diff --git a/pacman/operations/router_algorithms/__init__.py b/pacman/operations/router_algorithms/__init__.py index 2c438703a..962c8439e 100644 --- a/pacman/operations/router_algorithms/__init__.py +++ b/pacman/operations/router_algorithms/__init__.py @@ -15,5 +15,7 @@ from .basic_dijkstra_routing import basic_dijkstra_routing from .ner_route import ner_route, ner_route_traffic_aware +from .application_router import route_application_graph -__all__ = ['basic_dijkstra_routing', 'ner_route', 'ner_route_traffic_aware'] +__all__ = ['basic_dijkstra_routing', 'ner_route', 'ner_route_traffic_aware', + 'route_application_graph'] diff --git a/pacman/operations/router_algorithms/application_router.py b/pacman/operations/router_algorithms/application_router.py new file mode 100644 index 000000000..b16614508 --- /dev/null +++ b/pacman/operations/router_algorithms/application_router.py @@ -0,0 +1,706 @@ +# Copyright (c) 2021 The University of Manchester +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +from pacman.model.routing_table_by_partition import ( + MulticastRoutingTableByPartition, MulticastRoutingTableByPartitionEntry) +from pacman.utilities.algorithm_utilities.routing_algorithm_utilities import ( + longest_dimension_first, get_app_partitions, vertex_xy, + vertex_xy_and_route) +from pacman.utilities.algorithm_utilities.routing_tree import RoutingTree +from pacman.model.graphs.application import ApplicationVertex +from collections import deque, defaultdict +from spinn_utilities.progress_bar import ProgressBar + + +class _Targets(object): + """ A set of targets to be added to a route on a chip(xy) + """ + __slots__ = ["__targets_by_source"] + + def __init__(self): + self.__targets_by_source = defaultdict(lambda: (list(), list())) + + def ensure_source(self, source_vertex): + """ Ensure that a source exists, even if it targets nothing + + :param source_vertex: The vertex to ensure exists + :type source_vertex: ApplicationVertex or MachineVertex + """ + if source_vertex not in self.__targets_by_source: + self.__targets_by_source[source_vertex] = (list(), list()) + + def add_sources_for_target( + self, core, link, source_vertices, partition_id): + """ Add a set of vertices that target a given core or link + + :param core: The core to target with the sources or None if no core + :type core: int or None + :param link: The link to target with the sources or None if no link + :type link: int or None + :param source_vertices: A list of sources which target something here + :type source_vertices: list(ApplicationVertex or MachineVertex) + :param str partition_id: The partition of the sources + """ + for vertex in source_vertices: + if isinstance(vertex, ApplicationVertex): + if self.__is_m_vertex(vertex, partition_id): + self.__add_m_vertices(vertex, partition_id, core, link) + else: + self.__add_source(vertex, core, link) + else: + if vertex.app_vertex in self.__targets_by_source: + self.__replace_app_vertex(vertex.app_vertex, partition_id) + self.__add_source(vertex, core, link) + + def add_machine_sources_for_target( + self, core, link, source_vertices, partition_id): + """ Add a set of machine vertices that target a given core or link + + :param core: The core to target with the sources or None if no core + :type core: int or None + :param link: The link to target with the sources or None if no link + :type link: int or None + :param source_vertices: A list of sources which target something here + :type source_vertices: list(ApplicationVertex or MachineVertex) + :param str partition_id: The partition of the sources + """ + for vertex in source_vertices: + if isinstance(vertex, ApplicationVertex): + if vertex in self.__targets_by_source: + self.__replace_app_vertex(vertex, partition_id) + self.__add_m_vertices(vertex, partition_id, core, link) + else: + if vertex.app_vertex in self.__targets_by_source: + self.__replace_app_vertex(vertex.app_vertex, partition_id) + self.__add_source(vertex, core, link) + + def __is_m_vertex(self, vertex, partition_id): + for m_vert in vertex.splitter.get_out_going_vertices(partition_id): + if m_vert in self.__targets_by_source: + return True + return False + + def __replace_app_vertex(self, vertex, partition_id): + cores = self.__targets_by_source[vertex][0] + links = self.__targets_by_source[vertex][1] + del self.__targets_by_source[vertex] + for m_vertex in vertex.splitter.get_out_going_vertices(partition_id): + 
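# Each outgoing machine vertex takes over the cores and links + # recorded against the application vertex (the tuple is shared)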
self.__targets_by_source[m_vertex] = (cores, links) + + def __add_m_vertices(self, vertex, partition_id, core, link): + for m_vertex in vertex.splitter.get_out_going_vertices(partition_id): + self.__add_source(m_vertex, core, link) + + def __add_source(self, source, core, link): + if core is not None: + self.__targets_by_source[source][0].append(core) + if link is not None: + self.__targets_by_source[source][1].append(link) + + @property + def targets_by_source(self): + """ Get the sources mapped to their targets, as a view of + (source, (list of cores, list of links)) pairs + + :rtype: iterable(tuple(MachineVertex or ApplicationVertex, + tuple(list(int), list(int)))) + """ + return self.__targets_by_source.items() + + def get_targets_for_source(self, vertex): + """ Get the cores and links for a specific source + + :param vertex: The source vertex to look up + :type vertex: MachineVertex or ApplicationVertex + :return: The vertex, and a tuple of (list of cores, list of links) + :rtype: tuple(MachineVertex or ApplicationVertex, + tuple(list(int), list(int))) + """ + return vertex, self.__targets_by_source[vertex] + + +def route_application_graph(machine, app_graph, placements): + """ Route an application graph + + :param ~spinn_machine.Machine machine: The machine to route on + :param ApplicationGraph app_graph: The graph to route + :param Placements placements: Where the machine vertices are placed + :return: The routing tables built + :rtype: MulticastRoutingTableByPartition + """ + routing_tables = MulticastRoutingTableByPartition() + + partitions = get_app_partitions(app_graph) + + # Now go through the app edges and route app vertex by app vertex + progress = ProgressBar(len(partitions), "Routing") + for partition in progress.over(partitions): + # Store the source vertex of the partition + source = partition.pre_vertex + + # Pick a place within the source that we can route from. Note that + # this might not end up being the actual source in the end. + source_mappings = _get_outgoing_mapping( + source, partition.identifier, placements, machine) + + # No source mappings? Nothing to route then! + if not source_mappings: + continue + + source_xy = next(iter(source_mappings.keys())) + # Get all source chips coordinates + all_source_xys = _get_all_xys(source, placements, machine) + + # Keep track of the source edge chips + source_edge_xys = set() + + # Keep track of which chips (xys) we have visited with routes for this + # partition to ensure no looping + routes = dict() + + # Keep track of cores or links to target on specific chips (xys) + targets = defaultdict(_Targets) + + # Remember if we see a self-connection + self_connected = False + self_xys = set() + + for edge in partition.edges: + # Store the target vertex + target = edge.post_vertex + + # If not self-connected + if source != target: + + # Find all coordinates for chips (xy) that are in the target + target_xys = _get_all_xys(target, placements, machine) + + # Pick one to actually use as a target + target_xy = _find_target_xy( + target_xys, routes, source_mappings) + + # Make a route between source and target, without any source + # or target chips in it + source_edge_xy, target_edge_xy = _route_pre_to_post( + source_xy, target_xy, routes, machine, + f"Source to Target ({target.label})", all_source_xys, + target_xys) + + # Add all the targets for the route + target_vertices = \ + target.splitter.get_source_specific_in_coming_vertices( + source, partition.identifier) + real_target_xys = set() + for tgt, srcs in target_vertices: + xy, (_vertex, core, link) = vertex_xy_and_route( + tgt, placements, machine) + if xy in source_mappings: + targets[xy].add_machine_sources_for_target( + core, link, srcs, partition.identifier) + else: + targets[xy].add_sources_for_target( + core, link, srcs, partition.identifier) + + real_target_xys.add(xy) + + # Route from target edge chip to all the targets + _route_to_xys( + target_edge_xy, target_xys, machine, routes, + real_target_xys, "Target to Targets") + + # If the start 
of the route is still part of the source vertex + # chips, add it + if source_edge_xy in source_mappings: + source_edge_xys.add(source_edge_xy) + + # If self-connected + else: + self_connected = True + + # If self-connected, add the targets of the sources + target_vertices = \ + source.splitter.get_source_specific_in_coming_vertices( + source, partition.identifier) + for tgt, srcs in target_vertices: + xy, (_vertex, core, link) = vertex_xy_and_route( + tgt, placements, machine) + targets[xy].add_machine_sources_for_target( + core, link, srcs, partition.identifier) + self_xys.add(xy) + + # Deal with internal multicast partitions + internal = source.splitter.get_internal_multicast_partitions() + if internal: + self_connected = True + for in_part in internal: + src = in_part.pre_vertex + for edge in in_part.edges: + tgt = edge.post_vertex + xy, (_vertex, core, link) = vertex_xy_and_route( + tgt, placements, machine) + targets[xy].add_machine_sources_for_target( + core, link, [src], in_part.identifier) + self_xys.add(xy) + + # Make the real routes from source edges to targets + for source_edge_xy in source_edge_xys: + # Make sure that we add the machine sources on the source edge chip + if source_edge_xy not in targets: + edge_targets = _Targets() + for source_xy in source_mappings: + for vertex, _p, _l in source_mappings[source_xy]: + edge_targets.ensure_source(vertex) + targets[source_edge_xy] = edge_targets + + _convert_a_route( + routing_tables, source, partition.identifier, None, None, + routes[source_edge_xy], targets=targets, + ensure_all_source=True) + + # Now make the routes from actual sources to source edges + if self_connected: + for xy in source_mappings: + source_routes = dict() + _route_to_xys( + xy, all_source_xys, machine, source_routes, + source_edge_xys.union(self_xys), + "Sources to Source (self)") + for vertex, processor, link in source_mappings[xy]: + _convert_a_route( + routing_tables, vertex, partition.identifier, + processor, link, source_routes[xy], targets=targets, + use_source_for_targets=True) + else: + for xy in source_mappings: + source_routes = dict() + _route_to_xys( + xy, all_source_xys, machine, source_routes, + source_edge_xys, "Sources to source") + for vertex, processor, link in source_mappings[xy]: + _convert_a_route( + routing_tables, vertex, partition.identifier, + processor, link, source_routes[xy]) + + # Return the routing tables + return routing_tables + + +def _find_target_xy(target_xys, routes, source_mappings): + """ Choose a chip within the target to route to: prefer a chip that is + also a source, then one that is already on a route, falling back on + the last chip seen + + :param set(tuple(int, int)) target_xys: The chips the target is on + :param dict(tuple(int, int), RoutingTree) routes: The routes in use + :param dict(tuple(int, int), list) source_mappings: The source chips + :return: The chosen chip + :rtype: tuple(int, int) + """ + for xy in target_xys: + if xy in source_mappings: + return xy + for xy in target_xys: + if xy in routes: + return xy + return xy + + +def _get_outgoing_mapping(app_vertex, partition_id, placements, machine): + """ + Gets a mapping from the (x, y) coordinates of the source chips to the + list of (vertex, processor, link) routes of the vertices placed there + + For each tuple in a list, either the processor or the link will be + None + + :param ApplicationVertex app_vertex: + :param str partition_id: + :param Placements placements: + :param ~spinn_machine.Machine machine: + :rtype: dict(tuple(int, int), + list(tuple(MachineVertex, int, None) or + tuple(MachineVertex, None, int))) + """ + outgoing_mapping = defaultdict(list) + for m_vertex in app_vertex.splitter.get_out_going_vertices(partition_id): + xy, route = vertex_xy_and_route(m_vertex, placements, machine) + outgoing_mapping[xy].append(route)
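+ # Internal multicast partitions are also sources of traffic on this + # partition, so include routes for their pre-vertices too + for in_part in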
app_vertex.splitter.get_internal_multicast_partitions(): + if in_part.identifier == partition_id: + xy, route = vertex_xy_and_route( + in_part.pre_vertex, placements, machine) + outgoing_mapping[xy].append(route) + return outgoing_mapping + + +def _get_all_xys(app_vertex, placements, machine): + """ + Gets the set of all the (x, y) coordinates that the vertex's machine + vertices are placed on + + :param app_vertex: + :param placements: + :param machine: + :rtype: set(tuple(int, int)) + """ + return {vertex_xy(m_vertex, placements, machine) + for m_vertex in app_vertex.machine_vertices} + + +def _route_to_xys(first_xy, all_xys, machine, routes, targets, label): + """ Fill in routes from first_xy to all the target chips, using only + chips in all_xys + + :param tuple(int, int) first_xy: + :param list(tuple(int, int)) all_xys: + :param ~spinn_machine.Machine machine: + :param dict(tuple(int, int), RoutingTree) routes: + :param set(tuple(int, int)) targets: + :param str label: + """ + # Keep a queue of xy to visit, list of (parent xy, link from parent) + xys_to_explore = deque([(first_xy, list())]) + visited = set() + targets_to_visit = set(targets) + while xys_to_explore: + xy, path = xys_to_explore.popleft() + if xy in targets_to_visit: + targets_to_visit.remove(xy) + if xy in visited: + continue + visited.add(xy) + + # If we have reached a xy that has already been routed to, + # cut the path off here + if xy in routes: + path = list() + + # If we have reached a target, add the path to the routes + elif xy in targets: + routes[xy] = RoutingTree(xy, label) + last_route = routes[xy] + for parent, link in reversed(path): + if parent not in routes: + routes[parent] = RoutingTree(parent, label) + routes[parent].append_child((link, last_route)) + last_route = routes[parent] + + # The path can be reset from here as we have already routed here + path = list() + + for link in range(6): + x, y = xy + if machine.is_link_at(x, y, link): + next_xy = machine.xy_over_link(x, y, link) + if _is_open_chip(next_xy, all_xys, visited, machine): + new_path = list(path) + new_path.append((xy, link)) + xys_to_explore.append((next_xy, new_path)) + # Sanity check + if targets_to_visit: + raise Exception( + f"Failed to visit all targets {targets} from {first_xy}: " + f"not visited {targets_to_visit}") + + +def _is_open_chip(xy, xys, visited, machine): + """ + + :param tuple(int, int) xy: + :param list(tuple(int, int)) xys: List of legal xys + :param set(tuple(int, int)) visited: + :param machine: + :return: True if the coordinates point to an existing chip that has + not yet been visited + :rtype: bool + """ + return xy in xys and xy not in visited and machine.is_chip_at(*xy) + + +def _route_pre_to_post( + source_xy, dest_xy, routes, machine, label, all_source_xy, + target_xys): + """ Route from a source chip to a destination chip, trimming the ends + so that the route starts at the edge of the source group of chips + and ends at the first chip in the target group + + :param tuple(int, int) source_xy: + :param tuple(int, int) dest_xy: + :param dict(tuple(int, int), RoutingTree) routes: + :param ~spinn_machine.Machine machine: + :param str label: + :param set(tuple(int, int)) all_source_xy: + :param set(tuple(int, int)) target_xys: + :return: the pre and post xy coordinates + :rtype: tuple(tuple(int, int), tuple(int, int)) + """ + # Find a route from source to target + vector = machine.get_vector(source_xy, dest_xy) + nodes_direct = longest_dimension_first(vector, source_xy, machine) + + # Route around broken links and chips + nodes_fixed = _path_without_errors(source_xy, nodes_direct, machine) + + # Start from the end and move backwards until we find a chip + # in the source group, or one already in the route + nodes = nodes_fixed + route_pre = source_xy + for i, (_direction, (x, y)) in reversed(list(enumerate(nodes))): + if _in_group((x, y), all_source_xy) or (x, y) in routes: + nodes = nodes[i + 1:] + route_pre = (x, y) + break + + # If we found one not in
the route, create a new entry for it + if route_pre not in routes: + routes[route_pre] = RoutingTree(route_pre, label) + + # Start from the start and move forwards until we find a chip in + # the target group + route_post = dest_xy + for i, (_direction, (x, y)) in enumerate(nodes): + if (x, y) in target_xys: + nodes = nodes[:i + 1] + route_post = (x, y) + break + + # Convert nodes to routes and add to existing routes + source_route = routes[route_pre] + for direction, dest_node in nodes: + if dest_node in routes: + _print_path(routes[source_xy]) + print(f"Direct path from {source_xy} to {dest_xy}: {nodes_direct}") + print(f"Avoiding down chips: {nodes_fixed}") + print(f"Trimmed path is from {route_pre} to {route_post}: {nodes}") + raise Exception( + f"Somehow node {dest_node} already in routes with label" + f" {routes[dest_node].label}") + dest_route = RoutingTree(dest_node, label) + routes[dest_node] = dest_route + source_route.append_child((direction, dest_route)) + source_route = dest_route + + return route_pre, route_post + + +def _path_without_errors(source_xy, nodes, machine): + """ + + :param tuple(int, int) source_xy: + :param list(tuple(int,tuple(int, int))) nodes: + :param machine: + :rtype: list(tuple(int,int)) + """ + c_xy = source_xy + pos = 0 + new_nodes = list() + while pos < len(nodes): + + # While the route is working, move forwards and copy + while (pos < len(nodes) and _is_ok(c_xy, nodes[pos], machine)): + new_nodes.append(nodes[pos]) + c_xy = _xy(nodes[pos]) + pos += 1 + + # While the route is broken, find the next working bit + next_pos = pos + n_xy = c_xy + while (next_pos < len(nodes) and not _is_ok( + n_xy, nodes[next_pos], machine)): + n_xy = _xy(nodes[next_pos]) + next_pos += 1 + + # If there is a broken bit, fix it + if next_pos != pos: + new_nodes.extend(_find_path(c_xy, n_xy, machine)) + c_xy = n_xy + pos = next_pos + return _path_without_loops(source_xy, new_nodes) + + +def _path_without_loops(start_xy, nodes): + """ + + :param tuple(int, int) start_xy: + :param list(tuple(int,int)) nodes: + :rtype: list(tuple(int,int)) + """ + seen_nodes = {start_xy: 0} + i = 0 + while i < len(nodes): + _, nxt = nodes[i] + if nxt in seen_nodes: + last_seen = seen_nodes[nxt] + del nodes[last_seen:i + 1] + i = last_seen + else: + i += 1 + seen_nodes[nxt] = i + return nodes + + +def _is_ok(xy, node, machine): + """ + + :param tuple(int, int) xy: + :param tuple(int,tuple(int, int)) node: + :param machine: + :return: + """ + c_x, c_y = xy + direction, (n_x, n_y) = node + if machine.is_link_at(c_x, c_y, direction): + if machine.is_chip_at(n_x, n_y): + return True + return False + + +def _xy(node): + _, (x, y) = node + return (x, y) + + +def _find_path(source_xy, target_xy, machine): + xys_to_explore = deque([(source_xy, list())]) + visited = set() + while xys_to_explore: + xy, path = xys_to_explore.popleft() + if xy in visited: + continue + visited.add(xy) + + # If we have reached a target, add the path to the routes + if xy == target_xy: + return path + + for link in range(6): + x, y = xy + if machine.is_link_at(x, y, link): + next_xy = machine.xy_over_link(x, y, link) + if _is_open_chip(next_xy, [next_xy], visited, machine): + new_path = list(path) + new_path.append((link, next_xy)) + xys_to_explore.append((next_xy, new_path)) + raise Exception(f"No path from {source_xy} to {target_xy}") + + +def _in_group(item, group): + if group is None: + return False + return item in group + + +def _convert_a_route( + routing_tables, source_vertex, partition_id, first_incoming_processor, + 
first_incoming_link, first_route, targets=None, + use_source_for_targets=False, ensure_all_source=False): + """ Convert an algorithm-specific route (rooted at first_route) back to + SpiNNaker format and add it to the routing_tables. + + :param MulticastRoutingTableByPartition routing_tables: + SpiNNaker format routing tables + :param source_vertex: The source to be added to the table + :type source_vertex: ApplicationVertex or MachineVertex + :param str partition_id: The identifier of the partition being routed + :param first_incoming_processor: processor the first route came from + :type first_incoming_processor: int or None + :param first_incoming_link: link the first route came from + :type first_incoming_link: int or None + :param RoutingTree first_route: algorithm-specific format of the route + :param targets: + Targets for each chip. When present for a chip, the route links and + cores are added to each entry in the targets. + :type targets: dict(tuple(int,int),_Targets) or None + :param bool use_source_for_targets: + If True, only the targets for the given source_vertex are used; + if False, all targets for matching chips are used. + :param bool ensure_all_source: + If True, ensures that all machine vertices of the source app vertex + are covered in routes that continue forward + """ + + to_process = [(first_incoming_processor, first_incoming_link, first_route)] + while to_process: + incoming_processor, incoming_link, route = to_process.pop() + x, y = route.chip + + processor_ids = list() + link_ids = list() + for (link, next_hop) in route.children: + if link is not None: + link_ids.append(link) + next_incoming_link = (link + 3) % 6 + if next_hop is not None: + to_process.append((None, next_incoming_link, next_hop)) + + if targets is not None and (x, y) in targets: + chip_targets = targets[x, y] + if use_source_for_targets: + targets_by_source = [ + chip_targets.get_targets_for_source(source_vertex)] + else: + targets_by_source = chip_targets.targets_by_source + + # We must ensure that all machine vertices of an app vertex + # are covered!
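An aside on the `(link + 3) % 6` arithmetic above: SpiNNaker links are numbered 0-5 (East, North-East, North, West, South-West, South), so the link a packet arrives on at the next chip is always the one opposite to the link it left by. A minimal standalone sketch of the same stack-based traversal, with a hypothetical `Node` tuple standing in for `RoutingTree`:

```python
from collections import namedtuple

# Hypothetical stand-in for RoutingTree: a chip plus (link, child) pairs
Node = namedtuple("Node", ["chip", "children"])


def walk(first_route):
    """Yield (chip, outgoing links, incoming link) for each tree node."""
    to_process = [(None, first_route)]      # explicit stack, no recursion
    while to_process:
        incoming_link, node = to_process.pop()
        link_ids = []
        for link, child in node.children:
            link_ids.append(link)
            # A packet sent out of link n is received on link (n + 3) % 6
            to_process.append(((link + 3) % 6, child))
        yield node.chip, link_ids, incoming_link


root = Node((0, 0), [(0, Node((1, 0), []))])    # one hop East
print(list(walk(root)))
# [((0, 0), [0], None), ((1, 0), [], 3)]
```

The explicit stack mirrors the `to_process` list in the diff and sidesteps Python's recursion limit on long, snaking routes.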
+ machine_vertex_sources = set() + app_vertex_source = False + for (source, (add_cores, add_links)) in targets_by_source: + if isinstance(source, ApplicationVertex): + app_vertex_source = True + else: + machine_vertex_sources.add(source) + entry = MulticastRoutingTableByPartitionEntry( + link_ids + add_links, processor_ids + add_cores, + incoming_processor, incoming_link) + _add_routing_entry( + first_route, routing_tables, entry, x, y, source, + partition_id) + + # Now check the coverage of Application and machine vertices + if ensure_all_source and not app_vertex_source: + for m_vert in source_vertex.splitter.get_out_going_vertices( + partition_id): + if m_vert not in machine_vertex_sources: + entry = MulticastRoutingTableByPartitionEntry( + link_ids, processor_ids, incoming_processor, + incoming_link) + _add_routing_entry( + first_route, routing_tables, entry, x, y, m_vert, + partition_id) + else: + entry = MulticastRoutingTableByPartitionEntry( + link_ids, processor_ids, incoming_processor, incoming_link) + _add_routing_entry( + first_route, routing_tables, entry, x, y, source_vertex, + partition_id) + + +def _add_routing_entry( + first_route, routing_tables, entry, x, y, source, partition_id): + try: + routing_tables.add_path_entry(entry, x, y, source, partition_id) + except Exception as e: + print(f"Error adding route: {e}") + _print_path(first_route) + raise e + + +def _print_path(first_route): + to_process = [("", None, first_route)] + last_is_leaf = False + line = "" + while to_process: + prefix, link, route = to_process.pop() + + if last_is_leaf: + line += prefix + + to_add = "" + if link is not None: + to_add += f" -> {link} -> " + to_add += f"{route.chip} ({route.label})" + line += to_add + prefix += " " * len(to_add) + + if route.is_leaf: + # This is a leaf + last_is_leaf = True + print(line) + line = "" + else: + last_is_leaf = False + for direction, next_route in route.children: + to_process.append((prefix, direction, next_route)) diff --git a/pacman/operations/router_algorithms/basic_dijkstra_routing.py b/pacman/operations/router_algorithms/basic_dijkstra_routing.py index 1b48e545f..0569b1fb1 100644 --- a/pacman/operations/router_algorithms/basic_dijkstra_routing.py +++ b/pacman/operations/router_algorithms/basic_dijkstra_routing.py @@ -15,12 +15,16 @@ import logging import sys +from collections import defaultdict from spinn_utilities.log import FormatAdapter from spinn_utilities.progress_bar import ProgressBar +from spinn_utilities.ordered_set import OrderedSet from pacman.exceptions import PacmanRoutingException -from pacman.model.graphs.common import EdgeTrafficType from pacman.model.routing_table_by_partition import ( MulticastRoutingTableByPartition, MulticastRoutingTableByPartitionEntry) +from pacman.utilities.algorithm_utilities.routing_algorithm_utilities import ( + get_app_partitions, vertex_xy_and_route) +from pacman.model.graphs.application import ApplicationVertex logger = FormatAdapter(logging.getLogger(__name__)) infinity = float("inf") @@ -56,15 +60,15 @@ def __init__(self): def basic_dijkstra_routing( - placements, machine, machine_graph, + machine, graph, placements, bw_per_route_entry=BW_PER_ROUTE_ENTRY, max_bw=MAX_BW): """ Find routes between the edges with the allocated information, placed in the given places - :param Placements placements: The placements of the edges :param ~spinn_machine.Machine machine: The machine through which the routes are to be found - :param MachineGraph machine_graph: the machine_graph object + :param ApplicationGraph graph: 
the graph to route + :param Placements placements: The placements of the edges :param bool use_progress_bar: whether to show a progress bar :return: The discovered routes :rtype: MulticastRoutingTables @@ -73,7 +77,7 @@ def basic_dijkstra_routing( """ router = _BasicDijkstraRouting(machine, bw_per_route_entry, max_bw) # pylint:disable=protected-access - return router._run(placements, machine_graph) + return router._run(placements, graph) class _BasicDijkstraRouting(object): @@ -103,14 +107,14 @@ def __init__(self, machine, bw_per_route_entry, max_bw): self._max_bw = max_bw self._machine = machine - def _run(self, placements, machine_graph): + def _run(self, placements, graph): """ Find routes between the edges with the allocated information, placed in the given places :param Placements placements: The placements of the edges :param ~spinn_machine.Machine machine: The machine through which the routes are to be found - :param MachineGraph machine_graph: the machine_graph object + :param ApplicationGraph graph: the application graph to route :param bool use_progress_bar: whether to show a progress bar :return: The discovered routes :rtype: MulticastRoutingTableByPartition @@ -122,52 +126,85 @@ def _run(self, placements, machine_graph): tables = self._initiate_dijkstra_tables() self._update_all_weights(nodes_info) - # each vertex represents a core in the board - progress = ProgressBar( - placements.n_placements, "Creating routing entries") + partitions = get_app_partitions(graph) + progress = ProgressBar(len(partitions), "Creating routing entries") - for placement in progress.over(placements.placements): - self._route(placement, placements, machine_graph, - nodes_info, tables) + for partition in progress.over(partitions): + self._route(partition, placements, nodes_info, tables) return self._routing_paths - def _route(self, placement, placements, graph, node_info, tables): + def _route(self, partition, placements, node_info, tables): """ - :param Placement placement: + :param ApplicationEdgePartition partition: :param Placements placements: - :param MachineGraph graph: :param dict(tuple(int,int),_NodeInfo) node_info: :param dict(tuple(int,int),_DijkstraInfo) tables: """ # pylint: disable=too-many-arguments - out_going_edges = ( - edge - for edge in graph.get_edges_starting_at_vertex(placement.vertex) - if edge.traffic_type == EdgeTrafficType.MULTICAST) - - dest_chips = set() - edges_to_route = list() - - for edge in out_going_edges: - destination = edge.post_vertex - dest_place = placements.get_placement_of_vertex(destination) - chip = self._machine.get_chip_at(dest_place.x, dest_place.y) - dest_chips.add((chip.x, chip.y)) - edges_to_route.append(edge) - - if dest_chips: - self._update_all_weights(node_info) - self._reset_tables(tables) - tables[placement.x, placement.y].activated = True - tables[placement.x, placement.y].cost = 0 - self._propagate_costs_until_reached_destinations( - tables, node_info, dest_chips, placement.x, placement.y) - - for edge in edges_to_route: - dest = edge.post_vertex - dest_placement = placements.get_placement_of_vertex(dest) - self._retrace_back_to_source( - dest_placement, tables, edge, node_info, placement.p, graph) + source = partition.pre_vertex + + # Destination cores and links, by source machine vertex and chip (xy) + destinations = defaultdict(lambda: defaultdict(lambda: (set(), set()))) + dest_chips = defaultdict(set) + + for edge in partition.edges: + target = edge.post_vertex + target_vertices = \
target.splitter.get_source_specific_in_coming_vertices( + source, partition.identifier) + + for tgt, srcs in target_vertices: + xy, (m_vertex, core, link) = vertex_xy_and_route( + tgt, placements, self._machine) + for src in srcs: + if isinstance(src, ApplicationVertex): + for s in src.splitter.get_out_going_vertices( + partition.identifier): + if core is not None: + destinations[s][xy][0].add(core) + if link is not None: + destinations[s][xy][1].add(link) + dest_chips[s].add(xy) + else: + if core is not None: + destinations[src][xy][0].add(core) + if link is not None: + destinations[src][xy][1].add(link) + dest_chips[src].add(xy) + + outgoing = OrderedSet(source.splitter.get_out_going_vertices( + partition.identifier)) + for in_part in source.splitter.get_internal_multicast_partitions(): + if in_part.identifier == partition.identifier: + outgoing.add(in_part.pre_vertex) + for edge in in_part.edges: + xy, (_tgt, core, link) = vertex_xy_and_route( + edge.post_vertex, placements, self._machine) + if core is not None: + destinations[in_part.pre_vertex][xy][0].add(core) + if link is not None: + destinations[in_part.pre_vertex][xy][1].add(link) + dest_chips[in_part.pre_vertex].add(xy) + + for m_vertex in outgoing: + source_xy, (m_vertex, core, link) = vertex_xy_and_route( + m_vertex, placements, self._machine) + if dest_chips[m_vertex]: + self._update_all_weights(node_info) + self._reset_tables(tables) + tables[source_xy].activated = True + tables[source_xy].cost = 0 + x, y = source_xy + self._propagate_costs_until_reached_destinations( + tables, node_info, dest_chips[m_vertex], x, y) + + for xy in destinations[m_vertex]: + dest_cores, dest_links = destinations[m_vertex][xy] + self._retrace_back_to_source( + xy, dest_cores, dest_links, tables, node_info, core, link, + m_vertex, partition.identifier) def _initiate_node_info(self): """ Set up a dictionary which contains data for each chip in the\ @@ -342,14 +379,15 @@ def _update_neighbour(tables, neighbour, current, source, weight): .format(neighbour.destination_x, neighbour.destination_y)) def _retrace_back_to_source( - self, dest, tables, edge, nodes_info, source_processor, graph): + self, dest_xy, dest_cores, dest_links, tables, nodes_info, + source_processor, source_link, pre_vertex, partition_id): """ :param Placement dest: Destination placement :param dict(tuple(int,int),_DijkstraInfo) tables: :param MachineEdge edge: :param dict(tuple(int,int),_NodeInfo) nodes_info: :param int source_processor: - :param MachineGraph graph: + :param int source_link: :return: the next coordinates to look into :rtype: tuple(int, int) :raise PacmanRoutingException: @@ -359,27 +397,13 @@ def _retrace_back_to_source( goes to a node that's not considered in the weighted search. 
""" # Set the tracking node to the destination to begin with - x, y = dest.x, dest.y - routing_entry_route_processors = [] - - # if the processor is None, don't add to router path entry - if dest.p is not None: - routing_entry_route_processors.append(dest.p) - routing_entry_route_links = None - - # build the multicast entry - partitions = graph.get_multicast_edge_partitions_starting_at_vertex( - edge.pre_vertex) - - prev_entry = None - for partition in partitions: - if edge in partition: - entry = MulticastRoutingTableByPartitionEntry( - out_going_links=routing_entry_route_links, - outgoing_processors=routing_entry_route_processors) - self._routing_paths.add_path_entry( - entry, dest.x, dest.y, partition) - prev_entry = entry + x, y = dest_xy + + entry = MulticastRoutingTableByPartitionEntry( + dest_links, dest_cores) + self._routing_paths.add_path_entry( + entry, x, y, pre_vertex, partition_id) + prev_entry = entry while tables[x, y].cost != 0: for idx, neighbour in enumerate(nodes_info[x, y].neighbours): @@ -396,7 +420,7 @@ def _retrace_back_to_source( if tables[n_xy].cost is not None: x, y, prev_entry, added = self._create_routing_entry( n_xy, tables, idx, nodes_info, x, y, - prev_entry, edge, graph) + prev_entry, pre_vertex, partition_id) if added: break else: @@ -405,12 +429,15 @@ def _retrace_back_to_source( " did not find a preceding node! Consider increasing " "acceptable discrepancy between sought traceback cost" " and actual cost at node. Terminating...") - prev_entry.incoming_processor = source_processor + if source_processor is not None: + prev_entry.incoming_processor = source_processor + if source_link is not None: + prev_entry.incoming_link = source_link return x, y def _create_routing_entry( self, neighbour_xy, tables, neighbour_index, - nodes_info, x, y, previous_entry, edge, graph): + nodes_info, x, y, previous_entry, pre_vertex, partition_id): """ Create a new routing entry :param tuple(int,int) neighbour_xy: @@ -420,8 +447,6 @@ def _create_routing_entry( :param int x: :param int y: :param MulticastRoutingTableByPartitionEntry previous_entry: - :param MachineEdge edge: - :param MachineGraph graph: :return: x, y, previous_entry, made_an_entry :rtype: tuple(int, int, MulticastRoutingTableByPartitionEntry, bool) :raise PacmanRoutingException: @@ -443,17 +468,13 @@ def _create_routing_entry( if (neighbours_lowest_cost is not None and self._close_enough(neighbours_lowest_cost, chip_sought_cost)): # build the multicast entry - partns = graph.get_multicast_edge_partitions_starting_at_vertex( - edge.pre_vertex) - entry = None - for partition in partns: - if edge in partition: - entry = MulticastRoutingTableByPartitionEntry( - dec_direction, None) - previous_entry.incoming_link = neighbour_index - # add entry for next hop going backwards into path - self._routing_paths.add_path_entry( - entry, neighbour_xy[0], neighbour_xy[1], partition) + entry = MulticastRoutingTableByPartitionEntry( + dec_direction, None) + previous_entry.incoming_link = neighbour_index + # add entry for next hop going backwards into path + self._routing_paths.add_path_entry( + entry, neighbour_xy[0], neighbour_xy[1], pre_vertex, + partition_id) previous_entry = entry made_an_entry = True diff --git a/pacman/operations/router_algorithms/ner_route.py b/pacman/operations/router_algorithms/ner_route.py index d58e11007..fb9a512de 100644 --- a/pacman/operations/router_algorithms/ner_route.py +++ b/pacman/operations/router_algorithms/ner_route.py @@ -25,60 +25,20 @@ 
https://github.com/project-rig/rig/blob/master/rig/place_and_route/route/utils.py """ -import heapq -import itertools import functools -from collections import deque, defaultdict +from collections import defaultdict from spinn_utilities.progress_bar import ProgressBar -from pacman.exceptions import MachineHasDisconnectedSubRegion -from pacman.model.graphs import ( - AbstractFPGA, AbstractVirtual, AbstractSpiNNakerLink) +from spinn_utilities.ordered_set import OrderedSet from pacman.model.routing_table_by_partition import ( - MulticastRoutingTableByPartition, MulticastRoutingTableByPartitionEntry) -from .routing_tree import RoutingTree - - -def _convert_a_route( - routing_tables, partition, incoming_processor, incoming_link, - partition_route): - """ - Converts the algorithm specific partition_route back to standard spinnaker - and ands it to the routing_tables. - - :param MulticastRoutingTableByPartition routing_tables: - spinnaker format routing tables - :param AbstractSingleSourcePartition partition: \ - Partition this route applies to - :param int or None incoming_processor: processor this link came from - :param int or None incoming_link: link this link came from - :param RoutingTree partition_route: algorithm specific format of the route - """ - x, y = partition_route.chip - - next_hops = list() - processor_ids = list() - link_ids = list() - for (route, next_hop) in partition_route.children: - if route is not None: - next_incoming_link = None - if route >= 6: - # The route was offset as first 6 are the links - processor_ids.append(route - 6) - else: - link_ids.append(route) - next_incoming_link = (route + 3) % 6 - if isinstance(next_hop, RoutingTree): - next_hops.append((next_hop, next_incoming_link)) - - entry = MulticastRoutingTableByPartitionEntry( - link_ids, processor_ids, incoming_processor, incoming_link) - routing_tables.add_path_entry(entry, x, y, partition) - - for next_hop, next_incoming_link in next_hops: - _convert_a_route( - routing_tables, partition, None, next_incoming_link, next_hop) + MulticastRoutingTableByPartition) +from pacman.utilities.algorithm_utilities.routing_algorithm_utilities import ( + route_has_dead_links, avoid_dead_links, convert_a_route, + longest_dimension_first, nodes_to_trees, vertex_xy, targets_by_chip, + least_busy_dimension_first, get_app_partitions, vertex_xy_and_route) +from pacman.model.graphs.application import ApplicationVertex +from pacman.utilities.algorithm_utilities.routing_tree import RoutingTree def _ner_net(src, destinations, machine, vector_to_nodes): @@ -95,13 +55,14 @@ def _ner_net(src, destinations, machine, vector_to_nodes): :param vector_to_nodes: ?????????? :return: A RoutingTree is produced rooted at the source and visiting all - destinations but which does not contain any vertices etc. For - convenience, a dictionary mapping from destination (x, y) coordinates - to the associated RoutingTree is provided to allow the caller to insert - these items. - :rtype: tuple(RoutingTree, dict(tuple(int,int),RoutingTree)) + destinations but which does not contain any vertices etc. + :rtype: RoutingTree """ + # The radius to check for neighbours, and the total number of chips that + # could appear in the radius radius = 20 + n_nodes_radius = 1261 + # Map from (x, y) to RoutingTree objects route = {src: RoutingTree(src)} @@ -115,20 +76,25 @@ def _ner_net(src, destinations, machine, vector_to_nodes): # Try to find a nearby (within radius hops) node in the routing tree # that we can route to (falling back on just routing to the source). 
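The hunk that follows swaps between two searches for that nearby node. Scanning every chip already in the tree costs one `get_vector_length` call per tree node, while spiralling outwards from the destination visits at most the number of chips within `radius` hops, which on SpiNNaker's hexagonal mesh is 1 + 3r(r + 1) = 1261 for r = 20 (the `n_nodes_radius` constant above). A rough sketch of the selection logic, with a hypothetical `ring_xys` generator standing in for `machine.concentric_xys`:

```python
def pick_neighbour(route_xys, destination, distance, ring_xys,
                   radius=20, n_nodes_radius=1261):
    """Find a chip already in the routing tree within radius hops of
    the destination, or None to fall back on routing to the source.

    distance -- callable (xy, xy) -> hop count (cf. get_vector_length)
    ring_xys -- hypothetical generator of chip coordinates in rings of
                increasing radius around a point (cf. concentric_xys)
    """
    if len(route_xys) / 3 > n_nodes_radius:
        # Large tree: scan outwards from the destination instead
        for xy in ring_xys(radius, destination):
            if xy in route_xys:
                return xy   # rings grow outwards; the first hit is nearest
        return None
    # Small tree: scan the tree, keeping the closest node within range
    neighbour, neighbour_distance = None, None
    for xy in route_xys:
        d = distance(xy, destination)
        if d <= radius and (neighbour is None or d < neighbour_distance):
            neighbour, neighbour_distance = xy, d
    return neighbour
```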
- # - # This implementation scans the list of all route nodes created so far - # and finds the closest node which is < radius hops - # (falling back on the origin if no node is closer than radius hops). - - neighbour = None - neighbour_distance = None - for candidate_neighbour in route: - distance = machine.get_vector_length( - candidate_neighbour, destination) - if distance <= radius and ( - neighbour is None or distance < neighbour_distance): - neighbour = candidate_neighbour - neighbour_distance = distance + neighbour = None + if len(route) / 3 > n_nodes_radius: + # This implementation scans potential neighbours in an expanding + # radius; this is ~3x faster per iteration than the one below. + for candidate in machine.concentric_xys(radius, destination): + if candidate in route: + neighbour = candidate + break + else: + # This implementation scans the list of all route nodes created so + # far and finds the closest node which is < radius hops. This is + # ~3x slower per iteration than the one above. + neighbour_distance = None + for candidate_neighbour in route: + distance = machine.get_vector_length( + candidate_neighbour, destination) + if distance <= radius and ( + neighbour is None or distance < neighbour_distance): + neighbour = candidate_neighbour + neighbour_distance = distance # Fall back on routing directly to the source if no nodes within radius # hops of the destination was found. @@ -145,7 +111,7 @@ # the first point where the route intersects with a connected node. nodes = vector_to_nodes(vector, neighbour, machine) i = len(nodes) - for direction, (x, y) in reversed(nodes): + for _direction, (x, y) in reversed(nodes): i -= 1 if (x, y) in route: # We've just bumped into a node which is already part of the @@ -157,293 +123,12 @@ break # Take the longest dimension first route. - last_node = route[neighbour] - for direction, (x, y) in nodes: - this_node = RoutingTree((x, y)) - route[(x, y)] = this_node - - last_node.append_child((direction, this_node)) - last_node = this_node - - return route[src], route - - -def _is_linked(source, target, direction, machine): - """ - :param tuple(int,int) source: - :param tuple(int,int) target: - :param int direction: - :param ~spinn_machine.Machine machine: - :rtype: bool - """ - s_chip = machine.get_chip_at(source[0], source[1]) - if s_chip is None: - return False - link = s_chip.router.get_link(direction) - if link is None: - return False - if link.destination_x != target[0]: - return False - if link.destination_y != target[1]: - return False - return True - - -def _copy_and_disconnect_tree(root, machine): - """ - Copy a RoutingTree (containing nothing but RoutingTrees), disconnecting - nodes which are not connected in the machine. - - Note that if a dead chip is part of the input RoutingTree, no corresponding - node will be included in the copy. The assumption behind this is that the - only reason a tree would visit a dead chip is because a route passed - through the chip and wasn't actually destined to arrive at that chip. This - situation is impossible to confirm since the input routing trees have not - yet been populated with vertices. The caller is responsible for being - sensible. - - :param RoutingTree root: - The root of the RoutingTree that contains nothing but RoutingTrees - (i.e. no children which are vertices or links).
- :param ~spinn_machine.Machine machine: - The machine in which the routes exist - :return: (root, lookup, broken_links) - Where: - * `root` is the new root of the tree - :py:class:`~.RoutingTree` - * `lookup` is a dict {(x, y): - :py:class:`~.RoutingTree`, ...} - * `broken_links` is a set ([(parent, child), ...]) containing all - disconnected parent and child (x, y) pairs due to broken links. - :rtype: tuple(RoutingTree, dict(tuple(int,int),RoutingTree), - set(tuple(tuple(int,int),tuple(int,int)))) - """ - new_root = None - - # Lookup for copied routing tree {(x, y): RoutingTree, ...} - new_lookup = {} - - # List of missing connections in the copied routing tree [(new_parent, - # new_child), ...] - broken_links = set() - - # A queue [(new_parent, direction, old_node), ...] - to_visit = deque([(None, None, root)]) - while to_visit: - new_parent, direction, old_node = to_visit.popleft() - - if machine.is_chip_at(old_node.chip[0], old_node.chip[1]): - # Create a copy of the node - new_node = RoutingTree(old_node.chip) - new_lookup[new_node.chip] = new_node - else: - # This chip is dead, move all its children into the parent node - assert new_parent is not None, \ - "Net cannot be sourced from a dead chip." - new_node = new_parent - - if new_parent is None: - # This is the root node - new_root = new_node - else: - if new_node is not new_parent: - # If this node is not dead, check connectivity to parent - # node (no reason to check connectivity between a dead node - # and its parent). - if _is_linked( - new_parent.chip, new_node.chip, direction, machine): - # Is connected via working link - new_parent.append_child((direction, new_node)) - else: - # Link to parent is dead (or original parent was dead and - # the new parent is not adjacent) - broken_links.add((new_parent.chip, new_node.chip)) - - # Copy children - for child_direction, child in old_node.children: - to_visit.append((new_node, child_direction, child)) - - return new_root, new_lookup, broken_links - - -def _a_star(sink, heuristic_source, sources, machine): - """ Use A* to find a path from any of the sources to the sink. - - Note that the heuristic means that the search will proceed towards - heuristic_source without any concern for any other sources. This means that - the algorithm may miss a very close neighbour in order to pursue its goal - of reaching heuristic_source. This is not considered a problem since 1) the - heuristic source will typically be in the direction of the rest of the tree - and near by and often the closest entity 2) it prevents us accidentally - forming loops in the rest of the tree since we'll stop as soon as we touch - any part of it. - - :param tuple(int,int) sink: (x, y) - :param tuple(int,int) heuristic_source: (x, y) - An element from `sources` which is used as a guiding heuristic for the - A* algorithm. - :param set(tuple(int,int)) sources: set([(x, y), ...]) - :param ~spinn_machine.Machine machine: - :return: [(int, (x, y)), ...] - A path starting with a coordinate in `sources` and terminating at - connected neighbour of `sink` (i.e. the path does not include `sink`). - The direction given is the link down which to proceed from the given - (x, y) to arrive at the next point in the path. - :rtype: list(tuple(int,tuple(int,int))) - """ - # Select the heuristic function to use for distances - heuristic = (lambda the_node: machine.get_vector_length( - the_node, heuristic_source)) - - # A dictionary {node: (direction, previous_node}. 
An entry indicates that - # 1) the node has been visited and 2) which node we hopped from (and the - # direction used) to reach previous_node. This may be None if the node is - # the sink. - visited = {sink: None} - - # The node which the tree will be reconnected to - selected_source = None - - # A heap (accessed via heapq) of (distance, (x, y)) where distance is the - # distance between (x, y) and heuristic_source and (x, y) is a node to - # explore. - to_visit = [(heuristic(sink), sink)] - while to_visit: - _, node = heapq.heappop(to_visit) - - # Terminate if we've found the destination - if node in sources: - selected_source = node - break - - # Try all neighbouring locations. - for neighbour_link in range(6): # Router.MAX_LINKS_PER_ROUTER - # Note: link identifiers arefrom the perspective of the neighbour, - # not the current node! - neighbour = machine.xy_over_link( - # Same as Router.opposite - node[0], node[1], (neighbour_link + 3) % 6) - - # Skip links which are broken - if not machine.is_link_at( - neighbour[0], neighbour[1], neighbour_link): - continue - - # Skip neighbours who have already been visited - if neighbour in visited: - continue - - # Explore all other neighbours - visited[neighbour] = (neighbour_link, node) - heapq.heappush(to_visit, (heuristic(neighbour), neighbour)) - - # Fail of no paths exist - if selected_source is None: - raise MachineHasDisconnectedSubRegion( - "Could not find path from {} to {}".format( - sink, heuristic_source)) - - # Reconstruct the discovered path, starting from the source we found and - # working back until the sink. - path = [(visited[selected_source][0], selected_source)] - while visited[path[-1][1]][1] != sink: - node = visited[path[-1][1]][1] - direction = visited[node][0] - path.append((direction, node)) - - return path - - -def _route_has_dead_links(root, machine): - """ Quickly determine if a route uses any dead links. - - :param RoutingTree root: - The root of the RoutingTree which contains nothing but RoutingTrees - (i.e. no vertices and links). - :param ~spinn_machine.Machine machine: - The machine in which the routes exist. - :return: True if the route uses any dead/missing links, False otherwise. - :rtype: bool - """ - for _, (x, y), routes in root.traverse(): - chip = machine.get_chip_at(x, y) - for route in routes: - if chip is None: - return True - if not chip.router.is_link(route): - return True - return False - + nodes_to_trees(nodes, neighbour, route) -def _avoid_dead_links(root, machine): - """ Modify a RoutingTree to route-around dead links in a Machine. + return route[src] - Uses A* to reconnect disconnected branches of the tree (due to dead links - in the machine). - :param RoutingTree root: - The root of the RoutingTree which contains nothing but RoutingTrees - (i.e. no vertices and links). - :param ~spinn_machine.Machine machine: - The machine in which the routes exist. - :return: - A new RoutingTree is produced rooted as before. A dictionary mapping - from (x, y) to the associated RoutingTree is provided for convenience - :rtype: tuple(RoutingTree,dict(tuple(int,int),RoutingTree)) - """ - # Make a copy of the RoutingTree with all broken parts disconnected - root, lookup, broken_links = _copy_and_disconnect_tree(root, machine) - - # For each disconnected subtree, use A* to connect the tree to *any* other - # disconnected subtree. Note that this process will eventually result in - # all disconnected subtrees being connected, the result is a fully - # connected tree. 
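The loop below (now provided by `avoid_dead_links` in `routing_algorithm_utilities`, per the imports added above) heals each broken (parent, child) pair by searching from the orphaned subtree back to any chip in the rest of the tree, which is why repeated repairs converge on a single connected tree. A toy version of that search, using plain breadth-first search where this code uses A* (the heuristic only speeds the search up; the reachability guarantee is the same):

```python
from collections import deque


def find_reconnect_path(start, tree_xys, neighbours):
    """BFS from start until any chip already in the tree is reached.

    Returns a path [(link, (x, y)), ...] whose last chip is in tree_xys.
    neighbours -- callable: (x, y) -> iterable of (link, (x, y)) pairs
                  over working links only, so dead links are never used
    """
    visited = {start}
    to_visit = deque([(start, [])])
    while to_visit:
        xy, path = to_visit.popleft()
        if xy in tree_xys:
            return path
        for link, nxt in neighbours(xy):
            if nxt not in visited:
                visited.add(nxt)
                to_visit.append((nxt, path + [(link, nxt)]))
    raise ValueError(f"{start} is disconnected from the rest of the tree")
```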
- for parent, child in broken_links: - child_chips = set(c.chip for c in lookup[child]) - - # Try to reconnect broken links to any other part of the tree - # (excluding this broken subtree itself since that would create a - # cycle). - path = _a_star(child, parent, - set(lookup).difference(child_chips), - machine) - # Add new RoutingTree nodes to reconnect the child to the tree. - last_node = lookup[path[0][1]] - last_direction = path[0][0] - for direction, (x, y) in path[1:]: - if (x, y) not in child_chips: - # This path segment traverses new ground so we must create a - # new RoutingTree for the segment. - new_node = RoutingTree((x, y)) - # A* will not traverse anything but chips in this tree so this - # assert is meerly a sanity check that this occurred correctly. - assert (x, y) not in lookup, "Cycle created." - lookup[(x, y)] = new_node - else: - # This path segment overlaps part of the disconnected tree - # (A* doesn't know where the disconnected tree is and thus - # doesn't avoid it). To prevent cycles being introduced, this - # overlapped node is severed from its parent and merged as part - # of the A* path. - new_node = lookup[(x, y)] - - # Find the node's current parent and disconnect it. - for node in lookup[child]: # pragma: no branch - dn = [(d, n) for d, n in node.children if n == new_node] - assert len(dn) <= 1 - if dn: - node.remove_child(dn[0]) - # A node can only have one parent so we can stop now. - break - last_node.append_child((last_direction, new_node)) - last_node = new_node - last_direction = direction - last_node.append_child((last_direction, lookup[child])) - - return root, lookup - - -def _do_route(source_vertex, post_vertexes, machine, placements, +def _do_route(source_xy, post_vertexes, machine, placements, vector_to_nodes): """ Routing algorithm based on Neighbour Exploring Routing (NER). @@ -456,7 +141,7 @@ def _do_route(source_vertex, post_vertexes, machine, placements, fully connected, this algorithm will always succeed though no consideration of congestion or routing-table usage is attempted. - :param MachineVertex source_vertex: + :param tuple(int,int) source_xy: :param iterable(MachineVertex) post_vertexes: :param ~spinn_machine.Machine machine: :param Placements placements: @@ -464,170 +149,22 @@ def _do_route(source_vertex, post_vertexes, machine, placements, :return: :rtype: RoutingTree """ - source_xy = _vertex_xy(source_vertex, placements, machine) - destinations = set(_vertex_xy(post_vertex, placements, machine) + destinations = set(vertex_xy(post_vertex, placements, machine) for post_vertex in post_vertexes) # Generate routing tree (assuming a perfect machine) - root, lookup = _ner_net(source_xy, destinations, machine, vector_to_nodes) + root = _ner_net(source_xy, destinations, machine, vector_to_nodes) # Fix routes to avoid dead chips/links - if _route_has_dead_links(root, machine): - root, lookup = _avoid_dead_links(root, machine) - - # Add the sinks in the net to the RoutingTree - for post_vertex in post_vertexes: - tree_node = lookup[_vertex_xy(post_vertex, placements, machine)] - if isinstance(post_vertex, AbstractVirtual): - # Sinks with route-to-endpoint constraints must be routed - # in the according directions. 
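The sink-attachment logic being deleted around this point encodes one more SpiNNaker detail: a virtual sink (an FPGA or SpiNNaker link device) has no core to target, so its route must terminate on the link leading off-board. A condensed sketch of that dispatch, built only from calls visible in the deleted code (the replacement path delegates this to `vertex_xy_and_route` and `targets_by_chip`):

```python
def sink_target(vertex, placements, machine):
    """Return ('link', link_id) for a virtual sink, ('core', p) otherwise."""
    if isinstance(vertex, AbstractFPGA):
        link_data = machine.get_fpga_link_with_id(
            vertex.fpga_id, vertex.fpga_link_id, vertex.board_address)
        return "link", link_data.connected_link
    if isinstance(vertex, AbstractSpiNNakerLink):
        link_data = machine.get_spinnaker_link_with_id(
            vertex.spinnaker_link_id, vertex.board_address)
        return "link", link_data.connected_link
    # Ordinary sink: target the core it was placed on
    return "core", placements.get_placement_of_vertex(vertex).p
```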
- route = _route_to_endpoint(post_vertex, machine) - tree_node.append_child((route, post_vertex)) - else: - core = placements.get_placement_of_vertex(post_vertex).p - if core is not None: - # Offset the core by 6 as first 6 are the links - tree_node.append_child((core + 6, post_vertex)) - else: - # Sinks without that resource are simply included without - # an associated route - tree_node.append_child((None, post_vertex)) + if route_has_dead_links(root, machine): + root = avoid_dead_links(root, machine) return root -def _vertex_xy(vertex, placements, machine): - """ - :param MachineVertex vertex: - :param Placements placements: - :param ~spinn_machine.Machine machine: - :rtype: tuple(int,int) - """ - if not isinstance(vertex, AbstractVirtual): - placement = placements.get_placement_of_vertex(vertex) - return placement.x, placement.y - link_data = None - if isinstance(vertex, AbstractFPGA): - link_data = machine.get_fpga_link_with_id( - vertex.fpga_id, vertex.fpga_link_id, vertex.board_address) - elif isinstance(vertex, AbstractSpiNNakerLink): - link_data = machine.get_spinnaker_link_with_id( - vertex.spinnaker_link_id, vertex.board_address) - return link_data.connected_chip_x, link_data.connected_chip_y - - -def _route_to_endpoint(vertex, machine): - """ - :param MachineVertex vertex: - :param ~spinn_machine.Machine machine: - :rtype: int - """ - if isinstance(vertex, AbstractFPGA): - link_data = machine.get_fpga_link_with_id( - vertex.fpga_id, vertex.fpga_link_id, vertex.board_address) - else: - link_data = machine.get_spinnaker_link_with_id( - vertex.spinnaker_link_id, vertex.board_address) - return link_data.connected_link - - -def _least_busy_dimension_first(traffic, vector, start, machine): - """ List the (x, y) steps on a route that goes through the least busy\ - routes first. - - :param traffic: A dictionary of (x, y): count of routes - :param vector: (x, y, z) - The vector which the path should cover. - :param start: (x, y) - The coordinates from which the path should start (note this is a 2D - coordinate). - :param machine:the spinn machine. - :return: min route - """ - - # Go through and find the sum of traffic depending on the route taken - min_sum = 0 - min_route = None - for order in itertools.permutations([0, 1, 2]): - dm_vector = [(i, vector[i]) for i in order] - route = _get_route(dm_vector, start, machine) - sum_traffic = sum(traffic[x, y] for _, (x, y) in route) - if min_route is None or min_sum > sum_traffic: - min_sum = sum_traffic - min_route = route - - for _, (x, y) in min_route: - traffic[x, y] += 1 - - return min_route - - -def _longest_dimension_first(vector, start, machine): - """ - List the (x, y) steps on a longest-dimension first route. - - :param tuple(int,int,int) vector: (x, y, z) - The vector which the path should cover. - :param tuple(int,int) start: (x, y) - The coordinates from which the path should start (note this is a 2D - coordinate). 
- :param ~spinn_machine.Machine machine: - :return: - :rtype: list(tuple(int,int)) - """ - return _get_route( - sorted(enumerate(vector), key=(lambda x: abs(x[1])), reverse=True), - start, machine) - - -def _get_route(dm_vector, start, machine): - x, y = start - - out = [] - - for dimension, magnitude in dm_vector: - if magnitude == 0: - continue - - if dimension == 0: # x - if magnitude > 0: - # Move East (0) magnitude times - for _ in range(magnitude): - x, y = machine.xy_over_link(x, y, 0) - out.append((0, (x, y))) - else: - # Move West (3) -magnitude times - for _ in range(magnitude, 0): - x, y = machine.xy_over_link(x, y, 3) - out.append((3, (x, y))) - elif dimension == 1: # y - if magnitude > 0: - # Move North (2) magnitude times - for _ in range(magnitude): - x, y = machine.xy_over_link(x, y, 2) - out.append((2, (x, y))) - else: - # Move South (5) -magnitude times - for _ in range(magnitude, 0): - x, y = machine.xy_over_link(x, y, 5) - out.append((5, (x, y))) - else: # z - if magnitude > 0: - # Move SouthWest (4) magnitude times - for _ in range(magnitude): - x, y = machine.xy_over_link(x, y, 4) - out.append((4, (x, y))) - else: - # Move NorthEast (1) -magnitude times - for _ in range(magnitude, 0): - x, y = machine.xy_over_link(x, y, 1) - out.append((1, (x, y))) - return out - - -def _ner_route(machine_graph, machine, placements, vector_to_nodes): +def _ner_route(graph, machine, placements, vector_to_nodes): """ Performs routing using rig algorithm - :param MachineGraph machine_graph: + :param ApplicationGraph graph: :param ~spinn_machine.Machine machine: :param Placements placements: :return: @@ -635,46 +172,68 @@ def _ner_route(machine_graph, machine, placements, vector_to_nodes): """ routing_tables = MulticastRoutingTableByPartition() - progress_bar = ProgressBar(len(machine_graph.vertices), "Routing") - - for source_vertex in progress_bar.over(machine_graph.vertices): - # handle the vertex edges - for partition in machine_graph.\ - get_multicast_edge_partitions_starting_at_vertex( - source_vertex): - post_vertexes = list( - e.post_vertex for e in partition.edges) + partitions = get_app_partitions(graph) + + progress_bar = ProgressBar(len(partitions), "Routing") + + for partition in progress_bar.over(partitions): + + source = partition.pre_vertex + post_vertices_by_source = defaultdict(OrderedSet) + for edge in partition.edges: + splitter = edge.post_vertex.splitter + target_vertices = splitter.get_source_specific_in_coming_vertices( + source, partition.identifier) + for tgt, srcs in target_vertices: + for src in srcs: + if isinstance(src, ApplicationVertex): + for s in src.splitter.get_out_going_vertices( + partition.identifier): + post_vertices_by_source[s].add(tgt) + + outgoing = OrderedSet(source.splitter.get_out_going_vertices( + partition.identifier)) + for in_part in source.splitter.get_internal_multicast_partitions(): + if in_part.identifier == partition.identifier: + outgoing.add(in_part.pre_vertex) + for edge in in_part.edges: + post_vertices_by_source[in_part.pre_vertex].add( + edge.post_vertex) + + for m_vertex in outgoing: + post_vertexes = post_vertices_by_source[m_vertex] + source_xy, (m_vertex, core, link) = vertex_xy_and_route( + m_vertex, placements, machine) routing_tree = _do_route( - source_vertex, post_vertexes, machine, placements, + source_xy, post_vertexes, machine, placements, vector_to_nodes) - incoming_processor = placements.get_placement_of_vertex( - partition.pre_vertex).p - _convert_a_route( - routing_tables, partition, incoming_processor, None, - 
routing_tree) + targets = targets_by_chip(post_vertexes, placements, machine) + convert_a_route( + routing_tables, m_vertex, partition.identifier, + core, link, routing_tree, targets) progress_bar.end() return routing_tables -def ner_route(machine_graph, machine, placements): +def ner_route(machine, graph, placements): """ basic ner router - :param MachineGraph machine_graph: the machine graph + :param ApplicationGraph graph: the graph :param ~spinn_machine.Machine machine: spinnaker machine :param Placements placements: the placements :return: a routing table by partition :rtype: MulticastRoutingTableByPartition """ return _ner_route( - machine_graph, machine, placements, _longest_dimension_first) + graph, machine, placements, longest_dimension_first) -def ner_route_traffic_aware(machine_graph, machine, placements): +def ner_route_traffic_aware(machine, graph, placements): """ traffic-aware ner router - :param MachineGraph machine_graph: the machine graph + :param ApplicationGraph graph: the graph :param ~spinn_machine.Machine machine: spinnaker machine :param Placements placements: the placements :return: a routing table by partition @@ -682,5 +241,5 @@ def ner_route_traffic_aware(machine_graph, machine, placements): """ traffic = defaultdict(lambda: 0) return _ner_route( - machine_graph, machine, placements, - functools.partial(_least_busy_dimension_first, traffic)) + graph, machine, placements, + functools.partial(least_busy_dimension_first, traffic)) diff --git a/pacman/operations/router_compressors/ranged_compressor.py b/pacman/operations/router_compressors/ranged_compressor.py index bc1f323fe..fdaaee5da 100644 --- a/pacman/operations/router_compressors/ranged_compressor.py +++ b/pacman/operations/router_compressors/ranged_compressor.py @@ -90,7 +90,7 @@ def compress_table(self, uncompressed): return uncompressed # Step 1 get the entries and make sure they are sorted by key - self._entries = uncompressed.multicast_routing_entries + self._entries = list(uncompressed.multicast_routing_entries) self._entries.sort(key=lambda x: x.routing_entry_key) if not self._validate(): return uncompressed diff --git a/pacman/operations/routing_info_allocator_algorithms/malloc_based_routing_allocator/__init__.py b/pacman/operations/routing_info_allocator_algorithms/malloc_based_routing_allocator/__init__.py deleted file mode 100644 index eeb9dfb90..000000000 --- a/pacman/operations/routing_info_allocator_algorithms/malloc_based_routing_allocator/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (c) 2017-2019 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
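Stepping back from the routers for a moment: the one-line `ranged_compressor` change above wraps the property fetch in `list(...)` because `multicast_routing_entries` need not hand back a mutable list, and sorting a shared collection in place would silently reorder the uncompressed table; copying first keeps the sort private. In miniature:

```python
from collections import namedtuple

Entry = namedtuple("Entry", "routing_entry_key mask")
view = (Entry(0x200, 0xF00), Entry(0x100, 0xF00))   # pretend read-only view

entries = list(view)                             # private, sortable copy
entries.sort(key=lambda e: e.routing_entry_key)  # a tuple has no .sort()
assert [e.routing_entry_key for e in entries] == [0x100, 0x200]
assert view[0].routing_entry_key == 0x200        # original view untouched
```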
-from .malloc_based_routing_info_allocator import ( - malloc_based_routing_info_allocator) - -__all__ = ["malloc_based_routing_info_allocator"] diff --git a/pacman/operations/routing_info_allocator_algorithms/malloc_based_routing_allocator/key_field_generator.py b/pacman/operations/routing_info_allocator_algorithms/malloc_based_routing_allocator/key_field_generator.py deleted file mode 100644 index 808a98748..000000000 --- a/pacman/operations/routing_info_allocator_algorithms/malloc_based_routing_allocator/key_field_generator.py +++ /dev/null @@ -1,210 +0,0 @@ -# Copyright (c) 2017-2019 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -import numpy -from pacman.utilities.utility_calls import ( - compress_bits_from_bit_array, compress_from_bit_array, expand_to_bit_array) -from pacman.utilities.utility_objs import Field -from pacman.exceptions import PacmanRouteInfoAllocationException - - -class KeyFieldGenerator(object): - """ Handle fields in a routing key. - """ - - __slots__ = [ - # The fixed mask over which to generate keys - "_fixed_mask", - - # True if there is another key to be read - "_is_next_key", - - # The list of free spaces to constrain to - "_free_space_list", - - # The position in the free space list - "_free_space_pos", - - # True if the next key has been read, False if not - "_next_key_read", - - # The number of keys possible given the mask - "_n_mask_keys", - - # The fields of the mask to constrain to - "_fields", - - # The indices of the ones in the fields - "_field_ones", - - # The next valid value of the field - "_field_value" - ] - - def __init__(self, fixed_mask, fields, free_space_list): - """ - :param int fixed_mask: - :param fields: - :type fields: list(Field) or None - :param list(ElementFreeSpace) free_space_list: - """ - - self._fixed_mask = fixed_mask - self._is_next_key = True - self._free_space_list = free_space_list - self._free_space_pos = 0 - self._next_key_read = False - self._field_ones = dict() - self._field_value = dict() - - expanded_mask = expand_to_bit_array(fixed_mask) - zeros = numpy.where(expanded_mask == 0)[0] - self._n_mask_keys = 2 ** len(zeros) - - # If there are no fields, add the mask as a field - the_fields = fields - if fields is None or not fields: - n_ones = 32 - len(zeros) - field_max = (2 ** n_ones) - 1 - the_fields = [Field(0, field_max, fixed_mask)] - - # Check that the fields don't cross each other - for idx, field in enumerate(the_fields): - for other_field in the_fields[idx+1:]: - if field != other_field and field.mask & other_field.mask != 0: - raise PacmanRouteInfoAllocationException( - "Field masks {} and {} overlap".format( - field.mask, other_field.mask)) - - # Sort the fields by highest bit range first - self._fields = sorted(the_fields, key=lambda field: field.value, - reverse=True) - - self._update_next_valid_fields() - self._increment_space_until_valid_key() - - def _get_current_space_end_address(self): - current_space = 
self._free_space_list[self._free_space_pos] - return current_space.start_address + current_space.size - - def _increment_space_until_valid_key(self): - while (self._is_next_key and self._get_next_key() >= - self._get_current_space_end_address()): - self._free_space_pos += 1 - self._update_next_valid_fields() - - def _update_next_valid_fields(self): - - # Find the next valid key for the general mask - min_key = self._free_space_list[self._free_space_pos].start_address - if min_key & self._fixed_mask != min_key: - min_key = (min_key + self._n_mask_keys) & self._fixed_mask - - # Generate a set of indices of ones for each field, and then store - # the current value of each field given the minimum key (even if the - # value might be out of range for the key - see later for fix for this) - for field in self._fields: - expanded_mask = expand_to_bit_array(field.value) - field_ones = numpy.where(expanded_mask == 1)[0] - self._field_ones[field] = field_ones - field_min_key = min_key & field.value - field_min_value = compress_bits_from_bit_array( - expand_to_bit_array(field_min_key), field_ones) - self._field_value[field] = field_min_value - - # Update the values (other than the top value) to be valid - for field_no in reversed(range(1, len(self._fields))): - field = self._fields[field_no] - previous_field = self._fields[field_no - 1] - - # If this value is too small, set it to its minimum - if self._field_value[field] < field.lo: - self._field_value[field] = field.lo - - # If this value is too large, set it to its minimum - # and up the value of the next field - if self._field_value[field] > field.hi: - self._field_value[field] = field.lo - self._field_value[previous_field] += 1 - - # If the top value is above its valid range, there are no valid keys - top_field = self._fields[0] - if self._field_value[top_field] > top_field.hi: - self._is_next_key = False - - # If the top value is below its valid range, set it to the first valid - # value - if self._field_value[top_field] < top_field.lo: - self._field_value[top_field] = top_field.lo - - def _increment_key(self): - - # Update the key - fields_updated = False - field_no = len(self._fields) - 1 - while not fields_updated and field_no >= 0: - field = self._fields[field_no] - self._field_value[field] = self._field_value[field] + 1 - if self._field_value[field] > field.hi: - self._field_value[field] = field.lo - field_no -= 1 - else: - fields_updated = True - - # If the first field is now too big, there are no more keys - first_field = self._fields[0] - if self._field_value[first_field] > first_field.hi: - self._is_next_key = False - - self._increment_space_until_valid_key() - - def _get_next_key(self): - - # Form the key from the value of the fields - expanded_key = numpy.zeros(32, dtype="uint8") - for field in self._fields: - field_ones = self._field_ones[field] - expanded_value = expand_to_bit_array(self._field_value[field]) - expanded_key[field_ones] = expanded_value[-len(field_ones):] - key = compress_from_bit_array(expanded_key) - - # Return the generated key - return key - - @property - def is_next_key(self): - if self._next_key_read: - self._increment_key() - self._next_key_read = False - return self._is_next_key - - @property - def next_key(self): - - # If there are no more keys, return None - if not self._is_next_key: - return None - self._next_key_read = True - return self._get_next_key() - - def __iter__(self): - return self - - def next(self): - if not self.is_next_key: - raise StopIteration - return self.next_key - - __next__ = next diff 
--git a/pacman/operations/routing_info_allocator_algorithms/malloc_based_routing_allocator/malloc_based_routing_info_allocator.py b/pacman/operations/routing_info_allocator_algorithms/malloc_based_routing_allocator/malloc_based_routing_info_allocator.py deleted file mode 100644 index a021c8d09..000000000 --- a/pacman/operations/routing_info_allocator_algorithms/malloc_based_routing_allocator/malloc_based_routing_info_allocator.py +++ /dev/null @@ -1,307 +0,0 @@ -# Copyright (c) 2017-2019 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -import logging -from spinn_utilities.progress_bar import ProgressBar -from spinn_utilities.log import FormatAdapter -from pacman.model.constraints.key_allocator_constraints import ( - AbstractKeyAllocatorConstraint, ShareKeyConstraint, FixedMaskConstraint, - FixedKeyAndMaskConstraint, ContiguousKeyRangeContraint) -from .key_field_generator import KeyFieldGenerator -from pacman.model.routing_info import ( - RoutingInfo, BaseKeyAndMask, PartitionRoutingInfo) -from pacman.utilities.utility_calls import ( - check_algorithm_can_support_constraints, get_key_ranges) -from pacman.utilities.algorithm_utilities import ElementAllocatorAlgorithm -from pacman.utilities.algorithm_utilities.routing_info_allocator_utilities \ - import (check_types_of_edge_constraint, get_mulitcast_edge_groups) -from pacman.exceptions import PacmanRouteInfoAllocationException -from .utils import get_possible_masks - -logger = FormatAdapter(logging.getLogger(__name__)) - - -def malloc_based_routing_info_allocator(machine_graph, n_keys_map): - """ - A Routing Info Allocation Allocator algorithm that keeps track of\ - free keys and attempts to allocate them as requested. - - :param MachineGraph machine_graph: - :param AbstractMachinePartitionNKeysMap n_keys_map: - :rtype: RoutingInfo - :raises PacmanRouteInfoAllocationException: - """ - allocator = _MallocBasedRoutingInfoAllocator(n_keys_map) - return allocator.run(machine_graph) - - -class _MallocBasedRoutingInfoAllocator(ElementAllocatorAlgorithm): - """ A Routing Info Allocation Allocator algorithm that keeps track of\ - free keys and attempts to allocate them as requested. 
- """ - - __slots__ = ["_n_keys_map"] - - def __init__(self, n_keys_map): - super().__init__(0, 2 ** 32) - self._n_keys_map = n_keys_map - - def run(self, machine_graph): - """ - :param MachineGraph machine_graph: - :param AbstractMachinePartitionNKeysMap n_keys_map: - :rtype: RoutingInfo - :raises PacmanRouteInfoAllocationException: - """ - # check that this algorithm supports the constraints - check_algorithm_can_support_constraints( - constrained_vertices=machine_graph.outgoing_edge_partitions, - supported_constraints=[ - FixedMaskConstraint, - FixedKeyAndMaskConstraint, - ContiguousKeyRangeContraint, ShareKeyConstraint], - abstract_constraint_type=AbstractKeyAllocatorConstraint) - - # verify that no edge has more than 1 of a constraint ,and that - # constraints are compatible - check_types_of_edge_constraint(machine_graph) - - # final keys allocations - routing_infos = RoutingInfo() - - # Get the edges grouped by those that require the same key - (fixed_keys, shared_keys, fixed_masks, fixed_fields, continuous, - noncontinuous) = get_mulitcast_edge_groups(machine_graph) - - # Go through the groups and allocate keys - progress = ProgressBar( - machine_graph.n_outgoing_edge_partitions, - "Allocating routing keys") - - # allocate the groups that have fixed keys - for group in progress.over(fixed_keys, False): - self._allocate_fixed_keys(group, routing_infos) - - for group in progress.over(fixed_masks, False): - self._allocate_fixed_masks(group, routing_infos) - - for group in progress.over(fixed_fields, False): - self._allocate_fixed_fields(group, routing_infos) - - for group in progress.over(shared_keys, False): - self._allocate_share_key(group, routing_infos) - - for group in continuous: - self._allocate_other_groups(group, routing_infos, True) - - for group in noncontinuous: - self._allocate_other_groups(group, routing_infos, False) - - progress.end() - return routing_infos - - def __get_n_keys(self, group): - """ - :param ConstraintGroup group: - :rtype: int - """ - return max( - self._n_keys_map.n_keys_for_partition(partition) - for partition in group) - - def _allocate_other_groups(self, group, routing_infos, continuous): - """ - :param ConstraintGroup group: - :param RoutingInfo routing_infos: - :param bool continuous: - """ - keys_and_masks = self._allocate_keys_and_masks( - None, None, self.__get_n_keys(group), - contiguous_keys=continuous) - for partition in group: - self._update_routing_objects( - keys_and_masks, routing_infos, partition) - - def _allocate_share_key(self, group, routing_infos): - """ - :param ConstraintGroup group: - :param RoutingInfo routing_infos: - """ - keys_and_masks = self._allocate_keys_and_masks( - None, None, self.__get_n_keys(group)) - - for partition in group: - # update the pacman data objects - self._update_routing_objects(keys_and_masks, routing_infos, - partition) - - def _allocate_fixed_keys(self, group, routing_infos): - """ - :param ConstraintGroup group: - :param RoutingInfo routing_infos: - """ - # Get any fixed keys and masks from the group and attempt to - # allocate them - fixed_key_and_mask_constraint = group.constraint - - fixed_mask = None - self._allocate_fixed_keys_and_masks( - fixed_key_and_mask_constraint.keys_and_masks, fixed_mask) - - for partition in group: - # update the pacman data objects - self._update_routing_objects( - fixed_key_and_mask_constraint.keys_and_masks, routing_infos, - partition) - - def _allocate_fixed_masks(self, group, routing_infos): - """ - :param ConstraintGroup group: - :param RoutingInfo routing_infos: 
- """ - # get mask and fields if need be - fixed_mask = group.constraint.mask - - # try to allocate - keys_and_masks = self._allocate_keys_and_masks( - fixed_mask, None, self.__get_n_keys(group)) - - for partition in group: - # update the pacman data objects - self._update_routing_objects( - keys_and_masks, routing_infos, partition) - - def _allocate_fixed_fields(self, group, routing_infos): - """ - :param ConstraintGroup group: - :param RoutingInfo routing_infos: - """ - fields = group.constraint.fields - - # try to allocate - keys_and_masks = self._allocate_keys_and_masks( - None, fields, self.__get_n_keys(group)) - - for partition in group: - # update the pacman data objects - self._update_routing_objects( - keys_and_masks, routing_infos, partition) - - @staticmethod - def _update_routing_objects(keys_and_masks, routing_infos, group): - """ - :param iterable(BaseKeyAndMask) keys_and_masks: - :param RoutingInfo routing_infos: - :param ConstraintGroup group: - """ - # Allocate the routing information - partition_info = PartitionRoutingInfo(keys_and_masks, group) - routing_infos.add_partition_info(partition_info) - - def _allocate_fixed_keys_and_masks(self, keys_and_masks, fixed_mask): - """ Allocate fixed keys and masks. - - :param iterable(BaseKeyAndMask) keys_and_masks: - the fixed keys and masks combos - :param fixed_mask: fixed mask - :type fixed_mask: int or None - :rtype: None - :raises PacmanRouteInfoAllocationException: - """ - # If there are fixed keys and masks, allocate them - for key_and_mask in keys_and_masks: - # If there is a fixed mask, check it doesn't clash - if fixed_mask is not None and fixed_mask != key_and_mask.mask: - raise PacmanRouteInfoAllocationException( - "Cannot meet conflicting constraints") - - # Go through the mask sets and allocate - for key, n_keys in get_key_ranges( - key_and_mask.key, key_and_mask.mask): - self._allocate_elements(key, n_keys) - - def _allocate_keys_and_masks(self, fixed_mask, fields, partition_n_keys, - contiguous_keys=True): - """ - :param fixed_mask: - :type fixed_mask: int or None - :param fields: - :type fields: iterable(Field) or None - :param int partition_n_keys: - :param bool contiguous_keys: - :rtype: list(BaseKeyAndMask) - :raises PacmanRouteInfoAllocationException: - """ - # If there isn't a fixed mask, generate a fixed mask based - # on the number of keys required - masks_available = [fixed_mask] - if fixed_mask is None: - masks_available = get_possible_masks( - partition_n_keys, contiguous_keys=contiguous_keys) - - # For each usable mask, try all of the possible keys and - # see if a match is possible - mask_found = None - key_found = None - mask = None - for mask in masks_available: - logger.debug("Trying mask {} for {} keys", - hex(mask), partition_n_keys) - - key_found = None - key_generator = KeyFieldGenerator( - mask, fields, self._free_space_tracker) - for key in key_generator: - logger.debug("Trying key {}", hex(key)) - - # Check if all the key ranges can be allocated - matched_all = True - index = 0 - for (base_key, n_keys) in get_key_ranges(key, mask): - logger.debug("Finding slot for {}, n_keys={}", - hex(base_key), n_keys) - index = self._find_slot(base_key, lo=index) - logger.debug("Slot for {} is {}", hex(base_key), index) - if index is None: - matched_all = False - break - space = self._check_allocation(index, base_key, n_keys) - logger.debug("Space for {} is {}", hex(base_key), space) - if space is None: - matched_all = False - break - - if matched_all: - logger.debug("Matched key {}", hex(key)) - key_found 
= key - break - - # If we found a matching key, store the mask that worked - if key_found is not None: - logger.debug("Matched mask {}", hex(mask)) - mask_found = mask - break - - # If we found a working key and mask that can be assigned, - # Allocate them - if key_found is not None and mask_found is not None: - for (base_key, n_keys) in get_key_ranges(key_found, mask): - self._allocate_elements(base_key, n_keys) - - # If we get here, we can assign the keys to the edges - return [BaseKeyAndMask(base_key=key_found, mask=mask)] - - raise PacmanRouteInfoAllocationException( - "Could not find space to allocate keys") diff --git a/pacman/operations/routing_info_allocator_algorithms/malloc_based_routing_allocator/utils.py b/pacman/operations/routing_info_allocator_algorithms/malloc_based_routing_allocator/utils.py deleted file mode 100644 index 8ba4c708f..000000000 --- a/pacman/operations/routing_info_allocator_algorithms/malloc_based_routing_allocator/utils.py +++ /dev/null @@ -1,64 +0,0 @@ -# Copyright (c) 2017-2019 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -from functools import reduce -import itertools - - -def get_possible_masks(n_keys, mask_width=32, contiguous_keys=True): - """ Get the possible masks given the number of keys. - - :param int n_keys: The number of keys to generate a mask for - :param int mask_width: - Number of bits that are meaningful in the mask. 32 by default. - :param bool contiguous_keys: - True if the mask should only have zeros in the LSBs - :return: A generator of all possible masks - :rtype: iterable(int) - """ - # Starting values - n_zeros = (n_keys - 1).bit_length() - assert n_zeros <= mask_width - all_ones_mask = (1 << mask_width) - 1 - - # Get all possible places where the zero bits could be put; this is an - # ideal way to do it too, as it gives us the one with the bits at the - # bottom (the old generation algorithm) first. - places_for_zeroes = itertools.combinations(range(mask_width), n_zeros) - - # If the keys are all contiguous, you can only have one possible mask, - # which is the first one - if contiguous_keys: - zero_bits = next(places_for_zeroes) - return [zero_out_bits(all_ones_mask, zero_bits)] - - # Convert the selected places for zero bits into an iterable of masks - return ( - zero_out_bits(all_ones_mask, zero_bits) - for zero_bits in places_for_zeroes) - - -def zero_out_bits(all_ones_mask, bits_to_zero): - """ Takes a mask (with all interesting bits set to 1) and zeroes out the\ - bits at the given indices. - - :param int all_ones_mask: Initial mask - :param iterable(int) bits_to_zero: Which bits to clear. The LSB is zero. 
- :return: A single mask, with zeroes in all required places - :rtype: int - """ - return reduce( - (lambda mask, bit_to_zero_out: mask & ~(1 << bit_to_zero_out)), - bits_to_zero, all_ones_mask) diff --git a/pacman/operations/routing_info_allocator_algorithms/zoned_routing_info_allocator.py b/pacman/operations/routing_info_allocator_algorithms/zoned_routing_info_allocator.py index c92273f20..fdfa06cd5 100644 --- a/pacman/operations/routing_info_allocator_algorithms/zoned_routing_info_allocator.py +++ b/pacman/operations/routing_info_allocator_algorithms/zoned_routing_info_allocator.py @@ -17,11 +17,13 @@ import math from spinn_utilities.log import FormatAdapter from spinn_utilities.progress_bar import ProgressBar +from spinn_utilities.ordered_set import OrderedSet from pacman.model.routing_info import ( - RoutingInfo, PartitionRoutingInfo, BaseKeyAndMask) -from pacman.utilities.utility_calls import ( - check_algorithm_can_support_constraints, get_key_ranges) -from pacman.exceptions import PacmanRouteInfoAllocationException + RoutingInfo, MachineVertexRoutingInfo, BaseKeyAndMask, + AppVertexRoutingInfo) +from pacman.utilities.utility_calls import get_key_ranges +from pacman.exceptions import PacmanRouteInfoAllocationException,\ + PacmanInvalidParameterException from pacman.model.constraints.key_allocator_constraints import ( AbstractKeyAllocatorConstraint, ContiguousKeyRangeContraint, FixedKeyAndMaskConstraint) @@ -77,9 +79,8 @@ class ZonedRoutingInfoAllocator(object): """ __slots__ = [ - # Passed in parameters - "__machine_graph", - "__n_keys_map", + # A list of vertices and partitions to allocate + "__vertex_partitions", # For each App vertex / Partition name zone keep track of the number of # bites required for the mask for each machine vertex "__atom_bits_per_app_part", @@ -94,20 +95,21 @@ class ZonedRoutingInfoAllocator(object): "__flexible", # List of (key, n_keys) needed for fixed "__fixed_keys", - # Map of partition to fixed_+key_and_mask + # Map of (partition identifier, machine_vertex) to fixed_key_and_mask "__fixed_partitions", # Set of app_part indexes used by fixed "__fixed_used" ] # pylint: disable=attribute-defined-outside-init - def __call__(self, machine_graph, n_keys_map, flexible): + def __call__(self, app_graph, extra_allocations, flexible): """ - :param MachineGraph machine_graph: - The machine graph to allocate the routing info for - :param AbstractMachinePartitionNKeysMap n_keys_map: - A map between the edges and the number of keys required by the - edges + :param ApplicationGraph app_graph: + The graph to allocate the routing info for + :param list(tuple(ApplicationVertex,str)) extra_allocations: + Additional (vertex, partition identifier) pairs to allocate + keys to. These might not appear in partitions in the graph + due to being added by the system. :param bool flexible: Determines if flexible can be use. 
If False, global settings will be attempted :return: The routing information @@ -115,10 +117,15 @@ def __call__(self, machine_graph, n_keys_map, flexible): :raise PacmanRouteInfoAllocationException: If something goes wrong with the allocation """ - # check that this algorithm supports the constraints put onto the - # partitions - self.__machine_graph = machine_graph - self.__n_keys_map = n_keys_map + self.__vertex_partitions = OrderedSet( + (p.pre_vertex, p.identifier) + for p in app_graph.outgoing_edge_partitions) + self.__vertex_partitions.update(extra_allocations) + self.__vertex_partitions.update( + (v, p.identifier) + for v in app_graph.vertices + for p in v.splitter.get_internal_multicast_partitions()) + self.__n_bits_atoms_and_mac = 0 self.__n_bits_machine = 0 self.__n_bits_atoms = 0 @@ -128,38 +135,39 @@ def __call__(self, machine_graph, n_keys_map, flexible): self.__fixed_partitions = dict() self.__fixed_used = set() - check_algorithm_can_support_constraints( - constrained_vertices=machine_graph.outgoing_edge_partitions, - supported_constraints=[ - ContiguousKeyRangeContraint, FixedKeyAndMaskConstraint], - abstract_constraint_type=AbstractKeyAllocatorConstraint) - self.__find_fixed() self.__calculate_zones() self.__check_zones() return self.__allocate() + def __check_constraint_supported(self, constraint): + if not isinstance(constraint, AbstractKeyAllocatorConstraint): + return + if isinstance(constraint, ContiguousKeyRangeContraint): + return + raise PacmanInvalidParameterException( + "constraint", constraint, "Unsupported key allocation constraint") + def __find_fixed(self): """ Looks for FixedKeyAmdMask Constraints and keeps track of these. See :py:meth:`__add_fixed` """ - multicast_partitions = self.__machine_graph.multicast_partitions - for app_id in multicast_partitions: - # multicast_partitions is a map of app_id to paritition_vertices - # paritition_vertices is a map of partition(name) to set(vertex) - by_app = multicast_partitions[app_id] - for partition_name, paritition_vertices in by_app.items(): - for mac_vertex in paritition_vertices: - partition = self.__machine_graph.\ - get_outgoing_edge_partition_starting_at_vertex( - mac_vertex, partition_name) - for constraint in partition.constraints: - if isinstance(constraint, FixedKeyAndMaskConstraint): - self.__add_fixed(partition, constraint) - - def __add_fixed(self, partition, constraint): + for pre, identifier in self.__vertex_partitions: + # Try the application vertex + for constraint in pre.constraints: + if isinstance(constraint, FixedKeyAndMaskConstraint): + if constraint.applies_to_partition(identifier): + for vert in pre.splitter.get_out_going_vertices( + identifier): + self.__add_fixed(identifier, vert, constraint) + self.__add_fixed( + identifier, pre, constraint) + else: + self.__check_constraint_supported(constraint) + + def __add_fixed(self, part_id, vertex, constraint): """ Precomputes and caches FixedKeyAndMask for easier use later @@ -168,10 +176,18 @@ def __add_fixed(self, partition, constraint): Saves a list of the keys and their n_keys so these zones can be blocked - :param pacman.model.graphs.AbstractEdgePartition partition: + :param str part_id: The identifier of the partition + :param MachineVertex vertex: The machine vertex this applies to :param FixedKeyAndMaskConstraint constraint: """ - self.__fixed_partitions[partition] = constraint.keys_and_masks + if (part_id, vertex) in self.__fixed_partitions: + raise PacmanInvalidParameterException( + "constraint", constraint, + "Multiple 
FixedKeyConstraints apply to the same vertex" + f" {vertex} and partition {part_id}:" + f" {constraint.keys_and_masks} and " + f" {self.__fixed_partitions[part_id, vertex]}") + self.__fixed_partitions[part_id, vertex] = constraint.keys_and_masks for key_and_mask in constraint.keys_and_masks: # Go through the mask sets and save keys and n_keys for key, n_keys in get_key_ranges( @@ -190,35 +206,34 @@ def __calculate_zones(self): :raises PacmanRouteInfoAllocationException: """ - multicast_partitions = self.__machine_graph.multicast_partitions progress = ProgressBar( - len(multicast_partitions), "Calculating zones") + len(self.__vertex_partitions), "Calculating zones") # search for size of regions - for app_id in progress.over(multicast_partitions): - by_app = multicast_partitions[app_id] - for partition_name, paritition_vertices in by_app.items(): - max_keys = 0 - for mac_vertex in paritition_vertices: - partition = self.__machine_graph.\ - get_outgoing_edge_partition_starting_at_vertex( - mac_vertex, partition_name) - if partition not in self.__fixed_partitions: - n_keys = self.__n_keys_map.n_keys_for_partition( - partition) - max_keys = max(max_keys, n_keys) - if max_keys > 0: - atom_bits = self.__bits_needed(max_keys) - self.__n_bits_atoms = max(self.__n_bits_atoms, atom_bits) - machine_bits = self.__bits_needed(len(paritition_vertices)) - self.__n_bits_machine = max( - self.__n_bits_machine, machine_bits) - self.__n_bits_atoms_and_mac = max( - self.__n_bits_atoms_and_mac, machine_bits + atom_bits) - self.__atom_bits_per_app_part[ - (app_id, partition_name)] = atom_bits - else: - self.__atom_bits_per_app_part[(app_id, partition_name)] = 0 + for pre, identifier in progress.over(self.__vertex_partitions): + splitter = pre.splitter + machine_vertices = splitter.get_out_going_vertices(identifier) + if not machine_vertices: + continue + max_keys = 0 + for machine_vertex in machine_vertices: + if ((identifier, machine_vertex) not in + self.__fixed_partitions): + n_keys = machine_vertex.get_n_keys_for_partition( + identifier) + max_keys = max(max_keys, n_keys) + + if max_keys > 0: + atom_bits = self.__bits_needed(max_keys) + self.__n_bits_atoms = max(self.__n_bits_atoms, atom_bits) + machine_bits = self.__bits_needed(len(machine_vertices)) + self.__n_bits_machine = max( + self.__n_bits_machine, machine_bits) + self.__n_bits_atoms_and_mac = max( + self.__n_bits_atoms_and_mac, machine_bits + atom_bits) + self.__atom_bits_per_app_part[pre, identifier] = atom_bits + else: + self.__atom_bits_per_app_part[pre, identifier] = 0 def __check_zones(self): # See if it could fit even before considerding fixed @@ -269,50 +284,60 @@ def __set_fixed_used(self): self.__fixed_used.add(i) def __allocate(self): - multicast_partitions = self.__machine_graph.multicast_partitions progress = ProgressBar( - len(multicast_partitions), "Allocating routing keys") + len(self.__vertex_partitions), "Allocating routing keys") routing_infos = RoutingInfo() app_part_index = 0 - for app_id in progress.over(multicast_partitions): + for pre, identifier in progress.over(self.__vertex_partitions): while app_part_index in self.__fixed_used: app_part_index += 1 - for partition_name, paritition_vertices in \ - multicast_partitions[app_id].items(): - # convert set to a list and sort by slice - machine_vertices = list(paritition_vertices) - machine_vertices.sort(key=lambda x: x.vertex_slice.lo_atom) - n_bits_atoms = self.__atom_bits_per_app_part[ - (app_id, partition_name)] - if self.__flexible: + # Get a list of machine vertices ordered 
by pre-slice + splitter = pre.splitter + machine_vertices = list(splitter.get_out_going_vertices( + identifier)) + if not machine_vertices: + continue + machine_vertices.sort(key=lambda x: x.vertex_slice.lo_atom) + n_bits_atoms = self.__atom_bits_per_app_part[pre, identifier] + if self.__flexible: + n_bits_machine = self.__n_bits_atoms_and_mac - n_bits_atoms + else: + if n_bits_atoms <= self.__n_bits_atoms: + # Ok it fits use global sizes + n_bits_atoms = self.__n_bits_atoms + n_bits_machine = self.__n_bits_machine + else: + # Nope need more bits! Use the flexible approach here n_bits_machine = self.__n_bits_atoms_and_mac - n_bits_atoms + + for machine_index, machine_vertex in enumerate(machine_vertices): + key = (identifier, machine_vertex) + if key in self.__fixed_partitions: + # Ignore zone calculations and just use fixed + keys_and_masks = self.__fixed_partitions[key] else: - if n_bits_atoms <= self.__n_bits_atoms: - # Ok it fits use global sizes - n_bits_atoms = self.__n_bits_atoms - n_bits_machine = self.__n_bits_machine - else: - # Nope need more bits! Use the flexible approach here - n_bits_machine = \ - self.__n_bits_atoms_and_mac - n_bits_atoms - - for machine_index, vertex in enumerate(machine_vertices): - partition = self.__machine_graph.\ - get_outgoing_edge_partition_starting_at_vertex( - vertex, partition_name) - if partition in self.__fixed_partitions: - # Ignore zone calculations and just use fixed - keys_and_masks = self.__fixed_partitions[partition] - else: - mask = self.__mask(n_bits_atoms) - key = app_part_index - key = (key << n_bits_machine) | machine_index - key = key << n_bits_atoms - keys_and_masks = [BaseKeyAndMask( - base_key=key, mask=mask)] - routing_infos.add_partition_info( - PartitionRoutingInfo(keys_and_masks, partition)) - app_part_index += 1 + mask = self.__mask(n_bits_atoms) + key = app_part_index + key = (key << n_bits_machine) | machine_index + key = key << n_bits_atoms + keys_and_masks = [BaseKeyAndMask(base_key=key, mask=mask)] + routing_infos.add_routing_info(MachineVertexRoutingInfo( + keys_and_masks, identifier, machine_vertex, + machine_index)) + + # Add application-level routing information + key = (identifier, pre) + if key in self.__fixed_partitions: + keys_and_masks = self.__fixed_partitions[key] + else: + key = app_part_index << (n_bits_atoms + n_bits_machine) + mask = self.__mask(n_bits_atoms + n_bits_machine) + keys_and_masks = [BaseKeyAndMask(key, mask)] + routing_infos.add_routing_info(AppVertexRoutingInfo( + keys_and_masks, identifier, pre, + self.__mask(n_bits_atoms), n_bits_atoms, + len(machine_vertices) - 1)) + app_part_index += 1 return routing_infos @@ -335,16 +360,17 @@ def __bits_needed(size): return int(math.ceil(math.log(size, 2))) -def flexible_allocate(machine_graph, n_keys_map): +def flexible_allocate(app_graph, extra_allocations): """ Allocated with fixed bits for the Application/Partition index but with the size of the atom and machine bit changing - :param MachineGraph machine_graph: - The machine graph to allocate the routing info for - :param AbstractMachinePartitionNKeysMap n_keys_map: - A map between the edges and the number of keys required by the - edges + :param ApplicationGraph app_graph: + The graph to allocate the routing info for + :param list(tuple(ApplicationVertex,str)) extra_allocations: + Additional (vertex, partition identifier) pairs to allocate + keys to. These might not appear in partitions in the graph + due to being added by the system. 
    :rtype: tuple(RoutingInfo,
        dict((ApplicationVertex, str), BaseKeyAndMask))
    :raise PacmanRouteInfoAllocationException:
@@ -354,16 +380,17 @@
 
     allocator = ZonedRoutingInfoAllocator()
 
-    return allocator(machine_graph, n_keys_map, True)
+    return allocator(app_graph, extra_allocations, True)
 
 
-def global_allocate(machine_graph, n_keys_map):
+def global_allocate(app_graph, extra_allocations):
     """
-    :param MachineGraph machine_graph:
-        The machine graph to allocate the routing info for
-    :param AbstractMachinePartitionNKeysMap n_keys_map:
-        A map between the edges and the number of keys required by the
-        edges
+    :param ApplicationGraph app_graph:
+        The graph to allocate the routing info for
+    :param list(tuple(ApplicationVertex,str)) extra_allocations:
+        Additional (vertex, partition identifier) pairs to allocate
+        keys to. These might not appear in partitions in the graph
+        due to being added by the system.
     :rtype: tuple(RoutingInfo,
        dict((ApplicationVertex, str), BaseKeyAndMask))
    :raise PacmanRouteInfoAllocationException:
@@ -373,4 +400,4 @@
 
     allocator = ZonedRoutingInfoAllocator()
 
-    return allocator(machine_graph, n_keys_map, False)
+    return allocator(app_graph, extra_allocations, False)
diff --git a/pacman/operations/routing_table_generators/__init__.py b/pacman/operations/routing_table_generators/__init__.py
index 57ba81101..fc2f6a4dc 100644
--- a/pacman/operations/routing_table_generators/__init__.py
+++ b/pacman/operations/routing_table_generators/__init__.py
@@ -15,7 +15,9 @@
 
 from .basic_routing_table_generator import basic_routing_table_generator
 from .zoned_routing_table_generator import ZonedRoutingTableGenerator
+from .merged_routing_table_generator import merged_routing_table_generator
 
 __all__ = [
-    "basic_routing_table_generator", "ZonedRoutingTableGenerator"
+    "basic_routing_table_generator", "ZonedRoutingTableGenerator",
+    "merged_routing_table_generator"
     ]
diff --git a/pacman/operations/routing_table_generators/basic_routing_table_generator.py b/pacman/operations/routing_table_generators/basic_routing_table_generator.py
index 1e28e8559..874f80e57 100644
--- a/pacman/operations/routing_table_generators/basic_routing_table_generator.py
+++ b/pacman/operations/routing_table_generators/basic_routing_table_generator.py
@@ -20,7 +20,7 @@
 def basic_routing_table_generator(
-        routing_infos, routing_table_by_partitions, machine):
+        routing_infos, routing_table_by_partitions):
     """
-    An basic algorithm that can produce routing tables
+    A basic algorithm that can produce routing tables
@@ -29,32 +29,33 @@ def basic_routing_table_generator(
-    :param ~spinn_machine.Machine machine:
     :rtype: MulticastRoutingTables
     """
-    progress = ProgressBar(machine.n_chips, "Generating routing tables")
+    progress = ProgressBar(
+        routing_table_by_partitions.n_routers, "Generating routing tables")
     routing_tables = MulticastRoutingTables()
-    for chip in progress.over(machine.chips):
-        partitions_in_table = routing_table_by_partitions.\
-            get_entries_for_router(chip.x, chip.y)
-        if partitions_in_table:
-            routing_tables.add_routing_table(__create_routing_table(
-                chip, partitions_in_table, routing_infos))
+    for x, y in progress.over(routing_table_by_partitions.get_routers()):
+        parts = routing_table_by_partitions.get_entries_for_router(x, y)
+        routing_tables.add_routing_table(__create_routing_table(
+            x, y, parts, routing_infos))
 
     return routing_tables
 
 
-def __create_routing_table(chip, partitions_in_table, routing_infos):
+def __create_routing_table(x, y, partitions_in_table,
routing_infos): """ - :param ~spinn_machine.Chip chip: + :param int x: + :param int y: :param partitions_in_table: :type partitions_in_table: - dict(AbstractSingleSourcePartition, + dict(((ApplicationVertex or MachineVertex), str), MulticastRoutingTableByPartitionEntry) :param RoutingInfo routing_infos: :rtype: MulticastRoutingTable """ - table = UnCompressedMulticastRoutingTable(chip.x, chip.y) - for partition in partitions_in_table: - r_info = routing_infos.get_routing_info_from_partition(partition) - entry = partitions_in_table[partition] + table = UnCompressedMulticastRoutingTable(x, y) + for source_vertex, partition_id in partitions_in_table: + r_info = routing_infos.get_routing_info_from_pre_vertex( + source_vertex, partition_id) + entry = partitions_in_table[source_vertex, partition_id] for key_and_mask in r_info.keys_and_masks: table.add_multicast_routing_entry( __create_entry(key_and_mask, entry)) diff --git a/pacman/operations/routing_table_generators/merged_routing_table_generator.py b/pacman/operations/routing_table_generators/merged_routing_table_generator.py new file mode 100644 index 000000000..4ea710ad8 --- /dev/null +++ b/pacman/operations/routing_table_generators/merged_routing_table_generator.py @@ -0,0 +1,158 @@ +# Copyright (c) 2017-2019 The University of Manchester +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +from spinn_utilities.progress_bar import ProgressBar +from spinn_machine import MulticastRoutingEntry +from pacman.model.routing_tables import ( + UnCompressedMulticastRoutingTable, MulticastRoutingTables) +from pacman.model.graphs.application import ApplicationVertex + + +def merged_routing_table_generator( + routing_infos, routing_table_by_partitions): + """ Creates routing entries by merging adjacent entries from the same + application vertex when possible. 
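+
+    Entries are merged only when they are for the same partition
+    identifier, share the same route, and come from machine vertices of
+    the same application vertex with consecutive routing-info indices
+    (see __match below).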
+
+    :param RoutingInfo routing_infos:
+    :param MulticastRoutingTableByPartition routing_table_by_partitions:
+    :rtype: MulticastRoutingTables
+    """
+    progress = ProgressBar(
+        routing_table_by_partitions.n_routers, "Generating routing tables")
+    routing_tables = MulticastRoutingTables()
+    for x, y in progress.over(routing_table_by_partitions.get_routers()):
+        parts = routing_table_by_partitions.get_entries_for_router(x, y)
+        routing_tables.add_routing_table(__create_routing_table(
+            x, y, parts, routing_infos))
+
+    return routing_tables
+
+
+def __create_routing_table(x, y, partitions_in_table, routing_info):
+    """
+    :param int x:
+    :param int y:
+    :param partitions_in_table:
+    :type partitions_in_table:
+        dict(((ApplicationVertex or MachineVertex), str),
+        MulticastRoutingTableByPartitionEntry)
+    :param RoutingInfo routing_info:
+    :rtype: MulticastRoutingTable
+    """
+    table = UnCompressedMulticastRoutingTable(x, y)
+    iterator = _IteratorWithNext(partitions_in_table.items())
+    while iterator.has_next:
+        (vertex, part_id), entry = iterator.next
+        r_info = routing_info.get_routing_info_from_pre_vertex(vertex, part_id)
+        if r_info is None:
+            raise Exception(
+                f"Missing routing information for {vertex}, {part_id}")
+        entries = [(vertex, part_id, entry, r_info)]
+        while __match(iterator, vertex, part_id, r_info, entry, routing_info):
+            (vertex, part_id), entry = iterator.next
+            r_info = routing_info.get_routing_info_from_pre_vertex(
+                vertex, part_id)
+            entries.append((vertex, part_id, entry, r_info))
+
+        # Now attempt to merge sources together as much as possible
+        for entry in __merged_keys_and_masks(entries, routing_info):
+            table.add_multicast_routing_entry(entry)
+
+    return table
+
+
+def __match(iterator, vertex, part_id, r_info, entry, routing_info):
+    if not iterator.has_next:
+        return False
+    if isinstance(vertex, ApplicationVertex):
+        return False
+    (next_vertex, next_part_id), next_entry = iterator.peek
+    if isinstance(next_vertex, ApplicationVertex):
+        return False
+    if part_id != next_part_id:
+        return False
+    next_r_info = routing_info.get_routing_info_from_pre_vertex(
+        next_vertex, next_part_id)
+    if next_r_info is None:
+        raise KeyError(
+            f"No routing info found for {next_vertex}, {next_part_id}")
+    if next_r_info.index != r_info.index + 1:
+        return False
+    app_src = vertex.app_vertex
+    next_app_src = next_vertex.app_vertex
+    return next_app_src == app_src and entry.has_same_route(next_entry)
+
+
+def __merged_keys_and_masks(entries, routing_info):
+    if not entries:
+        return
+    (vertex, part_id, entry, r_info) = entries[0]
+    if isinstance(vertex, ApplicationVertex) or len(entries) == 1:
+        yield MulticastRoutingEntry(
+            r_info.first_key, r_info.first_mask,
+            defaultable=entry.defaultable,
+            spinnaker_route=entry.spinnaker_route)
+    else:
+        app_r_info = routing_info.get_routing_info_from_pre_vertex(
+            vertex.app_vertex, part_id)
+        yield from app_r_info.merge_machine_entries(entries)
+
+
+def __create_entry(key_and_mask, entry):
+    """
+    :param BaseKeyAndMask key_and_mask:
+    :param MulticastRoutingTableByPartitionEntry entry:
+    :rtype: MulticastRoutingEntry
+    """
+    return MulticastRoutingEntry(
+        routing_entry_key=key_and_mask.key_combo,
+        defaultable=entry.defaultable, mask=key_and_mask.mask,
+        link_ids=entry.link_ids, processor_ids=entry.processor_ids)
+
+
+class _IteratorWithNext(object):
+
+    def __init__(self, iterable):
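+        # Wrap the iterable and pre-fetch the first element so that peek
+        # and has_next can look one item ahead without consuming anything
+        # the caller has not yet taken via next.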
+ self.__iterator = iter(iterable) + try: + self.__next = next(self.__iterator) + self.__has_next = True + except StopIteration: + self.__next = None + self.__has_next = False + + @property + def peek(self): + return self.__next + + @property + def has_next(self): + return self.__has_next + + @property + def next(self): + if not self.__has_next: + raise StopIteration + nxt = self.__next + try: + self.__next = next(self.__iterator) + self.__has_next = True + except StopIteration: + self.__next = None + self.__has_next = False + return nxt diff --git a/pacman/operations/tag_allocator_algorithms/basic_tag_allocator.py b/pacman/operations/tag_allocator_algorithms/basic_tag_allocator.py index 5cd0adaaf..ed3100b12 100644 --- a/pacman/operations/tag_allocator_algorithms/basic_tag_allocator.py +++ b/pacman/operations/tag_allocator_algorithms/basic_tag_allocator.py @@ -13,21 +13,24 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -from collections import namedtuple +from collections import namedtuple, defaultdict from spinn_utilities.progress_bar import ProgressBar from spinn_utilities.ordered_set import OrderedSet from spinn_machine.tags import IPTag, ReverseIPTag from pacman.model.tags import Tags -from pacman.utilities.utility_objs import ResourceTracker +from pacman.exceptions import PacmanNotFoundError # An arbitrary range of ports from which to allocate ports to Reverse IP Tags _BOARD_PORTS = range(17896, 18000) +# The set of tags available on any chip +_CHIP_TAGS = range(1, 8) + _Task = namedtuple("_Task", "constraint, board, tag, vertex, placement") -def basic_tag_allocator(machine, plan_n_timesteps, placements): +def basic_tag_allocator(machine, placements): """ Basic tag allocator that goes though the boards available and applies\ the IP tags and reverse IP tags as needed. 
@@ -35,123 +38,94 @@ def basic_tag_allocator(machine, plan_n_timesteps, placements): :param ~spinn_machine.Machine machine: The machine with respect to which to partition the application graph - :param int plan_n_timesteps: number of timesteps to plan for :param Placements placements: :return: list of IP Tags, list of Reverse IP Tags, tag allocation holder :rtype: tuple(list(~spinn_machine.tags.IPTag), list(~spinn_machine.tags.ReverseIPTag), Tags) """ - resource_tracker = ResourceTracker(machine, plan_n_timesteps) + # Keep track of which tags are free by Ethernet chip + tags_available = defaultdict(lambda: OrderedSet(_CHIP_TAGS)) - # Keep track of ports allocated to reverse IP tags and tags that still - # need a port to be allocated - ports_to_allocate = dict() - tags_to_allocate_ports = list() + # Keep track of which ports are free by Ethernet chip + ports_available = defaultdict(lambda: OrderedSet(_BOARD_PORTS)) - # Check that the algorithm can handle the constraints - progress = ProgressBar(placements.n_placements, "Discovering tags") - placements_with_tags = list() + # Go through placements and find tags + tags = Tags() + progress = ProgressBar(placements.n_placements, "Allocating tags") for placement in progress.over(placements.placements): - __gather_placements_with_tags(placement, placements_with_tags) + resources = placement.vertex.resources_required + place_chip = machine.get_chip_at(placement.x, placement.y) + eth_chip = machine.get_chip_at(place_chip.nearest_ethernet_x, + place_chip.nearest_ethernet_y) + tags_on_chip = tags_available[eth_chip.x, eth_chip.y] + for iptag in resources.iptags: + alloc_chip, tag = __get_chip_and_tag( + iptag, eth_chip, tags_on_chip, machine, tags_available) + tags.add_ip_tag( + __create_tag(alloc_chip, placement, iptag, tag), + placement.vertex) + for reverse_iptag in resources.reverse_iptags: + alloc_chip, tag = __get_chip_and_tag( + reverse_iptag, eth_chip, tags_on_chip, machine, tags_available) + port = __get_port(reverse_iptag, eth_chip, ports_available) + tags.add_reverse_ip_tag( + __create_reverse_tag( + eth_chip, placement, reverse_iptag, tag, port), + placement.vertex) - # Go through and allocate the IP tags and constrained reverse IP tags - tags = Tags() - progress = ProgressBar(placements_with_tags, "Allocating tags") - for placement in progress.over(placements_with_tags): - __allocate_tags_for_placement( - placement, resource_tracker, tags, ports_to_allocate, - tags_to_allocate_ports) + return tags - # Finally allocate ports to the unconstrained reverse IP tags - __allocate_ports_for_reverse_ip_tags( - tags_to_allocate_ports, ports_to_allocate, tags) - return tags +def __get_chip_and_tag(iptag, eth_chip, tags_on_chip, machine, tags_available): + if iptag.tag is not None: + tag = iptag.tag + # Try the nearest Ethernet + if iptag.tag in tags_on_chip: + tags_on_chip.remove(iptag.tag) + return eth_chip, tag + else: + return __find_tag_chip(machine, tags_available, tag), tag + else: + if tags_on_chip: + tag = tags_on_chip.pop() + return eth_chip, tag + else: + return __find_free_tag(machine, tags_available) -def __gather_placements_with_tags(placement, collector): - """ - :param Placement placement: - :param list(Placement) collector: - """ - requires = placement.vertex.resources_required - if requires.iptags or requires.reverse_iptags: - ResourceTracker.check_constraints([placement.vertex]) - collector.append(placement) +def __find_tag_chip(machine, tags_available, tag): + for eth_chip in machine.ethernet_connected_chips: + tags_on_chip = 
tags_available[eth_chip.x, eth_chip.y] + if tag in tags_on_chip: + tags_on_chip.remove(tag) + return eth_chip + raise PacmanNotFoundError(f"Tag {tag} not available on any Ethernet chip") -def __allocate_tags_for_placement( - placement, resource_tracker, tag_collector, ports_collector, - tag_port_tasks): - """ - :param Placement placement: - :param ResourceTracker resource_tracker: - :param Tags tag_collector: - :param dict(str,set(int)) ports_collector: - :param list(_Task) tag_port_tasks: - """ - vertex = placement.vertex - resources = vertex.resources_required - - # Get the constraint details for the tags - (board_address, ip_tags, reverse_ip_tags) = \ - ResourceTracker.get_ip_tag_info(resources, vertex.constraints) - - # Allocate the tags, first-come, first-served, using the fixed - # placement of the vertex, and the required resources - chips = [(placement.x, placement.y)] - (_, _, _, returned_ip_tags, returned_reverse_ip_tags) = \ - resource_tracker.allocate_resources( - resources, chips, placement.p, board_address, ip_tags, - reverse_ip_tags) - - # Put the allocated IP tag information into the tag object - if returned_ip_tags is not None: - for (tag_constraint, (board_address, tag, dest_x, dest_y)) in \ - zip(ip_tags, returned_ip_tags): - ip_tag = IPTag( - board_address=board_address, destination_x=dest_x, - destination_y=dest_y, tag=tag, - ip_address=tag_constraint.ip_address, - port=tag_constraint.port, - strip_sdp=tag_constraint.strip_sdp, - traffic_identifier=tag_constraint.traffic_identifier) - tag_collector.add_ip_tag(ip_tag, vertex) - - if returned_reverse_ip_tags is None: - return - - # Put the allocated reverse IP tag information into the tag object - for tag_constraint, (board_address, tag) in zip( - reverse_ip_tags, returned_reverse_ip_tags): - if board_address not in ports_collector: - ports_collector[board_address] = OrderedSet(_BOARD_PORTS) - if tag_constraint.port is not None: - reverse_ip_tag = ReverseIPTag( - board_address, tag, tag_constraint.port, - placement.x, placement.y, placement.p, - tag_constraint.sdp_port) - tag_collector.add_reverse_ip_tag(reverse_ip_tag, vertex) - - ports_collector[board_address].discard(tag_constraint.port) - else: - tag_port_tasks.append(_Task( - tag_constraint, board_address, tag, vertex, placement)) +def __find_free_tag(machine, tags_available): + for eth_chip in machine.ethernet_connected_chips: + tags_on_chip = tags_available[eth_chip.x, eth_chip.y] + if tags_on_chip: + tag = tags_on_chip.pop() + return eth_chip, tag + raise PacmanNotFoundError("Out of tags!") -def __allocate_ports_for_reverse_ip_tags(tasks, ports, tags): - """ - :param list(_Task) tag_port_tasks: - :param dict(str,set(int)) ports: - :param Tags tags: - """ - for task in tasks: - if task.board not in ports: - ports[task.board] = OrderedSet(_BOARD_PORTS) - port = ports[task.board].pop(last=False) - reverse_ip_tag = ReverseIPTag( - task.board, task.tag, port, - task.placement.x, task.placement.y, task.placement.p, - task.constraint.sdp_port) - tags.add_reverse_ip_tag(reverse_ip_tag, task.vertex) +def __create_tag(eth_chip, placement, iptag, tag): + return IPTag( + eth_chip.ip_address, placement.x, placement.y, + tag, iptag.ip_address, iptag.port, + iptag.strip_sdp, iptag.traffic_identifier) + + +def __create_reverse_tag(eth_chip, placement, reverse_iptag, tag, port): + return ReverseIPTag( + eth_chip.ip_address, tag, port, placement.x, placement.y, placement.p, + reverse_iptag.sdp_port) + + +def __get_port(reverse_ip_tag, eth_chip, ports_available): + if 
reverse_ip_tag.port is not None: + return reverse_ip_tag.port + return ports_available[eth_chip.x, eth_chip.y].pop() diff --git a/pacman/pacman.cfg b/pacman/pacman.cfg index 91a8999aa..1c031d96e 100644 --- a/pacman/pacman.cfg +++ b/pacman/pacman.cfg @@ -10,5 +10,9 @@ # Note for hard coded locations a "reports" sub directory will be added default_report_file_path = DEFAULT +# These next settings require SpiNNaker-spinner to be installed +draw_placements = False +draw_placements_on_error = False + [Mapping] router_table_compress_as_far_as_possible = False diff --git a/pacman/utilities/algorithm_utilities/machine_algorithm_utilities.py b/pacman/utilities/algorithm_utilities/machine_algorithm_utilities.py deleted file mode 100644 index c43325b78..000000000 --- a/pacman/utilities/algorithm_utilities/machine_algorithm_utilities.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright (c) 2017-2019 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -import sys -from pacman.utilities import constants -from spinn_machine import SDRAM, Chip, Link, Router - - -def create_virtual_chip(machine, link_data, virtual_chip_x, virtual_chip_y): - """ Create a virtual chip on a real machine. - - :param ~spinn_machine.Machine machine: - :param ~spinn_machine.link_data_objects.AbstractLinkData link_data: - Describes the link from the real machine. 
- :param int virtual_chip_x: Virtual chip coordinate - :param int virtual_chip_y: Virtual chip coordinate - """ - - # If the chip already exists, return the data - if machine.is_chip_at(virtual_chip_x, virtual_chip_y): - if not machine.get_chip_at(virtual_chip_x, virtual_chip_y).virtual: - raise Exception( - "Attempting to add virtual chip in place of a real chip") - return - - # Create link to the virtual chip from the real chip - virtual_link_id = (link_data.connected_link + 3) % 6 - to_virtual_chip_link = Link( - destination_x=virtual_chip_x, - destination_y=virtual_chip_y, - source_x=link_data.connected_chip_x, - source_y=link_data.connected_chip_y, - source_link_id=link_data.connected_link) - - # Create link to the real chip from the virtual chip - from_virtual_chip_link = Link( - destination_x=link_data.connected_chip_x, - destination_y=link_data.connected_chip_y, - source_x=virtual_chip_x, - source_y=virtual_chip_y, - source_link_id=virtual_link_id) - - # create the router - links = [from_virtual_chip_link] - router_object = Router( - links=links, emergency_routing_enabled=False, - n_available_multicast_entries=sys.maxsize) - - # connect the real chip with the virtual one - connected_chip = machine.get_chip_at( - link_data.connected_chip_x, - link_data.connected_chip_y) - connected_chip.router.add_link(to_virtual_chip_link) - - machine.add_virtual_chip(Chip( - n_processors=constants.CORES_PER_VIRTUAL_CHIP, router=router_object, - sdram=SDRAM(size=0), - x=virtual_chip_x, y=virtual_chip_y, - virtual=True, nearest_ethernet_x=None, nearest_ethernet_y=None)) diff --git a/pacman/utilities/algorithm_utilities/partition_algorithm_utilities.py b/pacman/utilities/algorithm_utilities/partition_algorithm_utilities.py index 18970c54f..3d3f1ea2f 100644 --- a/pacman/utilities/algorithm_utilities/partition_algorithm_utilities.py +++ b/pacman/utilities/algorithm_utilities/partition_algorithm_utilities.py @@ -16,10 +16,8 @@ """ A collection of methods which support partitioning algorithms. """ -from spinn_utilities.ordered_set import OrderedSet -from pacman.exceptions import PacmanPartitionException from pacman.model.constraints.partitioner_constraints import ( - AbstractPartitionerConstraint, SameAtomsAsVertexConstraint) + AbstractPartitionerConstraint) VERTICES_NEED_TO_BE_SAME_SIZE_ERROR = ( @@ -40,77 +38,3 @@ def get_remaining_constraints(vertex): """ return [constraint for constraint in vertex.constraints if not isinstance(constraint, AbstractPartitionerConstraint)] - - -def get_same_size_vertex_groups(vertices): - """ Get a dictionary of vertex to vertex that must be partitioned the same\ - size. 
- - :param iterble(ApplicationVertex) vertices: - :rtype: dict(ApplicationVertex, set(ApplicationVertex)) - """ - - # Dict of vertex to list of vertices with same size - # (repeated lists expected) - same_size_vertices = dict() - - for vertex in vertices: - - # Find all vertices that have a same size constraint associated with - # this vertex - same_size_as_vertices = list() - for constraint in vertex.constraints: - if isinstance(constraint, SameAtomsAsVertexConstraint): - if vertex.n_atoms != constraint.vertex.n_atoms: - raise PacmanPartitionException( - VERTICES_NEED_TO_BE_SAME_SIZE_ERROR.format( - vertex.label, vertex.n_atoms, - constraint.vertex.label, - constraint.vertex.n_atoms)) - same_size_as_vertices.append(constraint.vertex) - - if not same_size_as_vertices: - same_size_vertices[vertex] = {vertex} - continue - - # Go through all the vertices that want to have the same size - # as the top level vertex - for same_size_vertex in same_size_as_vertices: - - # Neither vertex has been seen - if (same_size_vertex not in same_size_vertices and - vertex not in same_size_vertices): - - # add both to a new group - group = OrderedSet([vertex, same_size_vertex]) - same_size_vertices[vertex] = group - same_size_vertices[same_size_vertex] = group - - # Both vertices have been seen elsewhere - elif (same_size_vertex in same_size_vertices and - vertex in same_size_vertices): - - # merge their groups - group_1 = same_size_vertices[vertex] - group_2 = same_size_vertices[same_size_vertex] - group_1.update(group_2) - for vert in group_1: - same_size_vertices[vert] = group_1 - - # The current vertex has been seen elsewhere - elif vertex in same_size_vertices: - - # add the new vertex to the existing group - group = same_size_vertices[vertex] - group.add(same_size_vertex) - same_size_vertices[same_size_vertex] = group - - # The other vertex has been seen elsewhere - elif same_size_vertex in same_size_vertices: - - # so add this vertex to the existing group - group = same_size_vertices[same_size_vertex] - group.add(vertex) - same_size_vertices[vertex] = group - - return same_size_vertices diff --git a/pacman/utilities/algorithm_utilities/placer_algorithm_utilities.py b/pacman/utilities/algorithm_utilities/placer_algorithm_utilities.py deleted file mode 100644 index 397db0d14..000000000 --- a/pacman/utilities/algorithm_utilities/placer_algorithm_utilities.py +++ /dev/null @@ -1,166 +0,0 @@ -# Copyright (c) 2017-2019 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
- -import functools - -from pacman.model.resources import ResourceContainer, ConstantSDRAM -from spinn_utilities.ordered_set import OrderedSet -from pacman.model.constraints.placer_constraints import ( - ChipAndCoreConstraint, SameChipAsConstraint, BoardConstraint, - RadialPlacementFromChipConstraint) -from pacman.model.graphs.common.edge_traffic_type import EdgeTrafficType -from pacman.utilities import VertexSorter, ConstraintOrder -from pacman.model.graphs.abstract_virtual import AbstractVirtual - - -def sort_vertices_by_known_constraints(vertices): - """ Sort vertices to be placed by constraint so that those with\ - more restrictive constraints come first. - - :param list(ApplicationVertex) vertices: - :rtype: list(ApplicationVertex) - """ - sorter = VertexSorter([ - ConstraintOrder(ChipAndCoreConstraint, 1, ["p"]), - ConstraintOrder(ChipAndCoreConstraint, 2), - ConstraintOrder(SameChipAsConstraint, 3), - ConstraintOrder(BoardConstraint, 4), - ConstraintOrder(RadialPlacementFromChipConstraint, 5)]) - return sorter.sort(vertices) - - -def get_vertices_on_same_chip(vertex, graph): - """ Get the vertices that must be on the same chip as the given vertex - - :param AbstractVertex vertex: The vertex to search with - :param Graph graph: The graph containing the vertex - :rtype: set(AbstractVertex) - """ - # Virtual vertices can't be forced on different chips - if isinstance(vertex, AbstractVirtual): - return [] - same_chip_as_vertices = OrderedSet() - for constraint in vertex.constraints: - if isinstance(constraint, SameChipAsConstraint): - same_chip_as_vertices.add(constraint.vertex) - - same_chip_as_vertices.update( - edge.post_vertex - for edge in graph.get_edges_starting_at_vertex(vertex) - if edge.traffic_type == EdgeTrafficType.SDRAM) - return same_chip_as_vertices - - -def get_same_chip_vertex_groups(graph): - """ Get a dictionary of vertex to list of vertices that must be placed on\ - the same chip - - :param Graph graph: The graph containing the vertices - :rtype: dict(AbstractVertex, set(AbstractVertex)) - """ - groups = create_vertices_groups(graph.vertices, functools.partial( - get_vertices_on_same_chip, graph=graph)) - # Dict of vertex to set of vertices on same chip (repeated lists expected) - # A empty set value indicates a set that is too big. - same_chip_vertices = dict() - for group in groups: - for vertex in group: - same_chip_vertices[vertex] = group - for vertex in graph.vertices: - if vertex not in same_chip_vertices: - same_chip_vertices[vertex] = {vertex} - return same_chip_vertices - - -def add_set(all_sets, new_set): - """ - Adds a new set into the list of sets, concatenating sets if required. - - If the new set does not overlap any existing sets it is added. - - However if the new sets overlaps one or more existing sets, a superset is - created combining all the overlapping sets. - Existing overlapping sets are removed and only the new superset is added. - - :param list(set) all_sets: List of non-overlapping sets - :param set new_set: - A new set which may or may not overlap the previous sets. 
- """ - - union = OrderedSet() - removes = [] - for a_set in all_sets: - if not new_set.isdisjoint(a_set): - removes.append(a_set) - union |= a_set - union |= new_set - if removes: - for a_set in removes: - all_sets.remove(a_set) - all_sets.append(union) - - -def create_vertices_groups(vertices, same_group_as_function): - """ - :param iterable(AbstractVertex) vertices: - :param same_group_as_function: - :type same_group_as_function: - callable(AbstractVertex, set(AbstractVertex)) - """ - groups = list() - done = set() - for vertex in vertices: - if vertex in done: - continue - same_chip_as_vertices = same_group_as_function(vertex) - if same_chip_as_vertices: - same_chip_as_vertices.add(vertex) - # Singletons on interesting and added later if needed - if len(same_chip_as_vertices) > 1: - add_set(groups, same_chip_as_vertices) - done.update(same_chip_as_vertices) - return groups - - -def create_requirement_collections(vertices, machine_graph): - """ Get a collection of requirements that includes SDRAM edge resources - """ - - # Get all but the last requirements, keeping the SDRAM edge requirements - required_resources = list() - to_add_partitions = set() - last_resources = None - last_constraints = None - for vertex in vertices: - if last_resources is not None: - required_resources.append([ - last_resources, last_constraints]) - last_resources = vertex.resources_required - last_constraints = vertex.constraints - to_add_partitions.update( - machine_graph.get_sdram_edge_partitions_starting_at_vertex( - vertex)) - - # Add up all the SDRAM edge requirements - total_sdram = 0 - for partition in to_add_partitions: - total_sdram += partition.total_sdram_requirements() - - # Add the SDRAM requirements to the final requirements - resources = ResourceContainer(sdram=ConstantSDRAM(total_sdram)) - resources.extend(last_resources) - required_resources.append([resources, last_constraints]) - - return required_resources diff --git a/pacman/utilities/algorithm_utilities/routing_algorithm_utilities.py b/pacman/utilities/algorithm_utilities/routing_algorithm_utilities.py new file mode 100644 index 000000000..44ed10569 --- /dev/null +++ b/pacman/utilities/algorithm_utilities/routing_algorithm_utilities.py @@ -0,0 +1,610 @@ +# Copyright (c) 2021 The University of Manchester +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +from .routing_tree import RoutingTree +from pacman.exceptions import MachineHasDisconnectedSubRegion +from pacman.model.routing_table_by_partition import ( + MulticastRoutingTableByPartitionEntry) +from pacman.model.graphs import ( + AbstractFPGA, AbstractSpiNNakerLink, AbstractVirtual) +from pacman.model.graphs.application import ApplicationEdgePartition +from collections import deque, defaultdict +import heapq +import itertools + + +def get_app_partitions(app_graph): + """ Find all application partitions. 
+
+    Note that where a vertex splitter indicates that it has internal
+    partitions but is not the source of an external partition, a "fake"
+    empty application partition is added. This allows the calling
+    algorithm to loop over the returned list and look at the set of
+    edges *and* internal partitions to get a complete picture of *all*
+    targets for each source machine vertex at once.
+
+    :param ApplicationGraph app_graph: The application graph to consider
+    :return: list of partitions; note that where there are only internal
+        multicast partitions, the partition will have no edges. Caller
+        should use vertex.splitter.get_internal_multicast_partitions for
+        details.
+    :rtype: list(ApplicationEdgePartition)
+    """
+
+    # Find all partitions that need to be dealt with
+    partitions = list(app_graph.outgoing_edge_partitions)
+    sources = set(p.pre_vertex for p in partitions)
+
+    # Convert internal partitions to self-connected partitions
+    for v in app_graph.vertices:
+        internal_partitions = v.splitter.get_internal_multicast_partitions()
+        if v not in sources and internal_partitions:
+            part_ids = set(p.identifier for p in internal_partitions)
+            for identifier in part_ids:
+                # Add a partition with no edges to identify this as internal
+                app_part = ApplicationEdgePartition(identifier, v)
+                partitions.append(app_part)
+    return partitions
+
+
+def route_has_dead_links(root, machine):
+    """ Quickly determine if a route uses any dead links.
+
+    :param RoutingTree root:
+        The root of the RoutingTree which contains nothing but RoutingTrees
+        (i.e. no vertices and links).
+    :param ~spinn_machine.Machine machine:
+        The machine in which the routes exist.
+    :return: True if the route uses any dead/missing links, False otherwise.
+    :rtype: bool
+    """
+    for _, (x, y), routes in root.traverse():
+        chip = machine.get_chip_at(x, y)
+        for route in routes:
+            if chip is None:
+                return True
+            if not chip.router.is_link(route):
+                return True
+    return False
+
+
+def avoid_dead_links(root, machine):
+    """ Modify a RoutingTree to route around dead links in a Machine.
+
+    Uses A* to reconnect disconnected branches of the tree (due to dead
+    links in the machine).
+
+    :param RoutingTree root:
+        The root of the RoutingTree which contains nothing but RoutingTrees
+        (i.e. no vertices and links).
+    :param ~spinn_machine.Machine machine:
+        The machine in which the routes exist.
+    :return:
+        A new RoutingTree is produced, rooted as before.
+    :rtype: RoutingTree
+    """
+    # Make a copy of the RoutingTree with all broken parts disconnected
+    root, lookup, broken_links = _copy_and_disconnect_tree(root, machine)
+
+    # For each disconnected subtree, use A* to connect the tree to *any*
+    # other disconnected subtree. Note that this process will eventually
+    # result in all disconnected subtrees being connected; the result is a
+    # fully connected tree.
+    for parent, child in broken_links:
+        child_chips = set(c.chip for c in lookup[child])
+
+        # Try to reconnect broken links to any other part of the tree
+        # (excluding this broken subtree itself since that would create a
+        # cycle).
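+        # The path returned starts at a node that is already in the
+        # connected tree and ends at a live neighbour of the broken child
+        # (see a_star below).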
+        path = a_star(
+            child, parent, set(lookup).difference(child_chips), machine)
+
+        # Add new RoutingTree nodes to reconnect the child to the tree.
+        last_node = lookup[path[0][1]]
+        last_direction = path[0][0]
+        for direction, (x, y) in path[1:]:
+            if (x, y) not in child_chips:
+                # This path segment traverses new ground so we must create a
+                # new RoutingTree for the segment.
+                new_node = RoutingTree((x, y))
+                # A* will not traverse anything but chips in this tree so this
+                # assert is merely a sanity check that this occurred correctly.
+                assert (x, y) not in lookup, "Cycle created."
+                lookup[(x, y)] = new_node
+            else:
+                # This path segment overlaps part of the disconnected tree
+                # (A* doesn't know where the disconnected tree is and thus
+                # doesn't avoid it). To prevent cycles being introduced, this
+                # overlapped node is severed from its parent and merged as part
+                # of the A* path.
+                new_node = lookup[(x, y)]
+
+                # Find the node's current parent and disconnect it.
+                for node in lookup[child]:  # pragma: no branch
+                    dn = [(d, n) for d, n in node.children if n == new_node]
+                    assert len(dn) <= 1
+                    if dn:
+                        node.remove_child(dn[0])
+                        # A node can only have one parent so we can stop now.
+                        break
+            last_node.append_child((last_direction, new_node))
+            last_node = new_node
+            last_direction = direction
+        last_node.append_child((last_direction, lookup[child]))
+
+    return root
+
+
+def _copy_and_disconnect_tree(root, machine):
+    """
+    Copy a RoutingTree (containing nothing but RoutingTrees), disconnecting
+    nodes which are not connected in the machine.
+
+    Note that if a dead chip is part of the input RoutingTree, no
+    corresponding node will be included in the copy. The assumption behind
+    this is that the only reason a tree would visit a dead chip is because a
+    route passed through the chip and wasn't actually destined to arrive at
+    that chip. This situation is impossible to confirm since the input
+    routing trees have not yet been populated with vertices. The caller is
+    responsible for being sensible.
+
+    :param RoutingTree root:
+        The root of the RoutingTree that contains nothing but RoutingTrees
+        (i.e. no children which are vertices or links).
+    :param ~spinn_machine.Machine machine:
+        The machine in which the routes exist
+    :return: (root, lookup, broken_links)
+        Where:
+        * `root` is the new root of the tree
+          :py:class:`~.RoutingTree`
+        * `lookup` is a dict {(x, y): :py:class:`~.RoutingTree`, ...}
+        * `broken_links` is a set ([(parent, child), ...]) containing all
+          disconnected parent and child (x, y) pairs due to broken links.
+    :rtype: tuple(RoutingTree, dict(tuple(int,int),RoutingTree),
+        set(tuple(tuple(int,int),tuple(int,int))))
+    """
+    new_root = None
+
+    # Lookup for copied routing tree {(x, y): RoutingTree, ...}
+    new_lookup = {}
+
+    # List of missing connections in the copied routing tree [(new_parent,
+    # new_child), ...]
+    broken_links = set()
+
+    # A queue [(new_parent, direction, old_node), ...]
+    to_visit = deque([(None, None, root)])
+    while to_visit:
+        new_parent, direction, old_node = to_visit.popleft()
+
+        if machine.is_chip_at(old_node.chip[0], old_node.chip[1]):
+            # Create a copy of the node
+            new_node = RoutingTree(old_node.chip)
+            new_lookup[new_node.chip] = new_node
+        else:
+            # This chip is dead, so move all its children into the parent node
+            assert new_parent is not None, \
+                "Net cannot be sourced from a dead chip."
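+            # Reuse the parent as this node: the dead chip's children are
+            # then either attached directly to the parent or recorded as
+            # broken links when they are visited below.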
+            new_node = new_parent
+
+        if new_parent is None:
+            # This is the root node
+            new_root = new_node
+        else:
+            if new_node is not new_parent:
+                # If this node is not dead, check connectivity to parent
+                # node (no reason to check connectivity between a dead node
+                # and its parent).
+                if _is_linked(
+                        new_parent.chip, new_node.chip, direction, machine):
+                    # Is connected via working link
+                    new_parent.append_child((direction, new_node))
+                else:
+                    # Link to parent is dead (or original parent was dead and
+                    # the new parent is not adjacent)
+                    broken_links.add((new_parent.chip, new_node.chip))
+
+        # Copy children
+        for child_direction, child in old_node.children:
+            to_visit.append((new_node, child_direction, child))
+
+    return new_root, new_lookup, broken_links
+
+
+def a_star(sink, heuristic_source, sources, machine):
+    """ Use A* to find a path from any of the sources to the sink.
+
+    Note that the heuristic means that the search will proceed towards
+    heuristic_source without any concern for any other sources. This means
+    that the algorithm may miss a very close neighbour in order to pursue its
+    goal of reaching heuristic_source. This is not considered a problem since
+    1) the heuristic source will typically be in the direction of the rest of
+    the tree, nearby, and often the closest entity, and 2) it prevents us
+    accidentally forming loops in the rest of the tree since we'll stop as
+    soon as we touch any part of it.
+
+    :param tuple(int,int) sink: (x, y)
+    :param tuple(int,int) heuristic_source: (x, y)
+        An element from `sources` which is used as a guiding heuristic for the
+        A* algorithm.
+    :param set(tuple(int,int)) sources: set([(x, y), ...])
+    :param ~spinn_machine.Machine machine:
+    :return: [(int, (x, y)), ...]
+        A path starting with a coordinate in `sources` and terminating at a
+        connected neighbour of `sink` (i.e. the path does not include `sink`).
+        The direction given is the link down which to proceed from the given
+        (x, y) to arrive at the next point in the path.
+    :rtype: list(tuple(int,tuple(int,int)))
+    """
+    # Select the heuristic function to use for distances
+    heuristic = (lambda the_node: machine.get_vector_length(
+        the_node, heuristic_source))
+
+    # A dictionary {node: (direction, previous_node)}. An entry indicates
+    # that 1) the node has been visited and 2) which node we hopped from (and
+    # the direction used) to reach it. This is None if the node is the sink.
+    visited = {sink: None}
+
+    # The node which the tree will be reconnected to
+    selected_source = None
+
+    # A heap (accessed via heapq) of (distance, (x, y)) where distance is the
+    # distance between (x, y) and heuristic_source and (x, y) is a node to
+    # explore.
+    to_visit = [(heuristic(sink), sink)]
+    while to_visit:
+        _, node = heapq.heappop(to_visit)
+
+        # Terminate if we've found the destination
+        if node in sources:
+            selected_source = node
+            break
+
+        # Try all neighbouring locations.
+        for neighbour_link in range(6):  # Router.MAX_LINKS_PER_ROUTER
+            # Note: link identifiers are from the perspective of the
+            # neighbour, not the current node!
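+            # The search expands outwards from the sink, so the link stored
+            # for each hop is the one the finished source-to-sink path will
+            # traverse from the neighbour towards this node.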
+            neighbour = machine.xy_over_link(
+                # Same as Router.opposite
+                node[0], node[1], (neighbour_link + 3) % 6)
+
+            # Skip links which are broken
+            if not machine.is_link_at(
+                    neighbour[0], neighbour[1], neighbour_link):
+                continue
+
+            # Skip neighbours who have already been visited
+            if neighbour in visited:
+                continue
+
+            # Record the hop to this neighbour and queue it for exploration
+            visited[neighbour] = (neighbour_link, node)
+            heapq.heappush(to_visit, (heuristic(neighbour), neighbour))
+
+    # Fail if no paths exist
+    if selected_source is None:
+        raise MachineHasDisconnectedSubRegion(
+            "Could not find path from {} to {}".format(
+                sink, heuristic_source))
+
+    # Reconstruct the discovered path, starting from the source we found and
+    # working back until the sink.
+    path = [(visited[selected_source][0], selected_source)]
+    while visited[path[-1][1]][1] != sink:
+        node = visited[path[-1][1]][1]
+        direction = visited[node][0]
+        path.append((direction, node))
+
+    return path
+
+
+def _is_linked(source, target, direction, machine):
+    """
+    :param tuple(int,int) source:
+    :param tuple(int,int) target:
+    :param int direction:
+    :param ~spinn_machine.Machine machine:
+    :rtype: bool
+    """
+    s_chip = machine.get_chip_at(source[0], source[1])
+    if s_chip is None:
+        return False
+    link = s_chip.router.get_link(direction)
+    if link is None:
+        return False
+    if link.destination_x != target[0]:
+        return False
+    if link.destination_y != target[1]:
+        return False
+    return True
+
+
+def convert_a_route(
+        routing_tables, source_vertex, partition_id, incoming_processor,
+        incoming_link, route, targets_by_chip):
+    """
+    Converts the algorithm-specific partition route back to the standard
+    SpiNNaker format and adds it to the routing_tables.
+
+    :param MulticastRoutingTableByPartition routing_tables:
+        SpiNNaker-format routing tables
+    :param source_vertex: The source vertex of the partition this route
+        applies to
+    :param str partition_id: The identifier of the partition this route
+        applies to
+    :param int or None incoming_processor:
+        the processor this route came from
+    :param int or None incoming_link: the link this route came from
+    :param RoutingTree route: algorithm-specific format of the route
+    :param dict((int,int),(list,list)) targets_by_chip:
+        Target cores and links of things on the route that are final end
+        points
+    """
+    x, y = route.chip
+
+    next_hops = list()
+    processor_ids = list()
+    link_ids = list()
+    for (route, next_hop) in route.children:
+        if route is not None:
+            link_ids.append(route)
+            next_incoming_link = (route + 3) % 6
+        if next_hop is not None:
+            next_hops.append((next_hop, next_incoming_link))
+    if (x, y) in targets_by_chip:
+        cores, links = targets_by_chip[x, y]
+        processor_ids.extend(cores)
+        link_ids.extend(links)
+
+    entry = MulticastRoutingTableByPartitionEntry(
+        link_ids, processor_ids, incoming_processor, incoming_link)
+    routing_tables.add_path_entry(entry, x, y, source_vertex, partition_id)
+
+    for next_hop, next_incoming_link in next_hops:
+        convert_a_route(
+            routing_tables, source_vertex, partition_id, None,
+            next_incoming_link, next_hop, targets_by_chip)
+
+
+def longest_dimension_first(vector, start, machine):
+    """
+    List the (x, y) steps on a longest-dimension first route.
+
+    :param tuple(int,int,int) vector: (x, y, z)
+        The vector which the path should cover.
+    :param tuple(int,int) start: (x, y)
+        The coordinates from which the path should start (note this is a 2D
+        coordinate).
+    :param ~spinn_machine.Machine machine:
+    :return: The list of (link_id, (target_x, target_y)) nodes on the route
+    :rtype: list(tuple(int,tuple(int, int)))
+    """
+    return vector_to_nodes(
+        sorted(enumerate(vector), key=(lambda x: abs(x[1])), reverse=True),
+        start, machine)
+
+
+def least_busy_dimension_first(traffic, vector, start, machine):
+    """ List the (x, y) steps on a route that goes through the least busy\
+        routes first.
+
+    :param traffic: A dictionary of (x, y): count of routes
+    :param vector: (x, y, z)
+        The vector which the path should cover.
+    :param start: (x, y)
+        The coordinates from which the path should start (note this is a 2D
+        coordinate).
+    :param machine: the SpiNNaker machine.
+    :return: the route with the least total traffic
+    """
+
+    # Go through and find the sum of traffic depending on the route taken
+    min_sum = 0
+    min_route = None
+    for order in itertools.permutations([0, 1, 2]):
+        dm_vector = [(i, vector[i]) for i in order]
+        route = vector_to_nodes(dm_vector, start, machine)
+        sum_traffic = sum(traffic[x, y] for _, (x, y) in route)
+        if min_route is None or min_sum > sum_traffic:
+            min_sum = sum_traffic
+            min_route = route
+
+    for _, (x, y) in min_route:
+        traffic[x, y] += 1
+
+    return min_route
+
+
+def vector_to_nodes(dm_vector, start, machine):
+    """ Convert a vector to a list of nodes
+
+    :param list(tuple(int,int)) dm_vector:
+        A vector made up of a list of (dimension, magnitude), where dimensions
+        are x=0, y=1, z=diagonal=2
+    :param tuple(int,int) start: The x, y coordinates of the start
+    :param Machine machine: The machine to apply the vector to
+    :return: A list of (link_id, (target_x, target_y)) of nodes on a route
+    :rtype: list(tuple(int,tuple(int, int)))
+    """
+    x, y = start
+
+    out = []
+
+    for dimension, magnitude in dm_vector:
+        if magnitude == 0:
+            continue
+
+        if dimension == 0:  # x
+            if magnitude > 0:
+                # Move East (0) magnitude times
+                for _ in range(magnitude):
+                    x, y = machine.xy_over_link(x, y, 0)
+                    out.append((0, (x, y)))
+            else:
+                # Move West (3) -magnitude times
+                for _ in range(magnitude, 0):
+                    x, y = machine.xy_over_link(x, y, 3)
+                    out.append((3, (x, y)))
+        elif dimension == 1:  # y
+            if magnitude > 0:
+                # Move North (2) magnitude times
+                for _ in range(magnitude):
+                    x, y = machine.xy_over_link(x, y, 2)
+                    out.append((2, (x, y)))
+            else:
+                # Move South (5) -magnitude times
+                for _ in range(magnitude, 0):
+                    x, y = machine.xy_over_link(x, y, 5)
+                    out.append((5, (x, y)))
+        else:  # z
+            if magnitude > 0:
+                # Move SouthWest (4) magnitude times
+                for _ in range(magnitude):
+                    x, y = machine.xy_over_link(x, y, 4)
+                    out.append((4, (x, y)))
+            else:
+                # Move NorthEast (1) -magnitude times
+                for _ in range(magnitude, 0):
+                    x, y = machine.xy_over_link(x, y, 1)
+                    out.append((1, (x, y)))
+    return out
+
+
+def nodes_to_trees(nodes, start, route):
+    """ Convert a list of nodes into routing trees, adding them to existing
+        routes
+
+    :param list(tuple(int,tuple(int,int))) nodes:
+        The list of (link_id, (target_x, target_y)) nodes on the route
+    :param tuple(int,int) start: The start of the route
+    :param dict(tuple(int,int),RoutingTree) route:
+        Existing routing trees, with key (x, y) coordinates of the chip of the
+        routes.
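+
+    The node at `start` is reused if it is already present in `route`; the
+    given nodes are then appended below it as a chain of newly created
+    RoutingTree instances, one per step.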
+ """ + last_node = route.get(start) + if last_node is None: + last_node = RoutingTree(start) + route[start] = last_node + for direction, (x, y) in nodes: + this_node = RoutingTree((x, y)) + route[(x, y)] = this_node + + last_node.append_child((direction, this_node)) + last_node = this_node + + +def most_direct_route(source, dest, machine): + """ Find the most direct route from source to target on the machine + + :param tuple(int,int) source: The source x, y coordinates + :param tuple(int,int) dest: The destination x, y coordinated + :param Machine machine: The machine on which to route + """ + vector = machine.get_vector(source, dest) + nodes = longest_dimension_first(vector, source, machine) + route = dict() + nodes_to_trees(nodes, source, route) + root = route[source] + if route_has_dead_links(root, machine): + root = avoid_dead_links(root, machine) + return root + + +def targets_by_chip(vertices, placements, machine): + """ Get the target links and cores on the relevant chips + + :param list(MachineVertex) vertices: The vertices to target + :param Placements placements: Where the vertices are placed + :param Machine machine: The machine placed on + :return: A dict of (x, y) to target (cores, links) + :rtype: dict((int, int), (list, list)) + """ + by_chip = defaultdict(lambda: (set(), set())) + for vertex in vertices: + x, y = vertex_xy(vertex, placements, machine) + if isinstance(vertex, AbstractVirtual): + # Sinks with route-to-endpoint constraints must be routed + # in the according directions. + link = route_to_endpoint(vertex, machine) + by_chip[x, y][1].add(link) + else: + core = placements.get_placement_of_vertex(vertex).p + by_chip[x, y][0].add(core) + return by_chip + + +def vertex_xy(vertex, placements, machine): + """ + :param MachineVertex vertex: + :param Placements placements: + :param ~spinn_machine.Machine machine: + :rtype: tuple(int,int) + """ + if not isinstance(vertex, AbstractVirtual): + placement = placements.get_placement_of_vertex(vertex) + return placement.x, placement.y + link_data = None + if isinstance(vertex, AbstractFPGA): + link_data = machine.get_fpga_link_with_id( + vertex.fpga_id, vertex.fpga_link_id, vertex.board_address) + elif isinstance(vertex, AbstractSpiNNakerLink): + link_data = machine.get_spinnaker_link_with_id( + vertex.spinnaker_link_id, vertex.board_address) + return link_data.connected_chip_x, link_data.connected_chip_y + + +def vertex_xy_and_route(vertex, placements, machine): + """ Get the non-virtual chip coordinates, the vertex, and processor or + link to follow to get to the vertex + + :param MachineVertex vertex: + :param Placements placements: + :param ~spinn_machine.Machine machine: + :return: the xy corridinates of the target vertex mapped to a tuple of + the vertex, core and link. 
+        One of core or link is provided; the other is None
+    :rtype: tuple(tuple(int, int), tuple(MachineVertex, int, None)) or
+        tuple(tuple(int, int), tuple(MachineVertex, None, int))
+    """
+    if not isinstance(vertex, AbstractVirtual):
+        placement = placements.get_placement_of_vertex(vertex)
+        return (placement.x, placement.y), (vertex, placement.p, None)
+    link_data = None
+    if isinstance(vertex, AbstractFPGA):
+        link_data = machine.get_fpga_link_with_id(
+            vertex.fpga_id, vertex.fpga_link_id, vertex.board_address)
+    elif isinstance(vertex, AbstractSpiNNakerLink):
+        link_data = machine.get_spinnaker_link_with_id(
+            vertex.spinnaker_link_id, vertex.board_address)
+    return ((link_data.connected_chip_x, link_data.connected_chip_y),
+            (vertex, None, link_data.connected_link))
+
+
+def route_to_endpoint(vertex, machine):
+    """
+    :param MachineVertex vertex:
+    :param ~spinn_machine.Machine machine:
+    :rtype: int
+    """
+    if isinstance(vertex, AbstractFPGA):
+        link_data = machine.get_fpga_link_with_id(
+            vertex.fpga_id, vertex.fpga_link_id, vertex.board_address)
+    else:
+        link_data = machine.get_spinnaker_link_with_id(
+            vertex.spinnaker_link_id, vertex.board_address)
+    return link_data.connected_link
diff --git a/pacman/utilities/algorithm_utilities/routing_info_allocator_utilities.py b/pacman/utilities/algorithm_utilities/routing_info_allocator_utilities.py
deleted file mode 100644
index b7f380df5..000000000
--- a/pacman/utilities/algorithm_utilities/routing_info_allocator_utilities.py
+++ /dev/null
@@ -1,275 +0,0 @@
-# Copyright (c) 2017-2019 The University of Manchester
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import logging
-from spinn_utilities.log import FormatAdapter
-from spinn_utilities.ordered_set import OrderedSet
-from pacman.model.constraints.key_allocator_constraints import (
-    FixedKeyFieldConstraint,
-    ContiguousKeyRangeContraint, FixedMaskConstraint,
-    FixedKeyAndMaskConstraint, ShareKeyConstraint)
-from pacman.model.graphs.common import EdgeTrafficType
-from pacman.utilities.utility_calls import locate_constraints_of_type
-from pacman.exceptions import (
-    PacmanValueError, PacmanConfigurationException,
-    PacmanInvalidParameterException, PacmanRouteInfoAllocationException)
-
-logger = FormatAdapter(logging.getLogger(__name__))
-
-
-class ConstraintGroup(list):
-    """ A list of edges that share a constraint.
-    """
-
-    def __init__(self, values):
-        """
-        :param iterable(AbstractSingleSourcePartition) values:
-        """
-        super().__init__(values)
-        self._constraint = None
-        self._n_keys = None
-
-    @property
-    def constraint(self):
-        """ The shared constraint.
- - :rtype: AbstractConstraint - """ - return self._constraint - - def _set_constraint(self, constraint): - self._constraint = constraint - - def __hash__(self): - return id(self).__hash__() - - def __eq__(self, other): - return id(other) == id(self) - - def __ne__(self, other): - return id(other) != id(self) - - -_ALL_FIXED_TYPES = ( - FixedKeyAndMaskConstraint, FixedMaskConstraint, FixedKeyFieldConstraint) - - -def get_mulitcast_edge_groups(machine_graph): - """ Utility method to get groups of multicast edges using any\ - :py:class:`KeyAllocatorSameKeyConstraint` constraints. Note that no\ - checking is done here about conflicts related to other constraints. - - :param MachineGraph machine_graph: the machine graph - :return: (fixed key groups, shared key groups, fixed mask groups, - fixed field groups, continuous groups, noncontinuous groups) - :rtype: tuple(list(ConstraintGroup), list(ConstraintGroup), - list(ConstraintGroup), list(ConstraintGroup), list(ConstraintGroup), - list(ConstraintGroup)) - """ - - # mapping between partition and shared key group it is in - partition_groups = dict() - - # process each partition one by one in a bubble sort kinda way - for vertex in machine_graph.vertices: - for partition in machine_graph.\ - get_multicast_edge_partitions_starting_at_vertex(vertex): - - # Get a set of partitions that should be grouped together - shared_key_constraints = locate_constraints_of_type( - partition.constraints, ShareKeyConstraint) - partitions_to_group = [partition] - for constraint in shared_key_constraints: - partitions_to_group.extend(constraint.other_partitions) - - # Get a set of groups that should be grouped - groups_to_group = [ - partition_groups.get(part_to_group, [part_to_group]) - for part_to_group in partitions_to_group] - - # Group the groups - new_group = ConstraintGroup( - part for group in groups_to_group for part in group) - partition_groups.update( - {part: new_group for part in new_group}) - - # Keep track of groups - fixed_key_groups = list() - shared_key_groups = list() - fixed_mask_groups = list() - fixed_field_groups = list() - continuous_groups = list() - noncontinuous_groups = list() - groups_by_type = { - FixedKeyAndMaskConstraint: fixed_key_groups, - FixedMaskConstraint: fixed_mask_groups, - FixedKeyFieldConstraint: fixed_field_groups, - } - groups = OrderedSet(partition_groups.values()) - for group in groups: - - # Get all expected constraints in the group - constraints = [ - constraint for partition in group - for constraint in locate_constraints_of_type( - partition.constraints, _ALL_FIXED_TYPES)] - - # Check that the possibly conflicting constraints are equal - if constraints and not all( - constraint_a == constraint_b for constraint_a in constraints - for constraint_b in constraints): - raise PacmanRouteInfoAllocationException( - "The group of partitions {} have conflicting constraints" - .format(constraints)) - - # If constraints found, put the group in the appropriate constraint - # group - if constraints: - # pylint:disable=protected-access - group._set_constraint(constraints[0]) - constraint_type = type(constraints[0]) - groups_by_type[constraint_type].append(group) - # If no constraints, must be one of the non-specific groups - # If the group has only one item, it is not shared - elif len(group) == 1: - continuous_constraints = ( - constraint for partition in group - for constraint in locate_constraints_of_type( - constraints, ContiguousKeyRangeContraint)) - if any(continuous_constraints): - continuous_groups.append(group) - else: - 
noncontinuous_groups.append(group) - # If the group has more than one partition, it must be shared - else: - shared_key_groups.append(group) - - # return the set of groups - return (fixed_key_groups, shared_key_groups, fixed_mask_groups, - fixed_field_groups, continuous_groups, noncontinuous_groups) - - -def check_types_of_edge_constraint(machine_graph): - """ Go through the graph for operations and checks that the constraints\ - are compatible. - - :param MachineGraph machine_graph: the graph to search through - :raises PacmanConfigurationException: if a problem is found - """ - for partition in machine_graph.outgoing_edge_partitions: - if partition.traffic_type != EdgeTrafficType.MULTICAST: - continue - fixed_key = locate_constraints_of_type( - partition.constraints, FixedKeyAndMaskConstraint) - fixed_mask = locate_constraints_of_type( - partition.constraints, FixedMaskConstraint) - fixed_field = locate_constraints_of_type( - partition.constraints, FixedKeyFieldConstraint) - - if len(fixed_key) > 1 or len(fixed_field) > 1 or len(fixed_mask) > 1: - raise PacmanConfigurationException( - "There are multiple constraint of the same type on partition " - "{} starting at {}. Please fix and try again.".format( - partition.identifier, partition.pre_vertex)) - - fixed_key = len(fixed_key) == 1 - fixed_mask = len(fixed_mask) == 1 - fixed_field = len(fixed_field) == 1 - - # check for fixed key and a fixed mask. as these should have been - # merged before now - if fixed_key and fixed_mask: - raise PacmanConfigurationException( - "The partition {} starting at {} has a fixed key and fixed " - "mask constraint. These can be merged together, but is " - "deemed an error here".format( - partition.identifer, partition.pre_vertex)) - - # check for a fixed key and fixed field, as these are incompatible - if fixed_key and fixed_field: - raise PacmanConfigurationException( - "The partition {} starting at {} has a fixed key and fixed " - "field constraint. These may be merge-able together, but is " - "deemed an error here".format( - partition.identifer, partition.pre_vertex)) - - # check that a fixed mask and fixed field have compatible masks - if fixed_mask and fixed_field: - _check_masks_are_correct(partition) - - -def _check_masks_are_correct(partition): - """ Check that the masks between a fixed mask constraint and a fixed_field\ - constraint. Raises error if not. - - :param AbstractSingleSourcePartition partition: - the outgoing_edge_partition to search for these constraints - :raise PacmanInvalidParameterException: if the masks are incompatible - """ - fixed_mask = locate_constraints_of_type( - partition.constraints, FixedMaskConstraint)[0] - fixed_field = locate_constraints_of_type( - partition.constraints, FixedKeyFieldConstraint)[0] - mask = fixed_mask.mask - for field in fixed_field.fields: - if field.mask & mask != field.mask: - raise PacmanInvalidParameterException( - "field.mask, mask", - "The field mask {} is outside of the mask {}".format( - field.mask, mask), - "{}:{}".format(field.mask, mask)) - for other_field in fixed_field.fields: - if other_field != field and other_field.mask & field.mask != 0: - raise PacmanInvalidParameterException( - "field.mask, mask", - "Field masks {} and {} overlap".format( - field.mask, other_field.mask), - "{}:{}".format(field.mask, mask)) - - -def get_fixed_mask(same_key_group): - """ Get a fixed mask from a group of edges if a\ - :py:class:`FixedMaskConstraint`\ - constraint exists in any of the edges in the group. 
-
-    :param iterable(MachineEdge) same_key_group:
-        Set of edges that are to be assigned the same keys and masks
-    :return: The fixed mask if found, or None
-    :rtype: tuple(int or None, iterable(Field) or None)
-    :raise PacmanValueError: If two edges conflict in their requirements
-    """
-    mask = None
-    fields = None
-    edge_with_mask = None
-    for edge in same_key_group:
-        for constraint in locate_constraints_of_type(
-                edge.constraints, FixedMaskConstraint):
-            if mask is not None and mask != constraint.mask:
-                raise PacmanValueError(
-                    "Two Edges {} and {} must have the same key and mask, "
-                    "but have different fixed masks, {} and {}".format(
-                        edge, edge_with_mask, mask, constraint.mask))
-            if (fields is not None and constraint.fields is not None and
-                    fields != constraint.fields):
-                raise PacmanValueError(
-                    "Two Edges {} and {} must have the same key and mask, "
-                    "but have different field ranges".format(
-                        edge, edge_with_mask))
-            mask = constraint.mask
-            edge_with_mask = edge
-            if constraint.fields is not None:
-                fields = constraint.fields
-
-    return mask, fields
diff --git a/pacman/operations/router_algorithms/routing_tree.py b/pacman/utilities/algorithm_utilities/routing_tree.py
similarity index 95%
rename from pacman/operations/router_algorithms/routing_tree.py
rename to pacman/utilities/algorithm_utilities/routing_tree.py
index 0d37ad0cf..843ad3346 100644
--- a/pacman/operations/router_algorithms/routing_tree.py
+++ b/pacman/utilities/algorithm_utilities/routing_tree.py
@@ -40,15 +40,20 @@ class RoutingTree(object):
     # object
     # * Storing the chip coordinate as two values (_chip_x and _chip_y) rather
     #   than a tuple saves 56 bytes per instance.
-    __slots__ = ["_chip_x", "_chip_y", "_children"]
+    __slots__ = ["_chip_x", "_chip_y", "_children", "_label"]
 
-    def __init__(self, chip):
+    def __init__(self, chip, label=None):
         """
         :param tuple(int,int) chip:
             The chip the route is currently passing through.
+        :param label: An optional label to attach to the node, or None
         """
         self.chip = chip
         self._children = []
+        self._label = label
+
+    @property
+    def label(self):
+        """ The label attached to this node, or None if unlabelled
+        """
+        return self._label
 
     @property
     def chip(self):
@@ -105,6 +110,10 @@ def remove_child(self, child):
         """
         self._children.remove(child)
 
+    @property
+    def is_leaf(self):
+        """ True if this node has no children
+        """
+        return not self._children
+
     def __iter__(self):
         """Iterate over this node and then all its children, recursively and
         in no specific order.
This iterator iterates over the child *objects* diff --git a/pacman/utilities/json_utils.py b/pacman/utilities/json_utils.py index c0699b3fe..0c37ce7a8 100644 --- a/pacman/utilities/json_utils.py +++ b/pacman/utilities/json_utils.py @@ -18,20 +18,10 @@ import json import gzip -from pacman.model.constraints.key_allocator_constraints import ( - ContiguousKeyRangeContraint, FixedKeyAndMaskConstraint, - FixedMaskConstraint) -from pacman.model.constraints.placer_constraints import ( - BoardConstraint, ChipAndCoreConstraint, RadialPlacementFromChipConstraint, - SameChipAsConstraint) -from pacman.model.constraints.partitioner_constraints import ( - SameAtomsAsVertexConstraint) from pacman.model.resources import ( CPUCyclesPerTickResource, DTCMResource, IPtagResource, ResourceContainer, VariableSDRAM) -from pacman.model.routing_info import BaseKeyAndMask -from pacman.model.graphs.machine import ( - MachineEdge, MachineGraph, SimpleMachineVertex) +from pacman.model.graphs.machine import SimpleMachineVertex from pacman.model.placements.placement import Placement @@ -54,108 +44,6 @@ def json_to_object(json_object): return json_object -_LOCATION_CONSTRAINTS = ( - ChipAndCoreConstraint, RadialPlacementFromChipConstraint) -_VERTEX_CONSTRAINTS = (SameChipAsConstraint, SameAtomsAsVertexConstraint) - - -def constraint_to_json(constraint): - """ Converts a constraint to JSON. - - .. note:: - - Vertexes are represented by just their label. - - If an unexpected constraint is received, the str() and repr() values - are saved - - If an Exception occurs, that is caught and added to the JSON object. - - :param AbstractConstraint constraint: The constraint to describe - :return: A dict describing the constraint - :rtype: dict - """ - json_dict = dict() - try: - json_dict["class"] = constraint.__class__.__name__ - if isinstance(constraint, BoardConstraint): - json_dict["board_address"] = constraint.board_address - elif isinstance(constraint, _LOCATION_CONSTRAINTS): - json_dict["x"] = constraint.x - json_dict["y"] = constraint.y - if isinstance(constraint, ChipAndCoreConstraint): - if constraint.p is not None: - json_dict["p"] = constraint.p - elif isinstance(constraint, _VERTEX_CONSTRAINTS): - json_dict["vertex"] = constraint.vertex.label - elif isinstance(constraint, FixedKeyAndMaskConstraint): - json_dict["keys_and_masks"] = key_masks_to_json( - constraint.keys_and_masks) - if constraint.key_list_function: - json_dict["key_list_function"] = str( - constraint.key_list_function) - elif isinstance(constraint, FixedMaskConstraint): - json_dict["mask"] = constraint.mask - elif isinstance(constraint, ContiguousKeyRangeContraint): - # No extra parameters - pass - else: - # Oops an unexpected class - # Classes Not covered include - # FixedKeyFieldConstraint - # FlexiKeyFieldConstraint - # ShareKeyConstraint - json_dict["str"] = str(constraint) - json_dict["repr"] = repr(constraint) - except Exception as ex: # pylint: disable=broad-except - json_dict["exception"] = str(ex) - return json_dict - - -def constraint_from_json(json_dict, graph=None): - if json_dict["class"] == "BoardConstraint": - return BoardConstraint(json_dict["board_address"]) - if json_dict["class"] == "ChipAndCoreConstraint": - if "p" in json_dict: - p = json_dict["p"] - else: - p = None - return ChipAndCoreConstraint(json_dict["x"], json_dict["y"], p) - if json_dict["class"] == "ContiguousKeyRangeContraint": - return ContiguousKeyRangeContraint() - if json_dict["class"] == "FixedKeyAndMaskConstraint": - if "key_list_function" in json_dict: - raise 
NotImplementedError( - "key_list_function {}".format(json_dict["key_list_function"])) - return FixedKeyAndMaskConstraint( - key_masks_from_json(json_dict["keys_and_masks"])) - if json_dict["class"] == "FixedMaskConstraint": - return FixedMaskConstraint(json_dict["mask"]) - if json_dict["class"] == "RadialPlacementFromChipConstraint": - return RadialPlacementFromChipConstraint( - json_dict["x"], json_dict["y"]) - if json_dict["class"] == "SameChipAsConstraint": - return SameChipAsConstraint(vertex_lookup(json_dict["vertex"], graph)) - if json_dict["class"] == "SameAtomsAsVertexConstraint": - return SameAtomsAsVertexConstraint( - vertex_lookup(json_dict["vertex"], graph)) - raise NotImplementedError("constraint {}".format(json_dict["class"])) - - -def constraints_to_json(constraints): - json_list = [] - for constraint in constraints: - json_list.append(constraint_to_json(constraint)) - return json_list - - -def constraints_from_json(json_list, graph): - constraints = [] - for sub in json_list: - constraints.append(constraint_from_json(sub, graph)) - return constraints - - def key_mask_to_json(key_mask): try: json_object = dict() @@ -166,24 +54,6 @@ def key_mask_to_json(key_mask): return json_object -def key_mask_from_json(json_dict): - return BaseKeyAndMask(json_dict["key"], json_dict["mask"]) - - -def key_masks_to_json(key_masks): - json_list = [] - for key_mask in key_masks: - json_list.append(key_mask_to_json(key_mask)) - return json_list - - -def key_masks_from_json(json_list): - key_masks = [] - for sub in json_list: - key_masks.append(key_mask_from_json(sub)) - return key_masks - - def resource_container_to_json(container): json_dict = dict() try: @@ -253,7 +123,6 @@ def vertex_to_json(vertex): try: json_dict["class"] = vertex.__class__.__name__ json_dict["label"] = vertex.label - json_dict["constraints"] = constraints_to_json(vertex.constraints) if vertex.resources_required is not None: json_dict["resources"] = resource_container_to_json( vertex.resources_required) @@ -263,75 +132,8 @@ def vertex_to_json(vertex): def vertex_from_json(json_dict, convert_constraints=True): - if convert_constraints: - constraints = constraints_from_json( - json_dict["constraints"], graph=None) - else: - constraints = [] resources = resource_container_from_json(json_dict.get("resources")) - return SimpleMachineVertex( - resources, label=json_dict["label"], constraints=constraints) - - -def vertex_add_contstraints_from_json(json_dict, graph): - vertex = vertex_lookup(json_dict["label"], graph) - constraints = constraints_from_json(json_dict["constraints"], graph) - vertex.add_constraints(constraints) - - -def edge_to_json(edge): - json_dict = dict() - try: - json_dict["pre_vertex"] = edge.pre_vertex.label - json_dict["post_vertex"] = edge.post_vertex.label - json_dict["traffic_type"] = int(edge.traffic_type) - if edge.label is not None: - json_dict["label"] = edge.label - json_dict["traffic_weight"] = edge.traffic_weight - except Exception as ex: # pylint: disable=broad-except - json_dict["exception"] = str(ex) - return json_dict - - -def edge_from_json(json_dict, graph=None): - label = json_dict.get("label") - return MachineEdge( - vertex_lookup(json_dict["pre_vertex"], graph), - vertex_lookup(json_dict["post_vertex"], graph), - json_dict["traffic_type"], label, json_dict["traffic_weight"]) - - -def graph_to_json(graph): - # TODO Appplication vertex info needed for ZonedRoutingInfoAllocator - json_dict = dict() - try: - if graph.label is not None: - json_dict["label"] = graph.label - json_list = [] - for 
vertex in graph.vertices: - json_list.append(vertex_to_json(vertex)) - json_dict["vertices"] = json_list - json_list = [] - for edge in graph.edges: - json_list.append(edge_to_json(edge)) - json_dict["edges"] = json_list - except Exception as ex: # pylint: disable=broad-except - json_dict["exception"] = str(ex) - return json_dict - - -def graph_from_json(json_dict): - json_dict = json_to_object(json_dict) - graph = MachineGraph(json_dict.get("label")) - for j_vertex in json_dict["vertices"]: - graph.add_vertex(vertex_from_json(j_vertex, convert_constraints=False)) - # Only do constraints when we have all the vertexes to link to - for j_vertex in json_dict["vertices"]: - vertex_add_contstraints_from_json(j_vertex, graph) - for j_edge in json_dict["edges"]: - edge = edge_from_json(j_edge, graph) - graph.add_edge(edge, "JSON_MOCK") - return graph + return SimpleMachineVertex(resources, label=json_dict["label"]) def vertex_lookup(label, graph=None): diff --git a/pacman/utilities/utility_objs/__init__.py b/pacman/utilities/utility_objs/__init__.py index 278b3121e..78625965e 100644 --- a/pacman/utilities/utility_objs/__init__.py +++ b/pacman/utilities/utility_objs/__init__.py @@ -14,6 +14,6 @@ # along with this program. If not, see . from .field import Field, SUPPORTED_TAGS -from .resource_tracker import ResourceTracker +from .chip_counter import ChipCounter -__all__ = ["Field", "ResourceTracker", "SUPPORTED_TAGS"] +__all__ = ["Field", "SUPPORTED_TAGS", "ChipCounter"] diff --git a/pacman/utilities/utility_objs/chip_counter.py b/pacman/utilities/utility_objs/chip_counter.py new file mode 100644 index 000000000..ba455c421 --- /dev/null +++ b/pacman/utilities/utility_objs/chip_counter.py @@ -0,0 +1,64 @@ +# Copyright (c) 2021 The University of Manchester +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + +class ChipCounter(object): + """ A counter of how many chips are needed to hold machine vertices. + This does not look at the constraints of the vertices at all. + The value produced will be a (hopefully) worst-case estimate and should + not be used to decide failure in terms of space! 
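+
+    A typical use (a sketch; assumes the vertices expose the usual
+    ``resources_required`` ResourceContainer) is to feed every machine
+    vertex through the counter and read off the estimate at the end::
+
+        counter = ChipCounter(plan_n_timesteps=1000)
+        for vertex in machine_vertices:
+            counter.add_core(vertex.resources_required)
+        n_chips_needed = counter.n_chips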
+ """ + + __slots__ = [ + # How many time steps to plan for + "__plan_n_timesteps", + + # How many cores there are to be used on a chip + "__n_cores_per_chip", + + # How much SDRAM there is to be used on a chip + "__sdram_per_chip", + + # The number of cores free on the "current" chip + "__cores_free", + + # The SDRAM free on the "current" chip + "__sdram_free", + + # The number of chips used, including the current one + "__n_chips"] + + def __init__( + self, plan_n_timesteps=0, n_cores_per_chip=15, + sdram_per_chip=100 * 1024 * 1024): + self.__plan_n_timesteps = plan_n_timesteps + self.__n_cores_per_chip = n_cores_per_chip + self.__sdram_per_chip = sdram_per_chip + self.__cores_free = 0 + self.__sdram_free = 0 + self.__n_chips = 0 + + def add_core(self, resources): + sdram = resources.sdram.get_total_sdram(self.__plan_n_timesteps) + if self.__cores_free == 0 or self.__sdram_free < sdram: + self.__n_chips += 1 + self.__cores_free = self.__n_cores_per_chip + self.__sdram_free = self.__sdram_per_chip + self.__cores_free -= 1 + self.__sdram_free -= sdram + + @property + def n_chips(self): + return self.__n_chips diff --git a/pacman/utilities/utility_objs/resource_tracker.py b/pacman/utilities/utility_objs/resource_tracker.py deleted file mode 100644 index e03bcf7e6..000000000 --- a/pacman/utilities/utility_objs/resource_tracker.py +++ /dev/null @@ -1,1363 +0,0 @@ -# Copyright (c) 2017-2019 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -from collections import defaultdict -from spinn_utilities.ordered_set import OrderedSet -from spinn_machine import Processor, SDRAM -from pacman.model.constraints.placer_constraints import ( - RadialPlacementFromChipConstraint, BoardConstraint, ChipAndCoreConstraint, - AbstractPlacerConstraint) -from pacman.model.resources import ( - CoreTracker, ConstantSDRAM, CPUCyclesPerTickResource, DTCMResource, - ResourceContainer) -from pacman.utilities.utility_calls import ( - check_algorithm_can_support_constraints, check_constrained_value, - is_equal_or_None) -from pacman.exceptions import ( - PacmanCanNotFindChipException, PacmanInvalidParameterException, - PacmanValueError, PacmanException) -from sortedcollections import ValueSortedDict - - -class ResourceTracker(object): - """ Tracks the usage of resources of a machine. 
- """ - - __slots__ = [ - # The amount of SDRAM used by each chip, - # indexed by the (x, y) tuple of coordinates of the chip - # Note that entries are only added when the SDRAM is first used - "_sdram_tracker", - - # The set of processor IDs available on each chip, - # indexed by the (x, y) tuple of coordinates of the chip - # Note that entries are only added when a core is first used - "_core_tracker", - - # Values received by the init - # The machine object - "_machine", - # the number of timesteps that should be planned for - "_plan_n_timesteps", - # Resources to be removed from each chip - "_preallocated_resources", - - # sdram to be removed from from each ethernet chip - # calculated using _preallocated_resources and _plan_n_timestep - "_sdram_ethernet", - - # sdram to be removed from from each none ethernet chip - # calculated using _preallocated_resources and _plan_n_timestep - "_sdram_all", - - # Set of tags available indexed by board address - # Note that entries are only added when a board is first tracked - "_tags_by_board", - - # Set of boards with available IP tags - # Note that entries are only added when a board is first tracked - "_boards_with_ip_tags", - - # Set of (board_address, tag) assigned to an IP tag indexed by - # (IP address, traffic identifier) - Note not reverse IP tags - "_ip_tags_address_traffic", - - # The (IP address, traffic identifier) assigned to an IP tag indexed by - # (board address, tag) - "_address_and_traffic_ip_tag", - - # The (strip_sdp, port) assigned to an IP tag indexed by - # (board address, tag) - "_ip_tags_strip_sdp_and_port", - - # The (board address, port) combinations already assigned to a - # reverse IP tag - Note not IP tags - "_reverse_ip_tag_listen_port", - - # The port assigned to a reverse IP tag, indexed by - # (board address, tag) - Note not IP tags - "_listen_port_reverse_ip_tag", - - # A count of how many allocations are sharing the same IP tag - - # Note not reverse IP tags - "_n_ip_tag_allocations", - - # Ethernet connected chips indexed by board address - # The ones that have been tracked - "_tracked_ethernet_chips", - # The ones that have not t=yet been tracked - "_untracked_ethernet_chips", - - # Set of (x, y) tuples of coordinates of chips which have available - # processors - # Note that entries are only added when a board is first tracked - "_chips_available", - - # counter of chips that have had processors allocated to them - "_chips_used", - - # The number of chips with the n cores currently available - # Note that entries are only added when a board is first tracked - "_real_chips_with_n_cores_available", - ] - - ALLOCATION_SDRAM_ERROR = ( - "Allocating of {} bytes of SDRAM on chip {}:{} has failed as there " - "are only {} bytes of SDRAM available on the chip at this time. " - "Please fix and try again") - - def __init__(self, machine, plan_n_timesteps, chips=None, - preallocated_resources=None): - """ - :param ~spinn_machine.Machine machine: - The machine to track the usage of - :param int plan_n_timesteps: number of timesteps to plan for - :param chips: If specified, this list of chips will be used instead - of the list from the machine. Note that the order will be - maintained, so this can be used either to reduce the set of chips - used, or to re-order the chips. Note also that on deallocation, - the order is no longer guaranteed. 
- :type chips: iterable(tuple(int, int)) or None - :param preallocated_resources: - :type preallocated_resources: PreAllocatedResourceContainer or None - """ - - # The amount of SDRAM available on each chip, - # indexed by the (x, y) tuple of coordinates of the chip - # Items are sorted in reverse order so highest comes out first - self._sdram_tracker = ValueSortedDict(lambda x: -x) - - # The set of processor IDs available on each chip, - # indexed by the (x, y) tuple of coordinates of the chip - # Note that entries are only added when a core is first used - self._core_tracker = dict() - - # The machine object - self._machine = machine - - # The number of timesteps that should be planned for. - self._plan_n_timesteps = plan_n_timesteps - - # tracker for chips used - self._chips_used = set() - - # Set of tags available indexed by board address - # Note that entries are only added when a board is first used - self._tags_by_board = dict() - - # Set of boards with available IP tags - self._boards_with_ip_tags = OrderedSet() - - # Set of (board_address, tag) assigned - # to any IP tag, indexed by (IP address, traffic_identifier) - # - Note not reverse IP tags - self._ip_tags_address_traffic = defaultdict(set) - - # The (IP address, traffic identifier) assigned to an IP tag indexed by - # (board address, tag) - self._address_and_traffic_ip_tag = dict() - - # The (strip_sdp, port) assigned to an IP tag indexed by - # (board address, tag) - self._ip_tags_strip_sdp_and_port = dict() - - # The (board address, port) combinations already assigned to a - # reverse IP tag - Note not IP tags - self._reverse_ip_tag_listen_port = set() - - # The port assigned to a reverse IP tag, indexed by - # (board address, tag) - Note not IP tags - self._listen_port_reverse_ip_tag = dict() - - # A count of how many allocations are sharing the same IP tag - - # Note not reverse IP tags - self._n_ip_tag_allocations = dict() - - # (x, y) tuple of coordinates of Ethernet connected chip indexed by - # board address - self._tracked_ethernet_chips = dict() - self._untracked_ethernet_chips = dict() - - # set of resources that have been pre allocated and therefore need to - # be taken account of when allocating resources - self._preallocated_resources = preallocated_resources - if preallocated_resources: - self._sdram_ethernet = preallocated_resources.sdram_ethernet. \ - get_total_sdram(self._plan_n_timesteps) - self._sdram_all = preallocated_resources.sdram_all.get_total_sdram( - self._plan_n_timesteps) - else: - self._sdram_ethernet = 0 - self._sdram_all = 0 - - # update tracker for n cores available per chip - self._real_chips_with_n_cores_available = \ - [0] * (machine.max_cores_per_chip() + 1) - - # Set of (x, y) tuples of coordinates of chips which have available - # processors - self._chips_available = OrderedSet() - if chips is None: - for chip in self._machine.ethernet_connected_chips: - self._untracked_ethernet_chips[chip.ip_address] = chip - else: - for x, y in chips: - self._track_chip(x, y) - - @property - def plan_n_time_steps(self): - return self._plan_n_timesteps - - @staticmethod - def check_constraints( - vertices, additional_placement_constraints=None): - """ Check that the constraints on the given vertices are supported\ - by the resource tracker. 
- - :param list(AbstractVertex) vertices: - The vertices to check the constraints of - :param set(AbstractConstraint) additional_placement_constraints: - Additional placement constraints supported by the algorithm doing\ - this check - :raises PacmanInvalidParameterException: - If the constraints cannot be satisfied. - """ - - # These placement constraints are supported by the resource tracker - placement_constraints = { - ChipAndCoreConstraint, BoardConstraint, - RadialPlacementFromChipConstraint - } - if additional_placement_constraints is not None: - placement_constraints.update(additional_placement_constraints) - - # Check the placement constraints - check_algorithm_can_support_constraints( - constrained_vertices=vertices, - supported_constraints=placement_constraints, - abstract_constraint_type=AbstractPlacerConstraint) - - @staticmethod - def get_ip_tag_info(resources, constraints): - """ Get the IP tag resource information - - :param ResourceContainer resources: - The resources to get the values from - :param list(AbstractConstraint) constraints: A list of constraints - :return: - A tuple of board address, iterable of IP tag resources and - iterable of reverse IP tag resources - :rtype: tuple(str, iterable(~IptagResource), - iterable(~ReverseIPtagResource)) - """ - board_address = None - ip_tags = resources.iptags - reverse_ip_tags = resources.reverse_iptags - - for constraint in constraints: - if isinstance(constraint, BoardConstraint): - board_address = check_constrained_value( - constraint.board_address, board_address) - return board_address, ip_tags, reverse_ip_tags - - @staticmethod - def get_chip_and_core(constraints, chips=None): - """ Get an assigned chip and core from a set of constraints - - :param iterable(AbstractConstraint) constraints: - The set of constraints to get the values from. 
- Note that any type of constraint can be in the list but only those - relevant will be used - :param chips: Optional list of tuples of (x, y) coordinates of chips, - restricting the allowed chips - :type chips: iterable(tuple(int, int)) or None - :return: tuple of a chip x and y coordinates, and processor ID, any of - which might be None - :rtype: tuple(int or None, int or None, int or None) - """ - x = None - y = None - p = None - for constraint in constraints: - if isinstance(constraint, ChipAndCoreConstraint): - x = check_constrained_value(constraint.x, x) - y = check_constrained_value(constraint.y, y) - p = check_constrained_value(constraint.p, p) - - if chips is not None and x is not None and y is not None: - if (x, y) not in chips: - raise PacmanInvalidParameterException( - "x, y and chips", - "{}, {} and {}".format(x, y, chips), - "The constraint cannot be met with the given chips") - return x, y, p - - def _track_chip(self, x, y): - """ - Adds (if needed) a chip to the various tracker objects - - For all chips _core_tracker, _sdram_tracker and _chips_available - - For ethernet chips also _tags_by_board, _boards_with_ip_tags as - well as moving the chip from untracked to tracked_ethernet_chips - - :param int x: - :param int y: - """ - if (x, y) in self._core_tracker: - return - chip = self._machine.get_chip_at(x, y) - if chip is None: - raise PacmanInvalidParameterException( - "x and y", - "({x}, {y})", - f"There is no Chip {x}:{y} in the machine") - self._core_tracker[x, y] = CoreTracker( - chip, self._preallocated_resources, - self._real_chips_with_n_cores_available) - self._sdram_tracker[x, y] = chip.sdram.size - self._chips_available.add((x, y)) - board_address = chip.ip_address - if board_address: - self._tracked_ethernet_chips[board_address] = chip - if board_address in self._untracked_ethernet_chips: - self._untracked_ethernet_chips.pop(board_address) - if not chip.virtual: - self._sdram_tracker[x, y] -= self._sdram_ethernet - self._tags_by_board[board_address] = set(chip.tag_ids) - self._boards_with_ip_tags.add(board_address) - if self._preallocated_resources: - for ip_tag in self._preallocated_resources.iptag_resources: - tag = self._allocate_tag_id( - ip_tag.tag, chip.ip_address) - self._update_data_structures_for_iptag( - chip.ip_address, tag, ip_tag.ip_address, - ip_tag.traffic_identifier, ip_tag.strip_sdp, - ip_tag.port) - else: - if not chip.virtual: - self._sdram_tracker[x, y] -= self._sdram_all - - def _get_core_tracker(self, x, y): - """ - Gets the core tracker after making sure it exists - - :param int x: - :param int y: - :return: The core tracker with preallocated resource removed - """ - if (x, y) not in self._core_tracker: - self._track_chip(x, y) - return self._core_tracker[(x, y)] - - def _track_board(self, board_address): - """ - Adds (if needed) a board and all its chips to the tracked objects - - :param str board_address: - :raise PacmanInvalidParameterException: - * If the board address is unknown - """ - if board_address not in self._tracked_ethernet_chips: - try: - eth_chip = self._untracked_ethernet_chips.pop(board_address) - except KeyError as original: - raise PacmanInvalidParameterException( - "board_address", str(board_address), - "Unrecognised board address") from original - for (x, y) in self._machine.get_existing_xys_on_board(eth_chip): - self._track_chip(x, y) - # track_chip updates tracked_ethernet_chips - - def _get_ethernet_chip(self, board_address): - """ - Gets the ethernet chip for the board and ensure it is tracked - - :param str 
board_address: - :return: EthernetChip - :raise PacmanInvalidParameterException: - * If the board address is unknown - """ - self._track_board(board_address) - return self._tracked_ethernet_chips[board_address] - - def _get_usable_chips_on_baord(self, chips, board_address): - """ Get all chips that are available on a board given the constraints - - :param chips: iterable of tuples of (x, y) coordinates of chips to - look though for usable chips, or None to use all available chips - :type chips: iterable(tuple(int, int)) - :param board_address: the board address to check for usable chips on - :type board_address: str or None - :return: iterable of tuples of (x, y) coordinates of usable chips - :rtype: iterable(tuple(int, int)) - :raise PacmanInvalidParameterException: - * If the board address is unknown - * When either or both chip coordinates of any chip are none - * When a non-existent chip is specified - * When all the chips in the specified board have been used - """ - eth_chip = self._get_ethernet_chip(board_address) - - if chips is None: - for (x, y) in self._machine.get_existing_xys_on_board(eth_chip): - if self._get_core_tracker(x, y).is_available: - yield (x, y) - else: - area_code = set(self._machine.get_existing_xys_on_board(eth_chip)) - chip_found = False - for (x, y) in chips: - if ((x, y) in area_code and - self._get_core_tracker(x, y).is_available): - chip_found = True - yield (x, y) - if not chip_found: - self._check_chip_not_used(chips) - raise PacmanInvalidParameterException( - "chips and board_address", - "{} and {}".format(chips, board_address), - "No valid chips found on the specified board") - - def _get_usable_chips_any_board(self, chips): - """ Get all chips that are available on a board given the constraints - - :param chips: iterable of tuples of (x, y) coordinates of chips to - look though for usable chips, or None to use all available chips - :type chips: iterable(tuple(int, int)) - :return: iterable of tuples of (x, y) coordinates of usable chips - :rtype: iterable(tuple(int, int)) - :raise PacmanInvalidParameterException: - * If the board address is unknown - * When either or both chip coordinates of any chip are none - * When a non-existent chip is specified - * When all the chips in the specified board have been used - """ - if chips is None: - for (x, y) in self._chips_available: - if self._get_core_tracker(x, y).is_available: - yield (x, y) - for board_address in list(self._untracked_ethernet_chips): - eth_chip = self._get_ethernet_chip(board_address) - for (x, y) in self._machine.get_existing_xys_on_board( - eth_chip): - yield (x, y) - else: - chip_found = False - for (x, y) in chips: - if self._get_core_tracker(x, y).is_available: - chip_found = True - yield (x, y) - if not chip_found: - self._check_chip_not_used(chips) - raise PacmanInvalidParameterException( - "chips", - f"{chips}".format(chips), - "No valid chips found") - - def _get_usable_chips(self, chips, board_address): - """ Get all chips that are available on a board given the constraints - - :param chips: iterable of tuples of (x, y) coordinates of chips to - look though for usable chips, or None to use all available chips - :type chips: iterable(tuple(int, int)) - :param board_address: the board address to check for usable chips on - :type board_address: str or None - :return: iterable of tuples of (x, y) coordinates of usable chips - :rtype: iterable(tuple(int, int)) - :raise PacmanInvalidParameterException: - * If the board address is unknown - * When either or both chip coordinates of any 
chip are none - * When a non-existent chip is specified - * When all the chips in the specified board have been used - """ - if board_address is not None: - yield from self._get_usable_chips_on_baord(chips, board_address) - else: - yield from self._get_usable_chips_any_board(chips) - - def _check_chip_not_used(self, chips): - """ - Check to see if any of the candidates chip have already been used. - If not this may indicate the Chip was not there. Possibly a dead chip. - - :param chips: iterable of tuples of (x, y) coordinates of chips to - look though for usable chips, or None to use all available chips - :type chips: iterable(tuple(int, int)) - :raises PacmanCanNotFindChipException: - """ - for chip in chips: - if chip in self._chips_used: - # Not a case of all the Chips never existed - return - raise PacmanCanNotFindChipException( - "None of the chips {} were ever in the chips list".format(chips)) - - def _get_matching_ip_tag( - self, chip, board_address, tag_id, ip_address, port, strip_sdp, - traffic_identifier): - """ Attempt to locate a matching tag for the given details - - :param chip: The chip which is the source of the data for the tag - :type chip: ~spinn_machine.Chip or None - :param board_address: the board address to locate the chip on - :type board_address: str or None - :param tag_id: the tag ID to locate - :type tag_id: int or None - :param str ip_address: The IP address of the tag - :param port: The port of the tag or None if not assigned - :type port: int or None - :param bool strip_sdp: True if the tag is to strip SDP header - :param str traffic_identifier: - The identifier of the traffic to pass over this tag - :return: A board address, tag ID, and port or None, None, None if none - :rtype: tuple of (str, int, (int or None)) or (None, None, None) - """ - - # If there is no tag for the given IP address - traffic identifier - # combination, return - if ((ip_address, traffic_identifier) not in - self._ip_tags_address_traffic): - return None, None, None - - # If no board address is specified, try to allow use of the closest - # board - eth_chip = None - if board_address is None and chip is not None: - eth_chip = self._machine.get_chip_at( - chip.nearest_ethernet_x, chip.nearest_ethernet_y) - - # Scan the existing allocated tags and see if any match the details - found_board = None - found_tag = None - found_port = None - existing_tags = self._ip_tags_address_traffic[ - ip_address, traffic_identifier] - for (other_board_address, other_tag) in existing_tags: - (other_strip_sdp, other_port) = self._ip_tags_strip_sdp_and_port[ - other_board_address, other_tag] - if (is_equal_or_None(other_board_address, board_address) and - is_equal_or_None(other_tag, tag_id) and - other_strip_sdp == strip_sdp and - is_equal_or_None(other_port, port)): - - # If the existing tag is on the same board, return immediately - if (eth_chip is not None and - other_board_address == eth_chip.ip_address): - return other_board_address, other_tag, other_port - - # Otherwise store the tag for possible later use - found_board = other_board_address - found_tag = other_tag - found_port = other_port - - # If we got here, we didn't find an existing tag on the same board - # so check if the tag *could* be assigned to the current board - if eth_chip and self._is_tag_available(eth_chip.ip_address, tag_id): - - # If the tag is available, allow it to be used - return None, None, None - - # Otherwise, return any matching existing tag - return found_board, found_tag, found_port - - def _is_tag_available(self, 
-    def _is_tag_available(self, board_address, tag):
-        """ Check if a tag is available given the constraints
-
-        :param board_address: the board address to locate the chip on
-        :type board_address: str or None
-        :param tag: the tag ID to locate
-        :type tag: int or None
-        :return: True if the tag is available, False otherwise
-        :rtype: bool
-        """
-        if board_address is None:
-            if self._untracked_ethernet_chips:
-                return True
-            if tag is None:
-                return bool(self._boards_with_ip_tags)
-            else:
-                for board_addr in self._boards_with_ip_tags:
-                    if (board_addr not in self._tags_by_board or
-                            tag in self._tags_by_board[board_addr]):
-                        return True
-                return False
-        else:
-            if board_address in self._untracked_ethernet_chips:
-                return True
-            if tag is None:
-                return board_address in self._boards_with_ip_tags
-            else:
-                return tag in self._tags_by_board[board_address]
-
-    def _is_ip_tag_available(self, board_address, ip_tag):
-        """ Check if an IP tag is available given the constraints
-
-        :param board_address: the board address to locate the chip on
-        :type board_address: str or None
-        :param IptagResource ip_tag: the IP tag resource to be checked,
-            carrying the tag ID, IP address, port, strip-SDP flag and
-            traffic identifier
-        :return: True if a matching IP tag is available, False otherwise
-        :rtype: bool
-        """
-
-        # If equivalent traffic is being used by another IP tag, re-use it
-        (b_address, _, _) = self._get_matching_ip_tag(
-            None, board_address, ip_tag.tag, ip_tag.ip_address, ip_tag.port,
-            ip_tag.strip_sdp, ip_tag.traffic_identifier)
-        if b_address is not None:
-            return True
-
-        # Otherwise determine if another tag is available
-        return self._is_tag_available(board_address, ip_tag.tag)
-
-    def _are_ip_tags_available(self, board_address, ip_tags):
-        """ Check if the set of tags are available using the given chip,\
-            given the constraints
-
-        :param board_address: the board to allocate IP tags on
-        :type board_address: str or None
-        :param ip_tags: The IP tag resources
-        :type ip_tags: iterable(IptagResource) or None
-        :return: True if the tags can be allocated, False otherwise
-        :rtype: bool
-        """
-        # If there are no tags to assign, declare that they are available
-        if ip_tags is None or not ip_tags:
-            return True
-
-        # Check if each of the tags is available
-        return all(
-            self._is_ip_tag_available(board_address, ip_tag)
-            for ip_tag in ip_tags)
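
The `_is_tag_available` test above branches on whether a board was requested: `None` means any board will do, and a board that has never been tracked has all of its tags free. A simplified sketch of that decision, with dict/set stand-ins (hypothetical names, not the tracker's real state):

```python
untracked_boards = {"10.0.0.2"}          # boards with nothing allocated yet
tags_by_board = {"10.0.0.1": {1, 2, 3}}  # remaining free tags per board

def is_tag_available(board, tag):
    if board is None:  # any board will do
        return bool(untracked_boards) or any(
            tag is None or tag in free for free in tags_by_board.values())
    if board in untracked_boards:
        return True  # nothing allocated there yet, so every tag is free
    free = tags_by_board.get(board, set())
    return bool(free) if tag is None else tag in free

assert is_tag_available(None, 2)            # tag 2 is free on some board
assert is_tag_available("10.0.0.2", 7)      # untracked board
assert not is_tag_available("10.0.0.1", 9)  # tag 9 is not free there
```
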
-    def _is_reverse_ip_tag_available(self, board_address, tag, port):
-        """ Check if the reverse IP tag is available given the constraints
-
-        :param board_address: The board address to use
-        :type board_address: str or None
-        :param tag: The tag to be used
-        :type tag: int or None
-        :param port: The port that the tag will listen on, on the board
-        :type port: int or None
-        :return: True if the tag is available, False otherwise
-        :rtype: bool
-        """
-        if board_address is not None:
-
-            # If the board address is not None, and the port is already
-            # assigned, the tag is not available
-            if (port is not None and
-                    (board_address, port) in self._reverse_ip_tag_listen_port):
-                return False
-
-            # If the port is available, return True if the tag is available
-            return self._is_tag_available(board_address, tag)
-
-        # If the board address is None but the port is already used
-        # everywhere that the tag is available, the tag is not available.
-        # Note that if port is None, any tag just has to be available
-        if self._untracked_ethernet_chips:
-            return True
-        if port is None:
-            for addr in self._boards_with_ip_tags:
-                if self._is_tag_available(addr, tag):
-                    return True
-        else:
-            for addr in self._boards_with_ip_tags:
-                if (addr, port) not in self._reverse_ip_tag_listen_port and\
-                        self._is_tag_available(addr, tag):
-                    return True
-        return False
-
-    def _are_reverse_ip_tags_available(
-            self, board_address, reverse_ip_tags):
-        """ Check if this chip can be used given the reverse IP tag resources
-
-        :param board_address: the board to allocate IP tags on
-        :type board_address: str or None
-        :param reverse_ip_tags: The reverse IP tag resources to be met
-        :type reverse_ip_tags: iterable(ReverseIptagResource) or None
-        :return: True if the chip can be used, False otherwise
-        :rtype: bool
-        """
-        # If there are no tags, declare they are available
-        if reverse_ip_tags is None or not reverse_ip_tags:
-            return True
-
-        return all(
-            self._is_reverse_ip_tag_available(board_address, rip.tag, rip.port)
-            for rip in reverse_ip_tags)
-
-    def _allocate_tag(self, chip, board_address, tag_id):
-        """ Allocate a tag given the constraints
-
-        :param ~spinn_machine.Chip chip:
-            The chip containing the source of data for this tag
-        :param board_address: the board address to allocate to
-        :type board_address: str or None
-        :param tag_id: the tag ID to allocate on this board address
-        :type tag_id: int or None
-        :return: (board address, tag)
-        :rtype: tuple(str, int)
-        """
-
-        # First try to find a tag on the board closest to the chip
-        if board_address is None:
-            eth_chip = self._machine.get_chip_at(
-                chip.nearest_ethernet_x, chip.nearest_ethernet_y)
-
-            # Verify that the Ethernet chip has the tag ID available
-            if self._is_tag_available(eth_chip.ip_address, tag_id):
-                board_address = eth_chip.ip_address
-
-        if board_address is None:
-            if tag_id is not None:
-                for b_address in self._boards_with_ip_tags:
-                    if (b_address not in self._tags_by_board or
-                            tag_id in self._tags_by_board[b_address]):
-                        board_address = b_address
-                        break
-            else:
-                if self._boards_with_ip_tags:
-                    board_address = self._boards_with_ip_tags.peek()
-
-        if board_address is None:
-            # Fall back on an arbitrary untracked board
-            for board_address in self._untracked_ethernet_chips:
-                break
-
-        tag_id = self._allocate_tag_id(tag_id, board_address)
-
-        if not self._tags_by_board[board_address]:
-            self._boards_with_ip_tags.remove(board_address)
-        return board_address, tag_id
-
-    def _allocate_tag_id(self, tag_id, board_address):
-        """ Locates a tag ID for the IP tag
-
-        :param tag_id: tag ID to get, or None for any
-        :type tag_id: int or None
-        :param str board_address: board address
-        :return: tag ID allocated
-        :rtype: int
-        """
-        self._track_board(board_address)
-        if tag_id is None:
-            return self._tags_by_board[board_address].pop()
-        self._tags_by_board[board_address].remove(tag_id)
-        return tag_id
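
`_allocate_tag` above picks a board in a fixed fallback order: the board nearest the source chip, then any tracked board where the requested tag is free, then any untracked board. A compressed sketch of that ordering; `choose_board` and the data are hypothetical, not the tracker's real structures:

```python
def choose_board(preferred, candidates, untracked, tag_id, tags_by_board):
    """Prefer the closest board, then any tracked board where tag_id is
    free (or any board at all when tag_id is None), then an untracked
    board. Returns None if nothing fits."""
    if preferred is not None and (
            tag_id is None or tag_id in tags_by_board.get(preferred, set())):
        return preferred
    for board in candidates:
        if tag_id is None or tag_id in tags_by_board.get(board, set()):
            return board
    return next(iter(untracked), None)

tags_by_board = {"a": {1, 2}, "b": {3}}
assert choose_board("a", ["a", "b"], set(), 3, tags_by_board) == "b"
assert choose_board(None, ["a", "b"], {"c"}, 9, tags_by_board) == "c"
```
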
-
-    def _allocate_ip_tags(self, chip, board_address, ip_tags):
-        """ Allocate the given set of IP tag resources
-
-        :param ~spinn_machine.Chip chip: The chip to allocate the tags for
-        :param board_address: The board address to allocate on
-        :type board_address: str or None
-        :param iterable(IptagResource) ip_tags:
-            The IP tag resources to allocate
-        :return: iterable of tuples of
-            (board address, tag, Ethernet chip x, Ethernet chip y) assigned
-        :rtype: iterable(tuple(str, int, int, int)) or None
-        """
-        if ip_tags is None or not ip_tags:
-            return None
-
-        allocations = list()
-        for ip_tag in ip_tags:
-
-            # Find a tag that matches the one required
-            (b_address, a_tag, a_port) = self._get_matching_ip_tag(
-                chip, board_address, ip_tag.tag, ip_tag.ip_address,
-                ip_tag.port, ip_tag.strip_sdp, ip_tag.traffic_identifier)
-
-            if b_address is not None:
-                # Get the chip with the Ethernet
-                e_chip = self._get_ethernet_chip(b_address)
-
-                # If there is already an allocation that matches the current
-                # tag, return this as the allocated tag
-                allocations.append((b_address, a_tag, e_chip.x, e_chip.y))
-
-                # Add to the number of things allocated to the tag
-                self._n_ip_tag_allocations[b_address, a_tag] += 1
-
-                # If the port is None and the requested port is not None,
-                # update the port number to the requested one
-                if a_port is None and ip_tag.port is not None:
-                    self._ip_tags_strip_sdp_and_port[b_address, a_tag] = \
-                        (ip_tag.strip_sdp, ip_tag.port)
-            else:
-
-                # Allocate an IP tag
-                (board_address, tag) = self._allocate_tag(
-                    chip, board_address, ip_tag.tag)
-                self._update_data_structures_for_iptag(
-                    board_address, tag, ip_tag.ip_address,
-                    ip_tag.traffic_identifier, ip_tag.strip_sdp, ip_tag.port)
-
-                # Get the chip with the Ethernet
-                e_chip = self._get_ethernet_chip(board_address)
-
-                allocations.append((board_address, tag, e_chip.x, e_chip.y))
-        return allocations
-
-    def _update_data_structures_for_iptag(
-            self, board_address, tag, ip_address, traffic_identifier,
-            strip_sdp, port):
-        """
-        :param str board_address:
-        :param int tag:
-        :param str ip_address:
-        :param str traffic_identifier:
-        :param bool strip_sdp:
-        :param int port:
-        """
-        tag_key = (board_address, tag)
-        existing_tags = self._ip_tags_address_traffic[
-            ip_address, traffic_identifier]
-        existing_tags.add(tag_key)
-        self._ip_tags_strip_sdp_and_port[tag_key] = (strip_sdp, port)
-        self._address_and_traffic_ip_tag[tag_key] = \
-            (ip_address, traffic_identifier)
-
-        # Remember how many allocations are sharing this tag
-        # in case a deallocation is requested
-        self._n_ip_tag_allocations[tag_key] = 1
-
-    def _allocate_reverse_ip_tags(self, chip, board_address, reverse_ip_tags):
-        """ Allocate reverse IP tags with the given constraints
-
-        :param ~spinn_machine.Chip chip: The chip to allocate the tags for
-        :param board_address: the board address to allocate on
-        :type board_address: str or None
-        :param iterable(ReverseIptagResource) reverse_ip_tags:
-            The reverse IP tag resources
-        :return: iterable of tuples of (board address, tag) assigned
-        :rtype: iterable(tuple(str, int))
-        """
-        if reverse_ip_tags is None or not reverse_ip_tags:
-            return None
-
-        allocations = list()
-        for reverse_ip_tag in reverse_ip_tags:
-            (board_address, tag) = self._allocate_tag(
-                chip, board_address, reverse_ip_tag.tag)
-            allocations.append((board_address, tag))
-            self._update_structures_for_reverse_ip_tag(
-                board_address, tag, reverse_ip_tag.port)
-        return allocations
-
-    def _update_structures_for_reverse_ip_tag(self, board_address, tag, port):
-        """ Updates the structures for reverse IP tags
-
-        :param str board_address: the board the tag is going to be placed on
-        :param int tag: the tag ID
-        :param port: the port number
-        :type port: int or None
-        :rtype: None
-        """
-        if port is not None:
-            self._reverse_ip_tag_listen_port.add((board_address, port))
-            self._listen_port_reverse_ip_tag[board_address, tag] = port
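
`allocate_constrained_resources` below first distils the constraint list into a concrete (x, y, p) request before handing over to the general allocator. A sketch of that folding step: `ChipAndCoreConstraint` is the real class (it also appears in the tests later in this diff), while `fold_chip_and_core` is a hypothetical helper, not the tracker's actual `get_chip_and_core`:

```python
from pacman.model.constraints.placer_constraints import ChipAndCoreConstraint

def fold_chip_and_core(constraints):
    """Return (x, y, p) from the first ChipAndCoreConstraint, if any."""
    for constraint in constraints:
        if isinstance(constraint, ChipAndCoreConstraint):
            return constraint.x, constraint.y, constraint.p
    return None, None, None

x, y, p = fold_chip_and_core([ChipAndCoreConstraint(1, 2)])
assert (x, y, p) == (1, 2, None)  # no specific processor requested
```
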
-
-    def allocate_constrained_resources(
-            self, resources, constraints, chips=None):
-        """ Attempts to use the given resources of the machine, constrained
-            by the given placement constraints.
-
-        :param ResourceContainer resources: The resources to be allocated
-        :param list(AbstractConstraint) constraints:
-            The constraints to consider
-        :param iterable(tuple(int,int)) chips:
-            The optional list of (x, y) tuples of chip coordinates of chips
-            that can be used. Note that any chips passed in previously will
-            be ignored
-        :return:
-            The x and y coordinates of the used chip, the processor_id,
-            and the IP tag and reverse IP tag allocation tuples
-        :rtype: tuple(int, int, int, list(tuple(int, int, int, int)),
-            list(tuple(int, int)))
-        :raise PacmanValueError:
-            If the constraints cannot be met given the current allocation
-            of resources
-        """
-        (x, y, p) = self.get_chip_and_core(constraints, chips)
-        (board_address, ip_tags, reverse_ip_tags) = \
-            self.get_ip_tag_info(resources, constraints)
-        if x is not None and y is not None:
-            chips = [(x, y)]
-
-        return self.allocate_resources(
-            resources, chips, p, board_address, ip_tags, reverse_ip_tags)
-
-    def allocate_constrained_group_resources(
-            self, resource_and_constraint_list, chips=None):
-        """ Allocates a group of cores on the same chip for these resources
-
-        :param resource_and_constraint_list:
-            A list of tuples of (resources, list of constraints) to allocate
-        :type resource_and_constraint_list:
-            list(tuple(ResourceContainer, AbstractConstraint))
-        :param iterable(tuple(int,int)) chips:
-            A list of chips that can be used
-        :return: list of the x and y coordinates of the used chip, the
-            processor_id, and the IP tag and reverse IP tag allocation tuples
-        :rtype: iterable(tuple(int, int, int, list(tuple(int, int, int, int)),
-            list(tuple(int, int))))
-        """
-        if chips:
-            chips = list(chips)
-        x = None
-        y = None
-        board_address = None
-        processor_ids = list()
-        group_ip_tags = list()
-        group_reverse_ip_tags = list()
-        for (resources, constraints) in resource_and_constraint_list:
-            this_board, this_ip_tags, this_reverse_ip_tags = \
-                self.get_ip_tag_info(resources, constraints)
-            this_x, this_y, this_p = self.get_chip_and_core(constraints, chips)
-
-            if (self.__different(x, this_x) or self.__different(y, this_y) or
-                    (this_p is not None and this_p in processor_ids) or
-                    self.__different(board_address, this_board)):
-                raise PacmanException("Cannot merge conflicting constraints")
-            x = x if this_x is None else this_x
-            y = y if this_y is None else this_y
-            board_address = board_address if this_board is None else this_board
-
-            processor_ids.append(this_p)
-            group_ip_tags.append(this_ip_tags)
-            group_reverse_ip_tags.append(this_reverse_ip_tags)
-
-        if x is not None and y is not None:
-            chips = [(x, y)]
-
-        # try to allocate in one block
-        group_resources = [item[0] for item in resource_and_constraint_list]
-
-        return self._allocate_group_resources(
-            group_resources, chips, processor_ids, board_address,
-            group_ip_tags, group_reverse_ip_tags)
-
-    @staticmethod
-    def __different(a, b):
-        """
-        :rtype: bool
-        """
-        return a is not None and b is not None and a != b
-
-    def _allocate_group_resources(
-            self, group_resources, chips=None, processor_ids=None,
-            board_address=None, group_ip_tags=None,
-            group_reverse_ip_tags=None):
-        """ Attempts to use the given group of resources on a single chip of
-            the machine. Can be given a specific place to use the resources,
-            or else it will allocate them in the first place where the
-            resources of the group fit together.
- - :param list(ResourceContainer) group_resources: - The resources to be allocated - :param iterable(tuple(int,int)) chips: - An iterable of (x, y) tuples of chips that are to be used - :param processor_ids: The specific processor to use on any chip for - each resource of the group - :type processor_ids: list(int or None) - :param str board_address: - the board address to allocate resources of a chip - :param list(list(IPtagResource)) group_ip_tags: - list of lists of IP tag resources - :param list(list(ReverseIPtagResource)) group_reverse_ip_tags: - list of lists of reverse IP tag resources - :return: An iterable of tuples of the x and y coordinates of the used - chip, the processor_id, and the IP tag and reverse IP tag - allocation tuples - :rtype: iterable(tuple(int, int, int, list(tuple(int, int, int, int)), - list(tuple(int, int)))) - :raises PacmanValueError: - If there aren't chips available that can take the allocation. - """ - - usable_chips = self._get_usable_chips(chips, board_address) - - total_sdram = 0 - for resources in group_resources: - total_sdram += resources.sdram.get_total_sdram( - self._plan_n_timesteps) - - # Make arrays to make the next bit work - if not group_ip_tags: - group_ip_tags = [None for _ in group_resources] - if not group_reverse_ip_tags: - group_reverse_ip_tags = [None for _ in group_resources] - if not processor_ids: - processor_ids = [None for _ in group_resources] - - # Find the first usable chip which fits all the group resources - tried_chips = list() - for key in usable_chips: - (chip_x, chip_y) = key - tried_chips.append(key) - chip = self._machine.get_chip_at(chip_x, chip_y) - - # No point in continuing if the chip doesn't have space for - # everything - tracker = self._get_core_tracker(chip_x, chip_y) - if (tracker.n_cores_available >= len(group_resources) and - self._sdram_tracker[(chip_x, chip_y)] >= total_sdram): - - # Check that the constraints of all the resources can be met - is_available = True - for resources, processor_id, ip_tags, reverse_ip_tags in zip( - group_resources, processor_ids, group_ip_tags, - group_reverse_ip_tags): - if (not tracker.is_core_available(processor_id) or - not self._are_ip_tags_available( - board_address, ip_tags) or - not self._are_reverse_ip_tags_available( - board_address, reverse_ip_tags)): - is_available = False - break - - # If everything is good, do the allocation - if is_available: - results = list() - for resources, proc_id, ip_tags, reverse_ip_tags in zip( - group_resources, processor_ids, group_ip_tags, - group_reverse_ip_tags): - self._chips_used.add(key) - processor_id = tracker.allocate(proc_id) - self._sdram_tracker[key] -= \ - resources.sdram.get_total_sdram( - self._plan_n_timesteps) - ip_tags_allocated = self._allocate_ip_tags( - chip, board_address, ip_tags) - reverse_ip_tags_allocated = \ - self._allocate_reverse_ip_tags( - chip, board_address, reverse_ip_tags) - results.append(( - chip.x, chip.y, processor_id, ip_tags_allocated, - reverse_ip_tags_allocated)) - return results - - # If no chip is available, raise an exception - resources = self._available_resources(tried_chips) - all_chips = self._get_usable_chips(None, None) - all_resources = self._available_resources(all_chips) - raise PacmanValueError( - "No resources available to allocate the given group resources" - " within the given constraints:\n" - " Request for {} cores on a single chip with SDRAM: {}\n" - " Resources available which meet constraints:\n" - " {}\n" - " All Resources:\n" - " {}".format(len(group_resources), 
total_sdram, resources, - all_resources)) - - def allocate_resources( - self, resources, chips=None, processor_id=None, - board_address=None, ip_tags=None, reverse_ip_tags=None): - """ Attempts to use the given resources of the machine. Can be given - specific place to use the resources, or else it will allocate them on - the first place that the resources fit. - - :param ResourceContainer resources: The resources to be allocated - :param vertices: list of vertices for these resources - :param iterable(tuple(int,int)) chips: - An iterable of (x, y) tuples of chips that are to be used - :param int processor_id: The specific processor to use on any chip. - :param str board_address: - The board address to allocate resources of a chip - :param iterable(IPtagResource) ip_tags: iterable of IP tag resources - :param iterable(ReverseIPtagResource) reverse_ip_tags: - iterable of reverse IP tag resources - :return: The x and y coordinates of the used chip, the processor_id, - and the IP tag and reverse IP tag allocation tuples - :rtype: tuple(int, int, int, list(tuple(int, int, int, int)), - list(tuple(int, int))) - :raises PacmanValueError: - If there isn't a chip available that can take the allocation. - """ - # Find the first usable chip which fits the resources - for (chip_x, chip_y) in self._get_usable_chips(chips, board_address): - chip = self._machine.get_chip_at(chip_x, chip_y) - key = (chip_x, chip_y) - tracker = self._get_core_tracker(chip_x, chip_y) - sdram_available = self._sdram_tracker[chip_x, chip_y] >= \ - resources.sdram.get_total_sdram(self._plan_n_timesteps) - - if (tracker.is_core_available(processor_id) and - sdram_available and - self._are_ip_tags_available(board_address, ip_tags) and - self._are_reverse_ip_tags_available(board_address, - reverse_ip_tags)): - self._chips_used.add(key) - processor_id = tracker.allocate(processor_id) - self._sdram_tracker[chip_x, chip_y] -= \ - resources.sdram.get_total_sdram(self._plan_n_timesteps) - ip_tags_allocated = self._allocate_ip_tags( - chip, board_address, ip_tags) - reverse_ip_tags_allocated = self._allocate_reverse_ip_tags( - chip, board_address, reverse_ip_tags) - return (chip.x, chip.y, processor_id, ip_tags_allocated, - reverse_ip_tags_allocated) - - # If no chip is available, raise an exception - if chips is not None and processor_id is not None: - if len(chips) == 1: - (x, y) = chips[0] - raise PacmanValueError( - "Core {}:{}:{} is not available.".format( - x, y, processor_id)) - else: - raise PacmanValueError( - "Processor id {} is not available on any of the chips" - "".format(processor_id)) - message = \ - f"No resources available to allocate the given resources" \ - f" within the given constraints:\n" \ - f" Request for CPU: {resources.cpu_cycles.get_value()}, " \ - f"DTCM: {resources.dtcm.get_value()}, " \ - f"SDRAM fixed: {resources.sdram.fixed} " \ - f"per_timestep: {resources.sdram.per_timestep}, " \ - f"IP TAGS: {resources.iptags}, {resources.reverse_iptags}\n" \ - f" Planning to run for {self._plan_n_timesteps} timesteps.\n" - tried_chips = list(self._get_usable_chips(chips, board_address)) - left_resources = self._available_resources(tried_chips) - if len(tried_chips) < 60: - message += f" Resources available which meet constraints:\n" \ - f" {left_resources}\n" - all_chips = list(self._get_usable_chips(None, None)) - if len(all_chips) < 60: - all_resources = self._available_resources(all_chips) - message += f" All resources available:\n" \ - f" {all_resources}\n" - raise PacmanValueError(message) - - def 
_available_resources(self, usable_chips): - """ Describe how much of the various resource types are available. - - :param iterable(tuple(int,int)) usable_chips: - Coordinates of usable chips - :return: dict of board address to board resources - :rtype: dict - """ - resources_for_chips = dict() - for x, y in usable_chips: - resources_for_chip = dict() - resources_for_chip["coords"] = (x, y) - tracker = self._get_core_tracker(x, y) - resources_for_chip["n_cores"] = tracker.n_cores_available - resources_for_chip["sdram"] = self._sdram_tracker[x, y] - resources_for_chips[x, y] = resources_for_chip - resources = dict() - - # make sure all boards are tracked - for board_address in list(self._untracked_ethernet_chips): - self._track_board(board_address) - - for board_address in self._boards_with_ip_tags: - eth_chip = self._get_ethernet_chip(board_address) - board_resources = dict() - board_resources["n_tags"] = (len( - self._tags_by_board[board_address])) - chips = list() - for xy in self._machine.get_existing_xys_on_board(eth_chip): - chip_resources = resources_for_chips.get(xy) - if chip_resources is not None: - chips.append(chip_resources) - if chips: - board_resources["chips"] = chips - resources[board_address] = board_resources - - return resources - - def get_maximum_cores_available_on_a_chip(self): - """ Returns the number of available cores of a real chip with the - maximum number of available cores - - :return: the max cores available on the best real chip - :rtype: int - """ - if self._untracked_ethernet_chips: - # assume at least one chip on the board will have the max - return self._machine.max_cores_per_chip() - for n_cores_available, n_chips_with_n_cores in reversed(list( - enumerate(self._real_chips_with_n_cores_available))): - if n_chips_with_n_cores != 0: - return n_cores_available - - def get_maximum_constrained_resources_available( - self, resources, constraints): - """ Get the maximum resources available given the constraints - - :param ResourceContainer resources: The resources of the item to check - :param iterable(AbstractConstraint) constraints: - :rtype: ResourceContainer - """ - (board_address, ip_tags, reverse_ip_tags) = self.get_ip_tag_info( - resources, constraints) - - if not self._are_ip_tags_available(board_address, ip_tags): - return ResourceContainer() - if not self._are_reverse_ip_tags_available( - board_address, reverse_ip_tags): - return ResourceContainer() - - area_code = None - if board_address is not None: - eth_chip = self._get_ethernet_chip(board_address) - area_code = set(self._machine.get_existing_xys_on_board(eth_chip)) - - (x, y, p) = self.get_chip_and_core(constraints) - if x is not None and y is not None: - tracker = self._get_core_tracker(x, y) - if not tracker.is_available: - return ResourceContainer() - if area_code is not None and (x, y) not in area_code: - return ResourceContainer() - best_processor_id = p - chip = self._machine.get_chip_at(x, y) - sdram_available = self._sdram_tracker[(x, y)] - if p is not None and not tracker.is_core_available(p): - return ResourceContainer() - if p is None: - best_processor_id = tracker.available_core() - processor = chip.get_processor_with_id(best_processor_id) - max_dtcm_available = processor.dtcm_available - max_cpu_available = processor.cpu_cycles_available - return ResourceContainer( - DTCMResource(max_dtcm_available), - ConstantSDRAM(sdram_available), - CPUCyclesPerTickResource(max_cpu_available)) - - return self._get_maximum_resources_available(area_code) - - def _get_maximum_resources_available(self, 
area_code=None): - """ Get the maximum resources available - - :param area_code: A set of valid (x, y) coordinates to choose from - :type area_code: iterable(tuple(int,int)) or None - :return: a resource which shows max resources available - :rtype: ResourceContainer - """ - if self._untracked_ethernet_chips: - # Assume at least one chip will have the maximum - return ResourceContainer( - DTCMResource(Processor.DTCM_AVAILABLE), - ConstantSDRAM(SDRAM.DEFAULT_SDRAM_BYTES), - CPUCyclesPerTickResource(Processor.CLOCK_SPEED // 1000)) - - # Go through the chips in order of sdram - for ((chip_x, chip_y), sdram_available) in self._sdram_tracker.items(): - tracker = self._get_core_tracker(chip_x, chip_y) - if tracker.is_available and ( - area_code is None or (chip_x, chip_y) in area_code): - chip = self._machine.get_chip_at(chip_x, chip_y) - best_processor_id = tracker.available_core() - processor = chip.get_processor_with_id(best_processor_id) - max_dtcm_available = processor.dtcm_available - max_cpu_available = processor.cpu_cycles_available - return ResourceContainer( - DTCMResource(max_dtcm_available), - ConstantSDRAM(sdram_available), - CPUCyclesPerTickResource(max_cpu_available)) - - # Send the maximums - # If nothing is available, return nothing - return ResourceContainer() - - def unallocate_resources(self, chip_x, chip_y, processor_id, resources, - ip_tags, reverse_ip_tags): - """ Undo the allocation of resources - - :param int chip_x: the x coord of the chip allocated - :param int chip_y: the y coord of the chip allocated - :param int processor_id: the processor ID - :param ResourceContainer resources: The resources to be unallocated - :param ip_tags: the details of the IP tags allocated - :type ip_tags: iterable(tuple(str, int)) or None - :param reverse_ip_tags: the details of the reverse IP tags allocated - :type reverse_ip_tags: iterable(tuple(str, int)) or None - :rtype: None - """ - - self._chips_available.add((chip_x, chip_y)) - self._sdram_tracker[chip_x, chip_y] += \ - resources.sdram.get_total_sdram(self._plan_n_timesteps) - - tracker = self._get_core_tracker(chip_x, chip_y) - tracker.deallocate(processor_id) - - # check if chip used needs updating - # if (len(self._core_tracker[chip_x, chip_y]) == - # self._machine.get_chip_at(chip_x, chip_y).n_user_processors): - # self._chips_used.remove((chip_x, chip_y)) - - # Deallocate the IP tags - if ip_tags is not None: - for (board_address, tag, _, _) in ip_tags: - self._boards_with_ip_tags.add(board_address) - tag_key = (board_address, tag) - self._n_ip_tag_allocations[tag_key] -= 1 - if self._n_ip_tag_allocations[tag_key] == 0: - key = self._address_and_traffic_ip_tag[tag_key] - del self._address_and_traffic_ip_tag[tag_key] - self._ip_tags_address_traffic[key].remove(tag_key) - if not self._ip_tags_address_traffic[key]: - del self._ip_tags_address_traffic[key] - self._tags_by_board[board_address].add(tag) - del self._ip_tags_strip_sdp_and_port[tag_key] - - # Deallocate the reverse IP tags - if reverse_ip_tags is not None: - for (board_address, tag) in reverse_ip_tags: - self._boards_with_ip_tags.add(board_address) - self._tags_by_board[board_address].add(tag) - port = self._listen_port_reverse_ip_tag.get( - (board_address, tag), None) - if port is not None: - del self._listen_port_reverse_ip_tag[board_address, tag] - self._reverse_ip_tag_listen_port.remove( - (board_address, port)) - - def is_chip_available(self, chip_x, chip_y): - """ Check if a given chip is available - - :param int chip_x: the x coord of the chip - :param int 
chip_y: the y coord of the chip - :return: True if the chip is available, False otherwise - :rtype: bool - """ - return self._get_core_tracker(chip_x, chip_y).is_available - - @property - def keys(self): - """ The chip coordinates assigned - - :rtype: set(tuple(int,int)) - """ - return self._chips_used - - @property - def chips_used(self): - """ The number of chips used in this allocation. - - :rtype: int - """ - return len(self._chips_used) diff --git a/unittests/model_tests/application_graph_tests/test_application_graph.py b/unittests/model_tests/application_graph_tests/test_application_graph.py index 1d8b016f5..6d8a7018f 100644 --- a/unittests/model_tests/application_graph_tests/test_application_graph.py +++ b/unittests/model_tests/application_graph_tests/test_application_graph.py @@ -48,7 +48,7 @@ def test_create_new_graph(self): assert frozenset(edges) == frozenset(graph.edges) assert edge1 not in graph.get_edges_ending_at_vertex(vert1) - assert edge2 not in graph.get_edges_starting_at_vertex(vert1) + assert edge2 not in set(graph.get_edges_starting_at_vertex(vert1)) assert edge3 not in graph.get_edges_ending_at_vertex(vert1) second = graph.clone() diff --git a/unittests/model_tests/application_graph_tests/test_application_vertex.py b/unittests/model_tests/application_graph_tests/test_application_vertex.py index df316bcc0..1c102d7d8 100644 --- a/unittests/model_tests/application_graph_tests/test_application_vertex.py +++ b/unittests/model_tests/application_graph_tests/test_application_vertex.py @@ -21,9 +21,8 @@ from pacman.model.constraints.key_allocator_constraints import ( FixedKeyAndMaskConstraint) -from pacman.model.graphs.application import ApplicationGraph from pacman.model.graphs.common import Slice -from pacman.model.graphs.machine import SimpleMachineVertex, MachineGraph +from pacman.model.graphs.machine import SimpleMachineVertex from pacman_test_objects import SimpleTestVertex @@ -143,23 +142,6 @@ def test_create_new_vertex_from_vertex_with_additional_constraints( self.assertIn(constraint1, subv_from_vert.constraints) self.assertIn(constraint2, subv_from_vert.constraints) - def test_machine_vertices(self): - app_graph = ApplicationGraph("super bacon") - machine_graph = MachineGraph("bacon", application_graph=app_graph) - vert = SimpleTestVertex(12, "New AbstractConstrainedVertex", 256) - sub1 = vert.create_machine_vertex( - Slice(0, 7), - vert.get_resources_used_by_atoms(Slice(0, 7)), "M1") - sub2 = vert.create_machine_vertex( - Slice(7, 11), - vert.get_resources_used_by_atoms(Slice(7, 11)), "M2") - machine_graph.add_vertex(sub1) - machine_graph.add_vertex(sub2) - self.assertIn(sub1, vert.machine_vertices) - self.assertIn(sub2, vert.machine_vertices) - self.assertIn(Slice(0, 7), vert.vertex_slices) - self.assertIn(Slice(7, 11), vert.vertex_slices) - def test_round_n_atoms(self): # .1 is not exact in floating point near = .1 + .1 + .1 + .1 + .1 + .1 + .1 + .1 + .1 + .1 diff --git a/unittests/model_tests/graph_mapper_tests/test_graph_mapping.py b/unittests/model_tests/graph_mapper_tests/test_graph_mapping.py index 9412d1ea5..f6157db17 100644 --- a/unittests/model_tests/graph_mapper_tests/test_graph_mapping.py +++ b/unittests/model_tests/graph_mapper_tests/test_graph_mapping.py @@ -21,9 +21,8 @@ from pacman.config_setup import unittest_setup from pacman.model.graphs.application import ApplicationGraph from pacman.model.graphs.common import Slice -from pacman.model.graphs.machine import MachineEdge, SimpleMachineVertex, \ - MachineGraph -from pacman_test_objects import 
SimpleTestEdge, SimpleTestVertex +from pacman.model.graphs.machine import SimpleMachineVertex +from pacman_test_objects import SimpleTestVertex class TestGraphMapping(unittest.TestCase): @@ -34,27 +33,6 @@ class TestGraphMapping(unittest.TestCase): def setUp(self): unittest_setup() - def test_get_edges_from_edge(self): - """ - test getting the edges from a graph mapper from a edge - """ - vertices = list() - edges = list() - vertices.append(SimpleMachineVertex(None, "")) - vertices.append(SimpleMachineVertex(None, "")) - edges.append(MachineEdge(vertices[0], vertices[1])) - edges.append(MachineEdge(vertices[1], vertices[1])) - sube = MachineEdge(vertices[1], vertices[0]) - edges.append(sube) - edge = SimpleTestEdge(SimpleTestVertex(10, "pre"), - SimpleTestVertex(5, "post")) - edge.remember_associated_machine_edge(sube) - edge.remember_associated_machine_edge(edges[0]) - edges_from_edge = edge.machine_edges - self.assertIn(sube, edges_from_edge) - self.assertIn(edges[0], edges_from_edge) - self.assertNotIn(edges[1], edges_from_edge) - def test_get_vertices_from_vertex(self): """ test getting the vertex from a graph mapper via the vertex @@ -65,13 +43,12 @@ def test_get_vertices_from_vertex(self): app_graph.add_vertex(vert) vertices.append(SimpleMachineVertex(None, "")) vertices.append(SimpleMachineVertex(None, "")) - mac_graph = MachineGraph("cooked bacon", application_graph=app_graph) vertex1 = SimpleMachineVertex( None, "", vertex_slice=Slice(0, 1), app_vertex=vert) vertex2 = SimpleMachineVertex( None, "", vertex_slice=Slice(2, 3), app_vertex=vert) - mac_graph.add_vertex(vertex1) - mac_graph.add_vertex(vertex2) + vert.remember_machine_vertex(vertex1) + vert.remember_machine_vertex(vertex2) returned_vertices = vert.machine_vertices @@ -91,40 +68,13 @@ def test_get_vertex_from_vertex(self): vertex_slice=Slice(0, 1)) vertex2 = SimpleMachineVertex(None, "", app_vertex=vert, vertex_slice=Slice(2, 3)) - machine_graph = MachineGraph( - application_graph=app_graph, label="cooked_bacon") - machine_graph.add_vertex(vertex1) - machine_graph.add_vertex(vertex2) + vert.remember_machine_vertex(vertex1) + vert.remember_machine_vertex(vertex2) self.assertEqual(vert, vertex1.app_vertex) self.assertEqual(vert, vertex2.app_vertex) self.assertEqual([vertex1, vertex2], list(vert.machine_vertices)) - def test_get_edge_from_machine_edge(self): - """ - test that tests getting a edge from a graph mapper - """ - vertices = list() - vertices.append(SimpleMachineVertex(None, "")) - vertices.append(SimpleMachineVertex(None, "")) - - edge = SimpleTestEdge(SimpleTestVertex(10, "pre"), - SimpleTestVertex(5, "post")) - - edges = list() - edges.append(MachineEdge(vertices[0], vertices[1], app_edge=edge)) - edges.append(MachineEdge(vertices[1], vertices[1])) - - sube = MachineEdge(vertices[1], vertices[0], app_edge=edge) - edges.append(sube) - - edge.remember_associated_machine_edge(sube) - edge.remember_associated_machine_edge(edges[0]) - - self.assertEqual(sube.app_edge, edge) - self.assertEqual(edges[0].app_edge, edge) - self.assertIsNone(edges[1].app_edge) - if __name__ == '__main__': unittest.main() diff --git a/unittests/model_tests/machine_graph_tests/__init__.py b/unittests/model_tests/machine_graph_tests/__init__.py deleted file mode 100644 index d358f58a8..000000000 --- a/unittests/model_tests/machine_graph_tests/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright (c) 2017-2019 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the 
GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . diff --git a/unittests/model_tests/machine_graph_tests/test_machine_graph_model.py b/unittests/model_tests/machine_graph_tests/test_machine_graph_model.py deleted file mode 100644 index adcc89e68..000000000 --- a/unittests/model_tests/machine_graph_tests/test_machine_graph_model.py +++ /dev/null @@ -1,349 +0,0 @@ -# Copyright (c) 2017-2019 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -import unittest -from pacman.config_setup import unittest_setup -from pacman.model.graphs.application import ApplicationGraph -from pacman.model.graphs.common import EdgeTrafficType -from pacman.model.graphs.machine import ( - ConstantSDRAMMachinePartition, MachineEdge, MachineGraph, - MachineGraphView, MulticastEdgePartition, SDRAMMachineEdge, - SimpleMachineVertex) -from pacman.exceptions import ( - PacmanAlreadyExistsException, PacmanConfigurationException, - PacmanInvalidParameterException) -from pacman_test_objects import MockMachineVertex, SimpleTestVertex - - -class TestMachineGraphModel(unittest.TestCase): - """ - Tests that test the functionality of the machine graph object - """ - - def setUp(self): - unittest_setup() - - def test_new_vertex(self): - """ - test the creation of a machine vertex - """ - SimpleMachineVertex(None, "") - - def test_new_empty_graph(self): - """ - test that the creation of a empty machine graph works - """ - MachineGraph("foo") - - def check_new_graph(self, app_graph, app_vertex): - """ - tests that after building a machine graph, all partitined vertices - and partitioned edges are in existence - """ - vertices = list() - edges = list() - for i in range(10): - vertices.append(SimpleMachineVertex( - None, "", app_vertex=app_vertex)) - for i in range(5): - edges.append(MachineEdge(vertices[0], vertices[(i + 1)])) - for i in range(5, 10): - edges.append(MachineEdge( - vertices[5], vertices[(i + 1) % 10])) - graph = MachineGraph("foo", application_graph=app_graph) - graph.add_vertices(vertices) - graph.add_outgoing_edge_partition( - MulticastEdgePartition(vertices[0], "bar")) - graph.add_outgoing_edge_partition( - MulticastEdgePartition(vertices[5], "bar")) - graph.add_edges(edges, "bar") - outgoing = set(graph.get_edges_starting_at_vertex(vertices[0])) - for i in range(5): - assert edges[i] in outgoing, \ - "edges[" + str(i) + "] is not in outgoing and should be" - for i in range(5, 10): - assert edges[i] not in outgoing, \ - "edges[" + str(i) + "] is in outgoing and shouldn't be" 
- - incoming = set(graph.get_edges_ending_at_vertex(vertices[0])) - - assert edges[9] in incoming, \ - "edges[9] is not in incoming and should be" - for i in range(9): - assert edges[i] not in incoming, \ - "edges[" + str(i) + "] is in incoming and shouldn't be" - - vertices_from_graph = list(graph.vertices) - for vert in vertices_from_graph: - self.assertIn(vert, vertices) - for vert in vertices: - self.assertEqual(vert, graph.vertex_by_label(vert.label)) - edges_from_graph = list(graph.edges) - for edge in edges_from_graph: - self.assertIn(edge, edges) - - if app_graph: - with self.assertRaises(PacmanInvalidParameterException): - graph.clone() - else: - second = graph.clone() - self.assertEqual(graph.n_vertices, second.n_vertices) - vertices_from_graph = list(second.vertices) - for vert in vertices_from_graph: - self.assertIn(vert, vertices) - for vert in vertices: - self.assertEqual(vert, graph.vertex_by_label(vert.label)) - self.assertEqual(graph.n_outgoing_edge_partitions, - second.n_outgoing_edge_partitions) - edges_from_graph = list(second.edges) - for edge in edges_from_graph: - self.assertIn(edge, edges) - self.assertEqual(len(edges_from_graph), len(edges)) - - third = MachineGraphView(graph) - self.assertEqual(graph.n_vertices, third.n_vertices) - vertices_from_graph = list(third.vertices) - for vert in vertices_from_graph: - self.assertIn(vert, vertices) - for vert in vertices: - self.assertEqual(vert, graph.vertex_by_label(vert.label)) - self.assertEqual(graph.n_outgoing_edge_partitions, - third.n_outgoing_edge_partitions) - edges_from_graph = list(third.edges) - for edge in edges_from_graph: - self.assertIn(edge, edges) - self.assertEqual(len(edges_from_graph), len(edges)) - with self.assertRaises(PacmanConfigurationException): - third.add_edge("mock", "mock") - with self.assertRaises(PacmanConfigurationException): - third.add_vertex("mock") - with self.assertRaises(PacmanConfigurationException): - third.add_outgoing_edge_partition("mock") - - def test_new_graph_no_app(self): - self.check_new_graph(None, None) - - def test_new_graph_with_app(self): - self.check_new_graph( - ApplicationGraph("test"), SimpleTestVertex(12, "app1")) - - def test_add_duplicate_vertex(self): - """ - testing that adding the same machine vertex twice will cause an - error - """ - vertices = list() - edges = list() - subv = SimpleMachineVertex(None, "bacon") - vertices.append(subv) - vertices.append(SimpleMachineVertex(None, "eggs")) - vertices.append(subv) - edges.append(MachineEdge(vertices[0], vertices[1])) - edges.append(MachineEdge(vertices[1], vertices[0])) - graph = MachineGraph("foo") - with self.assertRaises(PacmanAlreadyExistsException): - graph.add_vertices(vertices) - graph.add_outgoing_edge_partition( - MulticastEdgePartition(vertices[0], "bar")) - graph.add_outgoing_edge_partition( - MulticastEdgePartition(vertices[1], "bar")) - graph.add_edges(edges, "bar") - - def test_add_duplicate_edge(self): - """ - test that adding the same machine edge will cause an error - """ - vertices = list() - edges = list() - vertices.append(SimpleMachineVertex(None, "")) - vertices.append(SimpleMachineVertex(None, "")) - edge = MachineEdge(vertices[0], vertices[1]) - edges.append(edge) - edges.append(edge) - graph = MachineGraph("foo") - graph.add_vertices(vertices) - graph.add_outgoing_edge_partition( - MulticastEdgePartition(vertices[0], "bar")) - with self.assertRaises(PacmanAlreadyExistsException): - graph.add_edges(edges, "bar") - - def test_all_have_app_vertex(self): - app_graph = 
ApplicationGraph("Test") - graph = MachineGraph("foo", app_graph) - app1 = SimpleTestVertex(12, "app1") - mach1 = SimpleMachineVertex("mach1", app_vertex=app1) - mach2 = SimpleMachineVertex("mach2", app_vertex=app1) - mach3 = SimpleMachineVertex("mach3", app_vertex=None) - graph.add_vertices([mach1, mach2]) - with self.assertRaises(PacmanInvalidParameterException): - graph.add_vertex(mach3) - - def test_none_have_app_vertex(self): - app_graph = ApplicationGraph("Test") - graph = MachineGraph("foo", app_graph) - app1 = SimpleTestVertex(12, "app1") - mach1 = SimpleMachineVertex("mach1", app_vertex=None) - mach2 = SimpleMachineVertex("mach2", app_vertex=None) - mach3 = SimpleMachineVertex("mach3", app_vertex=app1) - graph.add_vertices([mach1, mach2]) - with self.assertRaises(PacmanInvalidParameterException): - graph.add_vertex(mach3) - - def test_no_app_graph_no_app_vertex(self): - graph = MachineGraph("foo") - app1 = SimpleTestVertex(12, "app1") - mach1 = SimpleMachineVertex("mach1", app_vertex=app1) - mach2 = SimpleMachineVertex("mach2", app_vertex=None) - mach3 = SimpleMachineVertex("mach3", app_vertex=app1) - with self.assertRaises(PacmanInvalidParameterException): - graph.add_vertex(mach1) - graph.add_vertex(mach2) - with self.assertRaises(PacmanInvalidParameterException): - graph.add_vertex(mach3) - - def test_add_edge_with_no_existing_pre_vertex_in_graph(self): - """ - test that adding a edge where the pre vertex has not been added - to the machine graph causes an error - """ - vertices = list() - edges = list() - vertices.append(SimpleMachineVertex(None, "")) - vertices.append(SimpleMachineVertex(None, "")) - edges.append(MachineEdge(vertices[0], vertices[1])) - vertex_extra = SimpleMachineVertex(None, "") - edges.append(MachineEdge(vertex_extra, vertices[0])) - with self.assertRaises(PacmanInvalidParameterException): - graph = MachineGraph("foo") - graph.add_vertices(vertices) - graph.add_outgoing_edge_partition( - MulticastEdgePartition(vertices[0], "ba")) - graph.add_outgoing_edge_partition( - MulticastEdgePartition(vertex_extra, "bar")) - graph.add_edges(edges, "bar") - - def test_add_edge_with_no_existing_post_vertex_in_graph(self): - """ - test that adding a edge where the post vertex has not been added - to the machine graph causes an error - """ - vertices = list() - edges = list() - vertices.append(SimpleMachineVertex(None, "")) - vertices.append(SimpleMachineVertex(None, "")) - edges.append(MachineEdge(vertices[0], vertices[1])) - edges.append(MachineEdge(vertices[0], SimpleMachineVertex(None, ""))) - with self.assertRaises(PacmanInvalidParameterException): - graph = MachineGraph("foo") - graph.add_vertices(vertices) - graph.add_outgoing_edge_partition( - MulticastEdgePartition(vertices[0], "bar")) - graph.add_edges(edges, "bar") - - def test_remember_machine_vertex(self): - app_graph = ApplicationGraph("Test") - graph = MachineGraph("foo", app_graph) - app1 = SimpleTestVertex(12, "app1") - app2 = SimpleTestVertex(12, "app2") - mach1 = SimpleMachineVertex("mach1", app_vertex=app1) - mach2 = SimpleMachineVertex("mach2", app_vertex=app1) - mach3 = SimpleMachineVertex("mach3", app_vertex=app1) - mach4 = SimpleMachineVertex("mach4", app_vertex=app2) - self.assertEquals(0, len(app1.machine_vertices)) - self.assertEquals(0, len(app2.machine_vertices)) - graph.add_vertices([mach1, mach2]) - graph.add_vertex(mach3) - graph.add_vertex(mach4) - self.assertEquals(3, len(app1.machine_vertices)) - self.assertEquals(1, len(app2.machine_vertices)) - self.assertIn(mach1, 
app1.machine_vertices) - self.assertIn(mach2, app1.machine_vertices) - self.assertIn(mach3, app1.machine_vertices) - self.assertIn(mach4, app2.machine_vertices) - - def test_at_vertex_methods(self): - graph = MachineGraph("foo") - mach1 = MockMachineVertex("mach1", sdram_requirement=0) - mach2 = MockMachineVertex("mach2", sdram_requirement=0) - mach3 = MockMachineVertex("mach3", sdram_requirement=0) - mach4 = SimpleMachineVertex("mach4") - graph.add_vertices([mach1, mach2, mach3, mach4]) - - # Add partition then edge - part_m_1 = MulticastEdgePartition(mach1, "spikes") - graph.add_outgoing_edge_partition(part_m_1) - edge_m_11 = MachineEdge( - mach1, mach2, traffic_type=EdgeTrafficType.MULTICAST) - graph.add_edge(edge_m_11, "spikes") - # check clear error it you add the edge again - with self.assertRaises(PacmanAlreadyExistsException): - graph.add_edge(edge_m_11, "spikes") - self.assertIn(edge_m_11, part_m_1.edges) - edge_m_12 = MachineEdge( - mach1, mach3, traffic_type=EdgeTrafficType.MULTICAST) - graph.add_edge(edge_m_12, "spikes") - edge_m_21 = MachineEdge( - mach3, mach4, traffic_type=EdgeTrafficType.MULTICAST) - graph.add_edge(edge_m_21, "spikes") - part_m_2 = graph.get_outgoing_partition_for_edge(edge_m_21) - - edge_f_1 = MachineEdge( - mach1, mach3, traffic_type=EdgeTrafficType.FIXED_ROUTE) - graph.add_edge(edge_f_1, "Control") - part_f = graph.get_outgoing_partition_for_edge(edge_f_1) - - part_s_1 = ConstantSDRAMMachinePartition("ram", mach1, "ram1") - graph.add_outgoing_edge_partition(part_s_1) - edge_s_11 = SDRAMMachineEdge(mach1, mach2, "s1") - graph.add_edge(edge_s_11, "ram") - edge_s_12 = SDRAMMachineEdge(mach1, mach3, "s2") - graph.add_edge(edge_s_12, "ram") - - starting_at_mach1 = list( - graph.get_outgoing_edge_partitions_starting_at_vertex(mach1)) - self.assertIn(part_m_1, starting_at_mach1) - self.assertIn(part_f, starting_at_mach1) - self.assertIn(part_s_1, starting_at_mach1) - self.assertEqual(3, len(starting_at_mach1)) - - starting_at_mach3 = list( - graph.get_outgoing_edge_partitions_starting_at_vertex(mach3)) - self.assertIn(part_m_2, starting_at_mach3) - self.assertEqual(1, len(starting_at_mach3)) - - starting_at_mach4 = list( - graph.get_outgoing_edge_partitions_starting_at_vertex(mach4)) - self.assertEqual(0, len(starting_at_mach4)) - - ending_at_mach2 = list( - graph.get_edge_partitions_ending_at_vertex(mach2)) - self.assertIn(part_m_1, ending_at_mach2) - self.assertIn(part_s_1, ending_at_mach2) - self.assertEqual(2, len(ending_at_mach2)) - - ending_at_mach3 = list( - graph.get_edge_partitions_ending_at_vertex(mach3)) - self.assertIn(part_m_1, ending_at_mach3) - self.assertIn(part_f, ending_at_mach3) - self.assertIn(part_s_1, ending_at_mach3) - self.assertEqual(3, len(ending_at_mach3)) - - ending_at_mach1 = list( - graph.get_edge_partitions_ending_at_vertex(mach1)) - self.assertEqual(0, len(ending_at_mach1)) - - -if __name__ == '__main__': - unittest.main() diff --git a/unittests/model_tests/partitioner_splitters_tests/splitter_slice_legacy_test.py b/unittests/model_tests/partitioner_splitters_tests/splitter_slice_legacy_test.py index 8c984a112..a5d2fd72f 100644 --- a/unittests/model_tests/partitioner_splitters_tests/splitter_slice_legacy_test.py +++ b/unittests/model_tests/partitioner_splitters_tests/splitter_slice_legacy_test.py @@ -17,12 +17,12 @@ from testfixtures import LogCapture from pacman.config_setup import unittest_setup from pacman.exceptions import PacmanConfigurationException -from pacman.model.partitioner_splitters import SplitterSliceLegacy +from 
pacman.model.partitioner_splitters import SplitterFixedLegacy from pacman_test_objects import ( DuckLegacyApplicationVertex, NonLegacyApplicationVertex, SimpleTestVertex) -class TestSplitterSliceLegacy(unittest.TestCase): +class TestSplitterFixedLegacy(unittest.TestCase): """ Tester for pacman.model.constraints.placer_constraints """ @@ -30,13 +30,13 @@ def setUp(self): unittest_setup() def test_no_api(self): - splitter = SplitterSliceLegacy() + splitter = SplitterFixedLegacy() vertex = NonLegacyApplicationVertex() with self.assertRaises(PacmanConfigurationException): splitter.set_governed_app_vertex(vertex) def test_with_methods(self): - splitter = SplitterSliceLegacy() + splitter = SplitterFixedLegacy() vertex = DuckLegacyApplicationVertex() with LogCapture() as lc: splitter.set_governed_app_vertex(vertex) @@ -47,7 +47,7 @@ def test_with_methods(self): self.assertTrue(found) def test_with_api(self): - splitter = SplitterSliceLegacy() + splitter = SplitterFixedLegacy() vertex = SimpleTestVertex(12) with LogCapture() as lc: splitter.set_governed_app_vertex(vertex) diff --git a/unittests/model_tests/placement_tests/test_placement_constraints.py b/unittests/model_tests/placement_tests/test_placement_constraints.py index d32ef725f..4e2fb3902 100644 --- a/unittests/model_tests/placement_tests/test_placement_constraints.py +++ b/unittests/model_tests/placement_tests/test_placement_constraints.py @@ -15,10 +15,7 @@ import unittest from pacman.config_setup import unittest_setup -from pacman.model.constraints.placer_constraints import ( - BoardConstraint, ChipAndCoreConstraint, RadialPlacementFromChipConstraint, - SameChipAsConstraint) -from pacman.model.graphs.machine import SimpleMachineVertex +from pacman.model.constraints.placer_constraints import ChipAndCoreConstraint class TestPlacementConstraints(unittest.TestCase): @@ -28,19 +25,6 @@ class TestPlacementConstraints(unittest.TestCase): def setUp(self): unittest_setup() - def test_board_constraint(self): - c1 = BoardConstraint("1.2.3.4") - self.assertEqual(c1.board_address, "1.2.3.4") - self.assertEqual(c1, BoardConstraint("1.2.3.4")) - self.assertEqual(str(c1), 'BoardConstraint(board_address="1.2.3.4")') - c2 = BoardConstraint("4.3.2.1") - self.assertNotEqual(c1, c2) - self.assertNotEqual(c1, "1.2.3.4") - d = {} - d[c1] = 1 - d[c2] = 2 - self.assertEqual(len(d), 2) - def test_chip_and_core_constraint(self): c1 = ChipAndCoreConstraint(1, 2) self.assertEqual(c1.x, 1) @@ -59,46 +43,3 @@ def test_chip_and_core_constraint(self): d[c2] = 2 d[c3] = 3 self.assertEqual(len(d), 3) - - def test_radial_placement_from_chip_constraint(self): - c1 = RadialPlacementFromChipConstraint(1, 2) - self.assertEqual(c1.x, 1) - self.assertEqual(c1.y, 2) - self.assertEqual(c1, RadialPlacementFromChipConstraint(1, 2)) - self.assertEqual(str(c1), - 'RadialPlacementFromChipConstraint(x=1, y=2)') - c2 = RadialPlacementFromChipConstraint(2, 1) - self.assertNotEqual(c1, c2) - self.assertNotEqual(c1, "1.2.3.4") - d = {} - d[c1] = 1 - d[c2] = 2 - self.assertEqual(len(d), 2) - - def test_same_chip_as_constraint(self): - v1 = SimpleMachineVertex(None, "v1") - v2 = SimpleMachineVertex(None, "v2") - c1 = SameChipAsConstraint(v1) - c2 = SameChipAsConstraint(v1) - c3 = SameChipAsConstraint(v2) - c4 = SameChipAsConstraint(v2) - - self.assertEqual(c1.vertex, v1) - self.assertEqual(str(c1), "SameChipAsConstraint(vertex=v1)") - self.assertEqual(str(c4), "SameChipAsConstraint(vertex=v2)") - - self.assertEqual(c1, c2) - self.assertEqual(c2, c1) - self.assertEqual(c3, c4) - 
self.assertNotEqual(c1, c3) - self.assertNotEqual(c3, c1) - self.assertNotEqual(c2, c4) - - d = {} - d[c1] = 1 - d[c2] = 2 - d[c3] = 3 - d[c4] = 4 - self.assertEqual(len(d), 2) - self.assertEqual(d[c1], 2) - self.assertEqual(d[c3], 4) diff --git a/unittests/model_tests/routing_info_tests/test_routing_info.py b/unittests/model_tests/routing_info_tests/test_routing_info.py index 90ebe9e7c..0ddd1e1e7 100644 --- a/unittests/model_tests/routing_info_tests/test_routing_info.py +++ b/unittests/model_tests/routing_info_tests/test_routing_info.py @@ -15,14 +15,12 @@ import unittest from pacman.config_setup import unittest_setup -from pacman.model.graphs.machine import MulticastEdgePartition from pacman.model.resources import ResourceContainer from pacman.exceptions import ( PacmanAlreadyExistsException, PacmanConfigurationException) from pacman.model.routing_info import ( - RoutingInfo, BaseKeyAndMask, PartitionRoutingInfo, - DictBasedMachinePartitionNKeysMap) -from pacman.model.graphs.machine import MachineEdge, SimpleMachineVertex + RoutingInfo, BaseKeyAndMask, MachineVertexRoutingInfo) +from pacman.model.graphs.machine import SimpleMachineVertex from pacman.utilities.constants import FULL_MASK @@ -32,81 +30,57 @@ def setUp(self): unittest_setup() def test_routing_info(self): - # mock to avoid having to create a graph for this test - graph_code = 123 pre_vertex = SimpleMachineVertex(resources=ResourceContainer()) - partition = MulticastEdgePartition(pre_vertex, "Test") - partition.register_graph_code(graph_code) # This is a hack - post_vertex = SimpleMachineVertex(resources=ResourceContainer()) - edge = MachineEdge(pre_vertex, post_vertex) key = 12345 - partition_info = PartitionRoutingInfo( - [BaseKeyAndMask(key, FULL_MASK)], partition) - partition.add_edge(edge, graph_code) - routing_info = RoutingInfo([partition_info]) + info = MachineVertexRoutingInfo( + [BaseKeyAndMask(key, FULL_MASK)], "Test", pre_vertex, 0) + routing_info = RoutingInfo() + routing_info.add_routing_info(info) with self.assertRaises(PacmanAlreadyExistsException): - routing_info.add_partition_info(partition_info) - - assert routing_info.get_first_key_from_partition(partition) == key - assert routing_info.get_first_key_from_partition(None) is None - - assert routing_info.get_routing_info_from_partition(partition) == \ - partition_info - assert routing_info.get_routing_info_from_partition(None) is None + routing_info.add_routing_info(info) assert routing_info.get_routing_info_from_pre_vertex( - pre_vertex, "Test") == partition_info + pre_vertex, "Test") == info assert routing_info.get_routing_info_from_pre_vertex( - post_vertex, "Test") is None + None, "Test") is None assert routing_info.get_routing_info_from_pre_vertex( pre_vertex, "None") is None assert routing_info.get_first_key_from_pre_vertex( pre_vertex, "Test") == key assert routing_info.get_first_key_from_pre_vertex( - post_vertex, "Test") is None + None, "Test") is None assert routing_info.get_first_key_from_pre_vertex( pre_vertex, "None") is None - assert routing_info.get_routing_info_for_edge(edge) == partition_info - assert routing_info.get_routing_info_for_edge(None) is None + assert next(iter(routing_info)) == info - assert routing_info.get_first_key_for_edge(edge) == key - assert routing_info.get_first_key_for_edge(None) is None + info2 = MachineVertexRoutingInfo( + [BaseKeyAndMask(key, FULL_MASK)], "Test", pre_vertex, 0) - assert next(iter(routing_info)) == partition_info + with self.assertRaises(PacmanAlreadyExistsException): + 
routing_info.add_routing_info(info2) + assert info != info2 - partition2 = MulticastEdgePartition(pre_vertex, "Test") - partition2.register_graph_code(graph_code) # This is a hack - partition2.add_edge(MachineEdge(pre_vertex, post_vertex), graph_code) + info3 = MachineVertexRoutingInfo( + [BaseKeyAndMask(key, FULL_MASK)], "Test2", pre_vertex, 0) + routing_info.add_routing_info(info3) + assert info != info3 + assert routing_info.get_routing_info_from_pre_vertex( + pre_vertex, "Test2") !=\ + routing_info.get_routing_info_from_pre_vertex( + pre_vertex, "Test") + assert routing_info.get_routing_info_from_pre_vertex( + pre_vertex, "Test2").get_keys().tolist() == [key] - with self.assertRaises(PacmanAlreadyExistsException): - routing_info.add_partition_info(PartitionRoutingInfo( - [BaseKeyAndMask(key, FULL_MASK)], partition2)) - assert partition != partition2 - - partition3 = MulticastEdgePartition(pre_vertex, "Test2") - partition3.register_graph_code(graph_code) # This is a hack - partition3.add_edge(MachineEdge(pre_vertex, post_vertex), graph_code) - routing_info.add_partition_info(PartitionRoutingInfo( - [BaseKeyAndMask(key, FULL_MASK)], partition3)) - - assert routing_info.get_routing_info_from_partition(partition) != \ - routing_info.get_routing_info_from_partition(partition3) - assert partition != partition3 - assert routing_info.get_routing_info_from_partition( - partition3).get_keys().tolist() == [key] - - partition4 = MulticastEdgePartition(pre_vertex, "Test4") - partition4.register_graph_code(graph_code) # This is a hack - partition4.add_edge(MachineEdge(pre_vertex, post_vertex), graph_code) - routing_info.add_partition_info(PartitionRoutingInfo( + info4 = MachineVertexRoutingInfo( [BaseKeyAndMask(key, FULL_MASK), - BaseKeyAndMask(key * 2, FULL_MASK)], partition4)) + BaseKeyAndMask(key * 2, FULL_MASK)], "Test4", pre_vertex, 0) + routing_info.add_routing_info(info4) - assert routing_info.get_routing_info_from_partition( - partition4).get_keys().tolist() == [key, key * 2] + assert routing_info.get_routing_info_from_pre_vertex( + pre_vertex, "Test4").get_keys().tolist() == [key, key * 2] def test_base_key_and_mask(self): with self.assertRaises(PacmanConfigurationException): @@ -123,15 +97,6 @@ def test_base_key_and_mask(self): assert k.tolist() == [1073741824, 1073741825] assert n == 2 - def test_dict_based_machine_partition_n_keys_map(self): - pmap = DictBasedMachinePartitionNKeysMap() - p1 = MulticastEdgePartition(None, "foo") - p2 = MulticastEdgePartition(None, "bar") - pmap.set_n_keys_for_partition(p1, 1) - pmap.set_n_keys_for_partition(p2, 2) - assert pmap.n_keys_for_partition(p1) == 1 - assert pmap.n_keys_for_partition(p2) == 2 - if __name__ == "__main__": unittest.main() diff --git a/unittests/model_tests/routing_table_tests/test_routing_tables_model.py b/unittests/model_tests/routing_table_tests/test_routing_tables_model.py index d852d6b20..78363617d 100644 --- a/unittests/model_tests/routing_table_tests/test_routing_tables_model.py +++ b/unittests/model_tests/routing_table_tests/test_routing_tables_model.py @@ -15,7 +15,6 @@ import unittest from pacman.config_setup import unittest_setup -from pacman.model.graphs.machine import MulticastEdgePartition from spinn_machine import MulticastRoutingEntry from pacman.model.routing_tables import ( UnCompressedMulticastRoutingTable, MulticastRoutingTables) @@ -26,6 +25,7 @@ from pacman.exceptions import ( PacmanAlreadyExistsException, PacmanInvalidParameterException) from pacman.utilities import file_format_schemas +from 
pacman.model.graphs.machine import SimpleMachineVertex class TestRoutingTable(unittest.TestCase): @@ -72,16 +72,6 @@ def test_new_multicast_routing_table(self): mre = mrt.multicast_routing_entries for entry in mre: self.assertIn(entry, multicast_entries) - self.assertEqual(len(mre), len(multicast_entries)) - for i in range(5): - self.assertEqual( - mrt.get_multicast_routing_entry_by_routing_entry_key( - key_combo + i, mask + i), - multicast_entries[i]) - self.assertEqual(mrt.get_multicast_routing_entry_by_routing_entry_key( - key_combo + 5, mask + 5), None) - self.assertEqual(mrt.get_multicast_routing_entry_by_routing_entry_key( - key_combo - 1, mask - 1), None) def test_new_multicast_routing_table_empty(self): """ @@ -190,52 +180,55 @@ def test_add_routing_table_for_duplicate_chip(self): def test_multicast_routing_table_by_partition(self): mrt = MulticastRoutingTableByPartition() - partition = MulticastEdgePartition(None, "foo") - entry = MulticastRoutingTableByPartitionEntry(range(4), range(2)) - mrt.add_path_entry(entry, 0, 0, partition) - entry = MulticastRoutingTableByPartitionEntry( - range(4, 8), range(2, 4)) - mrt.add_path_entry(entry, 0, 0, partition) + partition_id = "foo" + source_vertex = SimpleMachineVertex(resources=None) + entry = MulticastRoutingTableByPartitionEntry(range(2), range(4)) + mrt.add_path_entry(entry, 0, 0, source_vertex, partition_id) + entry = MulticastRoutingTableByPartitionEntry(range(2, 4), range(4, 8)) + mrt.add_path_entry(entry, 0, 0, source_vertex, partition_id) entry = MulticastRoutingTableByPartitionEntry( - range(8, 12), range(4, 6)) - mrt.add_path_entry(entry, 0, 0, partition) + range(4, 6), range(8, 12)) + mrt.add_path_entry(entry, 0, 0, source_vertex, partition_id) assert list(mrt.get_routers()) == [(0, 0)] assert len(mrt.get_entries_for_router(0, 0)) == 1 - assert next(iter(mrt.get_entries_for_router(0, 0))) == partition - mre = mrt.get_entries_for_router(0, 0)[partition] + assert next(iter(mrt.get_entries_for_router(0, 0))) == ( + source_vertex, partition_id) + mre = mrt.get_entries_for_router(0, 0)[source_vertex, partition_id] assert str(mre) == ( - "None:None:False:{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}" - ":{0, 1, 2, 3, 4, 5}") - assert mre == mrt.get_entry_on_coords_for_edge(partition, 0, 0) + "None:None:False:" + "{0, 1, 2, 3, 4, 5}:{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}") + assert mre == mrt.get_entry_on_coords_for_edge( + source_vertex, partition_id, 0, 0) def test_multicast_routing_table_by_partition_entry(self): - e1 = MulticastRoutingTableByPartitionEntry(range(18), range(6)) with self.assertRaises(PacmanInvalidParameterException): - MulticastRoutingTableByPartitionEntry(range(18), range(6), 4, 3) + MulticastRoutingTableByPartitionEntry(range(6), range(18), 4, 3) + with self.assertRaises(ValueError): + MulticastRoutingTableByPartitionEntry(7, 18) + with self.assertRaises(ValueError): + MulticastRoutingTableByPartitionEntry(6, 19) + e1 = MulticastRoutingTableByPartitionEntry(range(6), range(18)) e2 = MulticastRoutingTableByPartitionEntry( - range(4), range(2), incoming_processor=4) + range(2), range(4), incoming_processor=4) e3 = MulticastRoutingTableByPartitionEntry( - range(12, 16), range(3, 5), incoming_link=3) - with self.assertRaises(PacmanInvalidParameterException): - MulticastRoutingTableByPartitionEntry(range(18), range(6), - incoming_link=[]) - e4 = MulticastRoutingTableByPartitionEntry(16, 2) + range(3, 5), range(12, 16), incoming_link=3) + e4 = MulticastRoutingTableByPartitionEntry(2, 16) e5 = 
MulticastRoutingTableByPartitionEntry(None, None) - assert str(e2) == "None:4:False:{0, 1, 2, 3}:{0, 1}" - assert str(e3) == "3:None:False:{12, 13, 14, 15}:{3, 4}" + assert str(e2) == "None:4:False:{0, 1}:{0, 1, 2, 3}" + assert str(e3) == "3:None:False:{3, 4}:{12, 13, 14, 15}" with self.assertRaises(PacmanInvalidParameterException): e2.merge_entry(e3) e6 = e2.merge_entry(MulticastRoutingTableByPartitionEntry( - range(12, 16), range(3, 5))) - assert str(e2) == "None:4:False:{0, 1, 2, 3}:{0, 1}" + range(3, 5), range(12, 16))) + assert str(e2) == "None:4:False:{0, 1}:{0, 1, 2, 3}" assert str(e6) == ( - "None:4:False:{0, 1, 2, 3, 12, 13, 14, 15}:{0, 1, 3, 4}") + "None:4:False:{0, 1, 3, 4}:{0, 1, 2, 3, 12, 13, 14, 15}") e6 = e3.merge_entry(MulticastRoutingTableByPartitionEntry( - range(4), range(2))) - assert str(e3) == "3:None:False:{12, 13, 14, 15}:{3, 4}" + range(2), range(4))) + assert str(e3) == "3:None:False:{3, 4}:{12, 13, 14, 15}" assert str(e6) == ( - "3:None:False:{0, 1, 2, 3, 12, 13, 14, 15}:{0, 1, 3, 4}") - assert str(e4.merge_entry(e5)) == "None:None:False:{16}:{2}" + "3:None:False:{0, 1, 3, 4}:{0, 1, 2, 3, 12, 13, 14, 15}") + assert str(e4.merge_entry(e5)) == "None:None:False:{2}:{16}" assert str(e1) == str(e5.merge_entry(e1)) # NB: Have true object identity; we have setters! assert e5 != MulticastRoutingTableByPartitionEntry(None, None) diff --git a/unittests/model_tests/splitter_tests/test_one_app_one_machine.py b/unittests/model_tests/splitter_tests/test_one_app_one_machine.py index 50bc82c3f..363d3cd65 100644 --- a/unittests/model_tests/splitter_tests/test_one_app_one_machine.py +++ b/unittests/model_tests/splitter_tests/test_one_app_one_machine.py @@ -20,6 +20,7 @@ AbstractOneAppOneMachineVertex) from pacman.model.partitioner_splitters import SplitterOneAppOneMachine from pacman_test_objects import NonLegacyApplicationVertex +from pacman.model.graphs.machine import SimpleMachineVertex class TestSplitterOneAppOneMachine(unittest.TestCase): @@ -35,7 +36,8 @@ def test_legacy(self): with self.assertRaises(PacmanConfigurationException): splitter.set_governed_app_vertex(v1) v2 = AbstractOneAppOneMachineVertex( - machine_vertex=None, label="v1", constraints=None) + machine_vertex=SimpleMachineVertex(None), + label="v1", constraints=None) splitter.set_governed_app_vertex(v2) a = str(splitter) self.assertIsNotNone(a) diff --git a/unittests/model_tests/splitter_tests/test_splitter_slice_legacy.py b/unittests/model_tests/splitter_tests/test_splitter_fixed_legacy.py similarity index 87% rename from unittests/model_tests/splitter_tests/test_splitter_slice_legacy.py rename to unittests/model_tests/splitter_tests/test_splitter_fixed_legacy.py index e102ad719..beac0ec5a 100644 --- a/unittests/model_tests/splitter_tests/test_splitter_slice_legacy.py +++ b/unittests/model_tests/splitter_tests/test_splitter_fixed_legacy.py @@ -16,18 +16,18 @@ import unittest from pacman.config_setup import unittest_setup from pacman.exceptions import PacmanConfigurationException -from pacman.model.partitioner_splitters import SplitterSliceLegacy +from pacman.model.partitioner_splitters import SplitterFixedLegacy from pacman_test_objects import ( DuckLegacyApplicationVertex, NonLegacyApplicationVertex, SimpleTestVertex) -class TestSplitterSliceLegacy(unittest.TestCase): +class TestSplitterFixedLegacy(unittest.TestCase): def setUp(self): unittest_setup() def test_api(self): - splitter = SplitterSliceLegacy("foo") + splitter = SplitterFixedLegacy("foo") a = str(splitter) self.assertIsNotNone(a) v1 = 
SimpleTestVertex(1, "v1") @@ -40,12 +40,12 @@ def test_api(self): splitter.set_governed_app_vertex(v2) def test_not_api(self): - splitter = SplitterSliceLegacy("foo") + splitter = SplitterFixedLegacy("foo") v1 = NonLegacyApplicationVertex("v1") with self.assertRaises(PacmanConfigurationException): splitter.set_governed_app_vertex(v1) def test_legacy(self): - splitter = SplitterSliceLegacy("foo") + splitter = SplitterFixedLegacy("foo") v1 = DuckLegacyApplicationVertex("v1") splitter.set_governed_app_vertex(v1) diff --git a/unittests/model_tests/test_key_allocator.py b/unittests/model_tests/test_key_allocator.py index 414cfc264..b04748472 100644 --- a/unittests/model_tests/test_key_allocator.py +++ b/unittests/model_tests/test_key_allocator.py @@ -16,10 +16,8 @@ import unittest from pacman.config_setup import unittest_setup from pacman.model.routing_info import BaseKeyAndMask -from pacman.utilities.utility_objs import Field from pacman.model.constraints.key_allocator_constraints import ( - ContiguousKeyRangeContraint, FixedKeyAndMaskConstraint, - FixedKeyFieldConstraint, FixedMaskConstraint) + ContiguousKeyRangeContraint, FixedKeyAndMaskConstraint) class TestKeyAllocatorConstraints(unittest.TestCase): @@ -51,10 +49,9 @@ def test_fixed_key_and_mask_constraint(self): c4 = FixedKeyAndMaskConstraint([ km, BaseKeyAndMask(0xFE0, 0xFF8)]) self.assertEqual(c1, c2) - self.assertIsNone(c1.key_list_function) self.assertEqual(c1.keys_and_masks, [km]) r = ("FixedKeyAndMaskConstraint(keys_and_masks=[KeyAndMask:0xff0:" - "0xff8], key_list_function=None)") + "0xff8], partition=None)") self.assertEqual(str(c1), r) d = {} d[c1] = 1 @@ -64,45 +61,3 @@ def test_fixed_key_and_mask_constraint(self): self.assertEqual(d[c1], 2) self.assertNotEqual(c4, c1) self.assertNotEqual(c1, c4) - - def test_fixed_key_field_constraint(self): - c1 = FixedKeyFieldConstraint([ - Field(1, 2, 3, name="foo")]) - c1a = FixedKeyFieldConstraint([ - Field(1, 2, 3, name="foo")]) - c2 = FixedKeyFieldConstraint([ - Field(1, 2, 7)]) - c3 = FixedKeyFieldConstraint([ - Field(1, 2, 3), Field(1, 2, 96), Field(1, 2, 12)]) - self.assertEqual(c1, c1a) - self.assertNotEqual(c1, c2) - self.assertNotEqual(c1, c3) - self.assertNotEqual(c3, c1) - r = ("FixedKeyFieldConstraint(fields=[" - "Field(lo=1, hi=2, value=3, tag=3, name=foo)])") - self.assertEqual(str(c1), r) - self.assertEqual([f.value for f in c3.fields], [96, 12, 3]) - d = {} - d[c1] = 1 - d[c1a] = 2 - d[c2] = 3 - d[c3] = 4 - self.assertEqual(len(d), 3) - self.assertEqual(d[c1], 2) - - def test_fixed_mask_constraint(self): - c1 = FixedMaskConstraint(0xFF0) - self.assertEqual(c1.mask, 4080) - c2 = FixedMaskConstraint(0xFF0) - c3 = FixedMaskConstraint(0xFE0) - self.assertEqual(c1, c2) - self.assertNotEqual(c1, c3) - self.assertNotEqual(c3, c1) - r = "FixedMaskConstraint(mask=4080)" - self.assertEqual(str(c1), r) - d = {} - d[c1] = 1 - d[c2] = 2 - d[c3] = 3 - self.assertEqual(len(d), 2) - self.assertEqual(d[c1], 2) diff --git a/unittests/model_tests/test_partition.py b/unittests/model_tests/test_partition.py deleted file mode 100644 index 09c5e5172..000000000 --- a/unittests/model_tests/test_partition.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright (c) 2017-2019 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -import unittest -from pacman.config_setup import unittest_setup - -from pacman.model.graphs.machine import SimpleMachineVertex -from pacman.model.constraints.partitioner_constraints import ( - SameAtomsAsVertexConstraint) - - -class TestPartitionConstraints(unittest.TestCase): - """ Tester for pacman.model.constraints.partitioner_constraints - """ - - def setUp(self): - unittest_setup() - - def test_same_atoms_as_vertex_constraint(self): - with self.assertRaises(NotImplementedError): - v1 = SimpleMachineVertex(None, "v1") - v2 = SimpleMachineVertex(None, "v2") - c1 = SameAtomsAsVertexConstraint(v1) - self.assertEqual(c1.vertex, v1) - self.assertEqual(c1, SameAtomsAsVertexConstraint(v1)) - self.assertEqual(str(c1), 'SameAtomsAsVertexConstraint(vertex=v1)') - c2 = SameAtomsAsVertexConstraint(v2) - self.assertNotEqual(c1, c2) - self.assertNotEqual(c1, "1.2.3.4") - d = {} - d[c1] = 1 - d[c2] = 2 - self.assertEqual(len(d), 2) diff --git a/unittests/operations_tests/partition_algorithms_tests/test_basic_partitioner.py b/unittests/operations_tests/partition_algorithms_tests/test_basic_partitioner.py index 30d362420..703249285 100644 --- a/unittests/operations_tests/partition_algorithms_tests/test_basic_partitioner.py +++ b/unittests/operations_tests/partition_algorithms_tests/test_basic_partitioner.py @@ -19,19 +19,18 @@ import unittest -from spinn_utilities.config_holder import set_config from pacman.config_setup import unittest_setup -from pacman.model.partitioner_splitters import SplitterSliceLegacy +from pacman.model.partitioner_splitters import SplitterFixedLegacy from pacman.operations.partition_algorithms import splitter_partitioner -from spinn_machine import ( - SDRAM, Link, Router, Chip, machine_from_chips, virtual_machine) -from pacman.model.graphs.application import ApplicationEdge, ApplicationGraph -from pacman.exceptions import ( - PacmanInvalidParameterException, PacmanException, - PacmanValueError) +from pacman.model.graphs.application import ApplicationGraph +from pacman.exceptions import PacmanInvalidParameterException from pacman_test_objects import NewPartitionerConstraint, SimpleTestVertex +def _n_machine_vertices(graph): + return sum([len(v.machine_vertices) for v in graph.vertices]) + + class TestBasicPartitioner(unittest.TestCase): """ test for basic partitioning algorithm @@ -45,99 +44,37 @@ def setUp(self): setup for all basic partitioner tests """ unittest_setup() - self.vert1 = SimpleTestVertex(10, "New AbstractConstrainedVertex 1") - self.vert1.splitter = SplitterSliceLegacy() - self.vert2 = SimpleTestVertex(5, "New AbstractConstrainedVertex 2") - self.vert2.splitter = SplitterSliceLegacy() - self.vert3 = SimpleTestVertex(3, "New AbstractConstrainedVertex 3") - self.vert3.splitter = SplitterSliceLegacy() - self.edge1 = ApplicationEdge( - self.vert1, self.vert2, label="First edge") - self.edge2 = ApplicationEdge( - self.vert2, self.vert1, label="Second edge") - self.edge3 = ApplicationEdge( - self.vert1, self.vert3, label="Third edge") - self.verts = [self.vert1, self.vert2, self.vert3] - self.edges = [self.edge1, self.edge2, self.edge3] - self.graph = ApplicationGraph("Graph") - 
self.graph.add_vertices(self.verts) - self.graph.add_edges(self.edges, "foo") - - n_processors = 18 - (e, ne, n, w, _, _) = range(6) - - links = list() - links.append(Link(0, 0, e, 0, 1)) - - _sdram = SDRAM(128 * (2**20)) - - links = list() - - links.append(Link(0, 0, e, 1, 1)) - links.append(Link(0, 1, ne, 1, 0)) - links.append(Link(1, 1, n, 0, 0)) - links.append(Link(1, 0, w, 0, 1)) - r = Router(links, False, 1024) - - ip = TestBasicPartitioner.TheTestAddress - chips = list() - for x in range(5): - for y in range(5): - if x == y == 0: - chips.append(Chip(x, y, n_processors, r, _sdram, 0, 0, ip)) - else: - chips.append(Chip(x, y, n_processors, r, _sdram, 0, 0)) - - self.machine = machine_from_chips(chips) def test_partition_with_no_additional_constraints(self): """ test a partitioning with a graph with no extra constraints """ - graph, _ = splitter_partitioner(self.graph, self.machine, 3000) - self.assertEqual(len(list(graph.vertices)), 3) - vert_sizes = [] - for vert in self.verts: - vert_sizes.append(vert.n_atoms) - self.assertEqual(len(list(graph.edges)), 3) - for vertex in graph.vertices: - self.assertIn(vertex.vertex_slice.n_atoms, vert_sizes) - - def test_partition_with_no_additional_constraints_extra_edge(self): - """ - test that the basic form with an extra edge works - """ - self.graph.add_edge( - ApplicationEdge(self.vert3, self.vert1, label="extra"), "TEST") - graph, _ = splitter_partitioner(self.graph, self.machine, 3000) - self.assertEqual(len(list(graph.vertices)), 3) - self.assertEqual(len(list(graph.edges)), 4) + vert1 = SimpleTestVertex(10, "New AbstractConstrainedVertex 1") + vert1.splitter = SplitterFixedLegacy() + vert2 = SimpleTestVertex(5, "New AbstractConstrainedVertex 2") + vert2.splitter = SplitterFixedLegacy() + vert3 = SimpleTestVertex(3, "New AbstractConstrainedVertex 3") + vert3.splitter = SplitterFixedLegacy() + verts = [vert1, vert2, vert3] + graph = ApplicationGraph("Graph") + graph.add_vertices(verts) + splitter_partitioner(graph, 3000) + self.assertEqual(_n_machine_vertices(graph), 3) + for vert in verts: + for m_vert in vert.machine_vertices: + self.assertEqual(vert.n_atoms, m_vert.vertex_slice.n_atoms) def test_partition_on_large_vertex_than_has_to_be_split(self): """ test that partitioning 1 large vertex can make it into 2 small ones """ large_vertex = SimpleTestVertex(300, "Large vertex") - large_vertex.splitter = SplitterSliceLegacy() - self.graph = ApplicationGraph("Graph with large vertex") - self.graph.add_vertex(large_vertex) + large_vertex.splitter = SplitterFixedLegacy() + graph = ApplicationGraph("Graph with large vertex") + graph.add_vertex(large_vertex) self.assertEqual(large_vertex._model_based_max_atoms_per_core, 256) - graph, _ = splitter_partitioner(self.graph, self.machine, 1000) - self.assertGreater(len(list(graph.vertices)), 1) - - def test_partition_on_very_large_vertex_than_has_to_be_split(self): - """ - test that partitioning 1 large vertex can make it into multiple small - ones - """ - large_vertex = SimpleTestVertex(500, "Large vertex") - large_vertex.splitter = SplitterSliceLegacy() - self.assertEqual(large_vertex._model_based_max_atoms_per_core, 256) - self.graph = ApplicationGraph("Graph with large vertex") - self.graph.add_vertex(large_vertex) - graph, _ = splitter_partitioner(self.graph, self.machine, 3000) - self.assertEqual(large_vertex._model_based_max_atoms_per_core, 256) - self.assertGreater(len(list(graph.vertices)), 1) + splitter_partitioner(graph, 1000) + self.assertEqual(_n_machine_vertices(graph), 2) def 
test_partition_on_target_size_vertex_than_has_to_be_split(self): """ @@ -145,158 +82,11 @@ def test_partition_on_target_size_vertex_than_has_to_be_split(self): """ large_vertex = SimpleTestVertex( 1000, "Large vertex", max_atoms_per_core=10) - large_vertex.splitter = SplitterSliceLegacy() - self.graph = ApplicationGraph("Graph with large vertex") - self.graph.add_vertex(large_vertex) - graph, _ = splitter_partitioner(self.graph, self.machine, 3000) - self.assertEqual(len(list(graph.vertices)), 100) - - def test_partition_with_barely_sufficient_space(self): - """ - test that partitioning will work when close to filling the machine - """ - n_processors = 18 - (e, ne, n, w, _, _) = range(6) - - links = list() - links.append(Link(0, 0, e, 0, 1)) - - _sdram = SDRAM(2**12) - - links = list() - - links.append(Link(0, 0, e, 1, 1)) - links.append(Link(0, 1, ne, 1, 0)) - links.append(Link(1, 1, n, 0, 0)) - links.append(Link(1, 0, w, 0, 1)) - r = Router(links, False, 1024) - - ip = TestBasicPartitioner.TheTestAddress - chips = list() - for x in range(5): - for y in range(5): - if x == y == 0: - chips.append(Chip(x, y, n_processors, r, _sdram, 0, 0, ip)) - else: - chips.append(Chip(x, y, n_processors, r, _sdram, 0, 0)) - - self.machine = machine_from_chips(chips) - n_neurons = 17 * 5 * 5 - singular_vertex = SimpleTestVertex(n_neurons, "Large vertex", - max_atoms_per_core=1) - singular_vertex.splitter = SplitterSliceLegacy() - self.assertEqual(singular_vertex._model_based_max_atoms_per_core, 1) - self.graph = ApplicationGraph("Graph with large vertex") - self.graph.add_vertex(singular_vertex) - graph, _ = splitter_partitioner(self.graph, self.machine, 3000) - self.assertEqual(singular_vertex._model_based_max_atoms_per_core, 1) - self.assertEqual(len(list(graph.vertices)), n_neurons) - - def test_partition_with_insufficient_space(self): - """ - test that if there's not enough space, the test the partitioner will - raise an error - """ - n_processors = 18 - (e, ne, n, w, _, _) = range(6) - - links = list() - links.append(Link(0, 0, e, 0, 1)) - - _sdram = SDRAM(2**11) - - links = list() - - links.append(Link(0, 0, e, 1, 1)) - links.append(Link(0, 1, ne, 1, 0)) - links.append(Link(1, 1, n, 0, 0)) - links.append(Link(1, 0, w, 0, 1)) - r = Router(links, False, 1024) - - ip = TestBasicPartitioner.TheTestAddress - chips = list() - for x in range(5): - for y in range(5): - if x == y == 0: - chips.append(Chip(x, y, n_processors, r, _sdram, 0, 0, ip)) - else: - chips.append(Chip(x, y, n_processors, r, _sdram, 0, 0)) - - self.machine = machine_from_chips(chips) - large_vertex = SimpleTestVertex(3000, "Large vertex", - max_atoms_per_core=1) - large_vertex.splitter = SplitterSliceLegacy() - self.assertEqual(large_vertex._model_based_max_atoms_per_core, 1) - self.graph = ApplicationGraph("Graph with large vertex") - self.graph.add_vertex(large_vertex) - with self.assertRaises(PacmanException): - splitter_partitioner(self.graph, self.machine, 3000) - - def test_partition_with_less_sdram_than_default(self): - """ - test that the partitioner works when its machine is slightly malformed - in that it has less SDRAM available - """ - n_processors = 18 - (e, ne, n, w, _, _) = range(6) - - links = list() - links.append(Link(0, 0, e, 0, 1)) - - _sdram = SDRAM(128 * (2**19)) - - links = list() - - links.append(Link(0, 0, e, 1, 1)) - links.append(Link(0, 1, ne, 1, 0)) - links.append(Link(1, 1, n, 0, 0)) - links.append(Link(1, 0, w, 0, 1)) - r = Router(links, False, 1024) - - ip = TestBasicPartitioner.TheTestAddress - chips = 
list() - for x in range(5): - for y in range(5): - if x == y == 0: - chips.append(Chip(x, y, n_processors, r, _sdram, 0, 0, ip)) - else: - chips.append(Chip(x, y, n_processors, r, _sdram, 0, 0)) - - self.machine = machine_from_chips(chips) - splitter_partitioner(self.graph, self.machine, 3000) - - def test_partition_with_more_sdram_than_default(self): - """ - test that the partitioner works when its machine is slightly malformed - in that it has more SDRAM available - """ - n_processors = 18 - (e, ne, n, w, _, _) = range(6) - - links = list() - links.append(Link(0, 0, e, 0, 1)) - - _sdram = SDRAM(128 * (2**21)) - - links = list() - - links.append(Link(0, 0, e, 1, 1)) - links.append(Link(0, 1, ne, 1, 0)) - links.append(Link(1, 1, n, 0, 0)) - links.append(Link(1, 0, w, 0, 1)) - r = Router(links, False, 1024) - - ip = TestBasicPartitioner.TheTestAddress - chips = list() - for x in range(5): - for y in range(5): - if x == y == 0: - chips.append(Chip(x, y, n_processors, r, _sdram, 0, 0, ip)) - else: - chips.append(Chip(x, y, n_processors, r, _sdram, 0, 0)) - - self.machine = machine_from_chips(chips) - splitter_partitioner(self.graph, self.machine, 3000) + large_vertex.splitter = SplitterFixedLegacy() + graph = ApplicationGraph("Graph with large vertex") + graph.add_vertex(large_vertex) + splitter_partitioner(graph, 3000) + self.assertEqual(_n_machine_vertices(graph), 100) def test_partition_with_unsupported_constraints(self): """ @@ -307,65 +97,31 @@ def test_partition_with_unsupported_constraints(self): constrained_vertex.add_constraint( NewPartitionerConstraint("Mock constraint")) with self.assertRaises(PacmanInvalidParameterException): - constrained_vertex.splitter = SplitterSliceLegacy() + constrained_vertex.splitter = SplitterFixedLegacy() def test_partition_with_empty_graph(self): """ test that the partitioner can work with an empty graph """ - self.graph = ApplicationGraph("foo") - graph, _ = splitter_partitioner(self.graph, self.machine, 3000) - self.assertEqual(len(list(graph.vertices)), 0) + graph = ApplicationGraph("foo") + splitter_partitioner(graph, 3000) + self.assertEqual(_n_machine_vertices(graph), 0) def test_partition_with_fixed_atom_constraints(self): - """ - test a partitioning with a graph with fixed atom constraint - """ - - # Create a 2x2 machine with 10 cores per chip (so 40 cores), - # but 1 off 2 per chip (so 19 per chip) - n_cores_per_chip = 10 - sdram_per_chip = (n_cores_per_chip * 2) - 1 - set_config("Machine", "max_sdram_allowed_per_chip", sdram_per_chip) - machine = virtual_machine( - width=2, height=2, n_cpus_per_chip=n_cores_per_chip) - - # Create a vertex where each atom requires 1MB (default) of SDRAM - # but which can't be subdivided lower than 2 atoms per core. 
- # The vertex has 1 atom per MB of SDRAM, and so would fit but will - # be disallowed by the fixed atoms per core constraint - vertex = SimpleTestVertex( - sdram_per_chip * machine.n_chips, max_atoms_per_core=2) - vertex.splitter = SplitterSliceLegacy() - app_graph = ApplicationGraph("Test") - app_graph.add_vertex(vertex) - - # Do the partitioning - this should result in an error - with self.assertRaises(PacmanValueError): - splitter_partitioner(app_graph, machine, 3000) - - def test_partition_with_max_atom_constraints_at_limit(self): """ test a partitioning with a graph with fixed atom constraint which\ should fit but is close to the limit """ - # Create a 2x2 machine with 1 core per chip (so 4 cores), - # and SDRAM per chip - n_cores_per_chip = 2 # Remember 1 core is the monitor - machine = virtual_machine( - width=2, height=2, n_cpus_per_chip=n_cores_per_chip) - - # Create a vertex which will need to be split perfectly into 4 cores - # to work and which max atoms per core must be ignored + # Create a vertex which will be split perfectly into 4 cores vertex = SimpleTestVertex(16, max_atoms_per_core=4) - vertex.splitter = SplitterSliceLegacy() + vertex.splitter = SplitterFixedLegacy() app_graph = ApplicationGraph("Test") app_graph.add_vertex(vertex) # Do the partitioning - this should just work - machine_graph, _ = splitter_partitioner(app_graph, machine, 3000) - self.assertEqual(4, len(machine_graph.vertices)) + splitter_partitioner(app_graph, 3000) + self.assertEqual(4, _n_machine_vertices(app_graph)) if __name__ == '__main__': diff --git a/unittests/operations_tests/partition_algorithms_tests/test_partitioner.py b/unittests/operations_tests/partition_algorithms_tests/test_partitioner.py deleted file mode 100644 index 2ba05c574..000000000 --- a/unittests/operations_tests/partition_algorithms_tests/test_partitioner.py +++ /dev/null @@ -1,435 +0,0 @@ -# Copyright (c) 2017-2019 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
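
For reference, a minimal sketch of the machine-less partitioning flow that the rewritten basic-partitioner tests above exercise; it uses only names imported in those tests, and the expected count follows from 1000 atoms at 10 atoms per core:

# Sketch (not part of the patch): new-style partitioning call, which no
# longer takes a Machine and records machine vertices on the app vertex.
from pacman.config_setup import unittest_setup
from pacman.model.graphs.application import ApplicationGraph
from pacman.model.partitioner_splitters import SplitterFixedLegacy
from pacman.operations.partition_algorithms import splitter_partitioner
from pacman_test_objects import SimpleTestVertex

unittest_setup()
vertex = SimpleTestVertex(1000, "Large vertex", max_atoms_per_core=10)
vertex.splitter = SplitterFixedLegacy()
graph = ApplicationGraph("Example")
graph.add_vertex(vertex)
splitter_partitioner(graph, 3000)  # plan_n_time_steps only; no Machine
assert sum(len(v.machine_vertices) for v in graph.vertices) == 100
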
- -""" -test for partitioning -""" -import unittest -from pacman.config_setup import unittest_setup -from pacman.model.partitioner_splitters import SplitterSliceLegacy -from pacman.operations.partition_algorithms import splitter_partitioner -from spinn_machine import ( - SDRAM, Link, Router, Chip, machine_from_chips, virtual_machine) -from pacman.model.graphs.application import ApplicationEdge, ApplicationGraph -from pacman.exceptions import ( - PacmanPartitionException, PacmanInvalidParameterException, - PacmanValueError) -from pacman.model.constraints.partitioner_constraints import ( - SameAtomsAsVertexConstraint) -from pacman.model.resources import PreAllocatedResourceContainer -from pacman_test_objects import NewPartitionerConstraint, SimpleTestVertex - - -class TestPartitioner(unittest.TestCase): - """ - test for partition-and-place partitioning algorithm - """ - - def setUp(self): - """setup for all basic partitioner tests - """ - unittest_setup() - self.vert1 = SimpleTestVertex(10, "New AbstractConstrainedVertex 1") - self.vert1.splitter = SplitterSliceLegacy() - self.vert2 = SimpleTestVertex(5, "New AbstractConstrainedVertex 2") - self.vert2.splitter = SplitterSliceLegacy() - self.vert3 = SimpleTestVertex(3, "New AbstractConstrainedVertex 3") - self.vert3.splitter = SplitterSliceLegacy() - self.edge1 = ApplicationEdge( - self.vert1, self.vert2, label="First edge") - self.edge2 = ApplicationEdge( - self.vert2, self.vert1, label="Second edge") - self.edge3 = ApplicationEdge( - self.vert1, self.vert3, label="Third edge") - self.verts = [self.vert1, self.vert2, self.vert3] - self.edges = [self.edge1, self.edge2, self.edge3] - self.graph = ApplicationGraph("Graph") - self.graph.add_vertices(self.verts) - self.graph.add_edges(self.edges, "foo") - - n_processors = 18 - (e, ne, n, w, _, _) = range(6) - - links = list() - links.append(Link(0, 0, e, 0, 1)) - - _sdram = SDRAM(128 * (2**20)) - - links = list() - - links.append(Link(0, 0, e, 1, 1)) - links.append(Link(0, 1, ne, 1, 0)) - links.append(Link(1, 1, n, 0, 0)) - links.append(Link(1, 0, w, 0, 1)) - r = Router(links, False, 1024) - - ip = "192.162.240.253" - chips = list() - for x in range(5): - for y in range(5): - if x == y == 0: - chips.append(Chip(x, y, n_processors, r, _sdram, 0, 0, ip)) - else: - chips.append(Chip(x, y, n_processors, r, _sdram, 0, 0)) - - self.machine = machine_from_chips(chips) - - def test_partition_with_no_additional_constraints(self): - """test a partitioning with a graph with no extra constraints - """ - graph, _ = splitter_partitioner( - self.graph, self.machine, plan_n_time_steps=100, - pre_allocated_resources=PreAllocatedResourceContainer()) - self.assertEqual(len(list(graph.vertices)), 3) - vert_sizes = [] - for vert in self.verts: - vert_sizes.append(vert.n_atoms) - self.assertEqual(len(list(graph.edges)), 3) - for vertex in graph.vertices: - self.assertIn(vertex.vertex_slice.n_atoms, vert_sizes) - - def test_partition_with_no_additional_constraints_extra_edge(self): - """test that the basic form with an extra edge works - """ - self.graph.add_edge( - ApplicationEdge(self.vert3, self.vert1), "TEST") - graph, _ = splitter_partitioner( - self.graph, self.machine, plan_n_time_steps=100, - pre_allocated_resources=PreAllocatedResourceContainer()) - self.assertEqual(len(list(graph.vertices)), 3) - self.assertEqual(len(list(graph.edges)), 4) - - def test_partition_on_large_vertex_than_has_to_be_split(self): - """ - test that partitioning 1 large vertex can make it into 2 small ones - """ - large_vertex = 
SimpleTestVertex(300, "Large vertex") - large_vertex.splitter = SplitterSliceLegacy() - self.graph = ApplicationGraph("Graph with large vertex") - self.graph.add_vertex(large_vertex) - graph, _ = splitter_partitioner( - self.graph, self.machine, plan_n_time_steps=100, - pre_allocated_resources=PreAllocatedResourceContainer()) - self.assertEqual(large_vertex._model_based_max_atoms_per_core, 256) - self.assertGreater(len(list(graph.vertices)), 1) - - def test_partition_on_very_large_vertex_than_has_to_be_split(self): - """ - test that partitioning 1 large vertex can make it into multiple small - ones - """ - large_vertex = SimpleTestVertex(500, "Large vertex") - large_vertex.splitter = SplitterSliceLegacy() - self.assertEqual(large_vertex._model_based_max_atoms_per_core, 256) - self.graph = ApplicationGraph("Graph with large vertex") - self.graph.add_vertex(large_vertex) - graph, _ = splitter_partitioner( - self.graph, self.machine, plan_n_time_steps=100, - pre_allocated_resources=PreAllocatedResourceContainer()) - self.assertEqual(large_vertex._model_based_max_atoms_per_core, 256) - self.assertGreater(len(list(graph.vertices)), 1) - - def test_partition_on_target_size_vertex_than_has_to_be_split(self): - """ - test that fixed partitioning causes correct number of vertices - """ - large_vertex = SimpleTestVertex( - 1000, "Large vertex", max_atoms_per_core=10) - large_vertex.splitter = SplitterSliceLegacy() - self.graph = ApplicationGraph("Graph with large vertex") - self.graph.add_vertex(large_vertex) - graph, _ = splitter_partitioner( - self.graph, self.machine, plan_n_time_steps=100, - pre_allocated_resources=PreAllocatedResourceContainer()) - self.assertEqual(len(list(graph.vertices)), 100) - - def test_partition_with_barely_sufficient_space(self): - """ - test that partitioning will work when close to filling the machine - """ - n_processors = 18 - (e, ne, n, w, _, _) = range(6) - - links = list() - links.append(Link(0, 0, e, 0, 1)) - - _sdram = SDRAM(2**12) - - links = list() - - links.append(Link(0, 0, e, 1, 1)) - links.append(Link(0, 1, ne, 1, 0)) - links.append(Link(1, 1, n, 0, 0)) - links.append(Link(1, 0, w, 0, 1)) - r = Router(links, False, 1024) - - ip = "192.162.240.253" - chips = list() - for x in range(5): - for y in range(5): - if x == y == 0: - chips.append(Chip(x, y, n_processors, r, _sdram, 0, 0, ip)) - else: - chips.append(Chip(x, y, n_processors, r, _sdram, 0, 0)) - - self.machine = machine_from_chips(chips) - n_neurons = 17 * 5 * 5 - singular_vertex = SimpleTestVertex(n_neurons, "Large vertex", - max_atoms_per_core=1) - singular_vertex.splitter = SplitterSliceLegacy() - self.assertEqual(singular_vertex._model_based_max_atoms_per_core, 1) - self.graph = ApplicationGraph("Graph with large vertex") - self.graph.add_vertex(singular_vertex) - graph, _ = splitter_partitioner( - self.graph, self.machine, plan_n_time_steps=100, - pre_allocated_resources=PreAllocatedResourceContainer()) - self.assertEqual(singular_vertex._model_based_max_atoms_per_core, 1) - self.assertEqual(len(list(graph.vertices)), n_neurons) - - def test_partition_with_insufficient_space(self): - """ - test that if there's not enough space, the test the partitioner will - raise an error - """ - n_processors = 18 - (e, ne, n, w, _, _) = range(6) - - links = list() - links.append(Link(0, 0, e, 0, 1)) - - _sdram = SDRAM(2**11) - - links = list() - - links.append(Link(0, 0, e, 1, 1)) - links.append(Link(0, 1, ne, 1, 0)) - links.append(Link(1, 1, n, 0, 0)) - links.append(Link(1, 0, w, 0, 1)) - r = Router(links, 
False, 1024) - - ip = "192.162.240.253" - chips = list() - for x in range(5): - for y in range(5): - if x == y == 0: - chips.append(Chip(x, y, n_processors, r, _sdram, 0, 0, ip)) - else: - chips.append(Chip(x, y, n_processors, r, _sdram, 0, 0)) - - self.machine = machine_from_chips(chips) - large_vertex = SimpleTestVertex(3000, "Large vertex", - max_atoms_per_core=1) - large_vertex.splitter = SplitterSliceLegacy() - self.assertEqual(large_vertex._model_based_max_atoms_per_core, 1) - self.graph = ApplicationGraph("Graph with large vertex") - self.graph.add_vertex(large_vertex) - with self.assertRaises(PacmanValueError): - splitter_partitioner(self.graph, self.machine, 3000, - PreAllocatedResourceContainer()) - - def test_partition_with_less_sdram_than_default(self): - """ - test that the partitioner works when its machine is slightly malformed - in that it has less SDRAM available - """ - n_processors = 18 - (e, ne, n, w, _, _) = range(6) - - links = list() - links.append(Link(0, 0, e, 0, 1)) - - _sdram = SDRAM(128 * (2**19)) - - links = list() - - links.append(Link(0, 0, e, 1, 1)) - links.append(Link(0, 1, ne, 1, 0)) - links.append(Link(1, 1, n, 0, 0)) - links.append(Link(1, 0, w, 0, 1)) - r = Router(links, False, 1024) - - ip = "192.162.240.253" - chips = list() - for x in range(5): - for y in range(5): - if x == y == 0: - chips.append(Chip(x, y, n_processors, r, _sdram, 0, 0, ip)) - else: - chips.append(Chip(x, y, n_processors, r, _sdram, 0, 0)) - - self.machine = machine_from_chips(chips) - splitter_partitioner(self.graph, self.machine, 3000, - PreAllocatedResourceContainer()) - - def test_partition_with_more_sdram_than_default(self): - """ - test that the partitioner works when its machine is slightly malformed - in that it has more SDRAM available - """ - n_processors = 18 - (e, ne, n, w, _, _) = range(6) - - links = list() - links.append(Link(0, 0, e, 0, 1)) - - _sdram = SDRAM(128 * (2**21)) - - links = list() - - links.append(Link(0, 0, e, 1, 1)) - links.append(Link(0, 1, ne, 1, 0)) - links.append(Link(1, 1, n, 0, 0)) - links.append(Link(1, 0, w, 0, 1)) - r = Router(links, False, 1024) - - ip = "192.162.240.253" - chips = list() - for x in range(5): - for y in range(5): - if x == y == 0: - chips.append(Chip(x, y, n_processors, r, _sdram, 0, 0, ip)) - else: - chips.append(Chip(x, y, n_processors, r, _sdram, 0, 0)) - - self.machine = machine_from_chips(chips) - splitter_partitioner(self.graph, self.machine, 3000, - PreAllocatedResourceContainer()) - - def test_partition_with_unsupported_constraints(self): - """ - test that when a vertex has a constraint that is unrecognised, - it raises an error - """ - constrained_vertex = SimpleTestVertex(13, "Constrained") - constrained_vertex.add_constraint( - NewPartitionerConstraint("Mock constraint")) - with self.assertRaises(PacmanInvalidParameterException): - constrained_vertex.splitter = SplitterSliceLegacy() - - def test_partition_with_empty_graph(self): - """test that the partitioner can work with an empty graph - """ - self.graph = ApplicationGraph("foo") - graph, _ = splitter_partitioner( - self.graph, self.machine, plan_n_time_steps=100, - pre_allocated_resources=PreAllocatedResourceContainer()) - self.assertEqual(len(list(graph.vertices)), 0) - - def test_operation_with_same_size_as_vertex_constraint(self): - """ - test that the partition and place partitioner can handle same size as - constraints on a vertex that is split into one core - """ - with self.assertRaises(NotImplementedError): - constrained_vertex = SimpleTestVertex(5, 
"Constrained") - constrained_vertex.add_constraint( - SameAtomsAsVertexConstraint(self.vert2)) - constrained_vertex.splitter_object = SplitterSliceLegacy() - self.graph.add_vertex(constrained_vertex) - graph, _ = splitter_partitioner( - self.graph, self.machine, plan_n_time_steps=100, - pre_allocated_resources=PreAllocatedResourceContainer()) - self.assertEqual(len(list(graph.vertices)), 4) - - def test_operation_with_same_size_as_vertex_constraint_large_vertices( - self): - """ - test that the partition and place partitioner can handle same size as - constraints on a vertex which has to be split over many cores - """ - with self.assertRaises(NotImplementedError): - constrained_vertex = SimpleTestVertex(300, "Constrained") - new_large_vertex = SimpleTestVertex(300, "Non constrained") - constrained_vertex.add_constraint( - SameAtomsAsVertexConstraint(new_large_vertex)) - new_large_vertex.splitter_object = SplitterSliceLegacy() - constrained_vertex.splitter_object = SplitterSliceLegacy() - self.graph.add_vertices([new_large_vertex, constrained_vertex]) - graph, _ = splitter_partitioner( - self.graph, self.machine, plan_n_time_steps=100, - pre_allocated_resources=PreAllocatedResourceContainer()) - self.assertEqual(len(list(graph.vertices)), 7) - - def test_operation_same_size_as_vertex_constraint_different_order(self): - """ - test that the partition and place partitioner can handle same size as - constraints on a vertex which has to be split over many cores where - the order of the vertices being added is different. - """ - with self.assertRaises(NotImplementedError): - constrained_vertex = SimpleTestVertex(300, "Constrained") - new_large_vertex = SimpleTestVertex(300, "Non constrained") - constrained_vertex.add_constraint( - SameAtomsAsVertexConstraint(new_large_vertex)) - constrained_vertex.splitter_object = SplitterSliceLegacy() - new_large_vertex.splitter_object = SplitterSliceLegacy() - self.graph.add_vertices([constrained_vertex, new_large_vertex]) - graph, _ = splitter_partitioner( - self.graph, self.machine, plan_n_time_steps=100, - pre_allocated_resources=PreAllocatedResourceContainer()) - # split in 256 each, so 4 machine vertices - self.assertEqual(len(list(graph.vertices)), 7) - - def test_operation_with_same_size_as_vertex_constraint_exception(self): - """ - test that a partition same as constraint with different size atoms - causes errors - """ - with self.assertRaises(NotImplementedError): - constrained_vertex = SimpleTestVertex(100, "Constrained") - constrained_vertex.add_constraint( - SameAtomsAsVertexConstraint(self.vert2)) - constrained_vertex.splitter_object = SplitterSliceLegacy() - self.graph.add_vertex(constrained_vertex) - self.assertRaises(PacmanPartitionException, splitter_partitioner, - self.graph, self.machine, 1000, - PreAllocatedResourceContainer()) - - def test_operation_with_same_size_as_vertex_constraint_chain(self): - """ Test that a chain of same size constraints works even when the\ - order of vertices is not correct for the chain - """ - with self.assertRaises(NotImplementedError): - graph = ApplicationGraph("Test") - vertex_1 = SimpleTestVertex(10, "Vertex_1", 5) - vertex_1.splitter_object = SplitterSliceLegacy() - vertex_2 = SimpleTestVertex(10, "Vertex_2", 4) - vertex_3 = SimpleTestVertex(10, "Vertex_3", 2) - vertex_3.add_constraint(SameAtomsAsVertexConstraint(vertex_2)) - vertex_2.add_constraint(SameAtomsAsVertexConstraint(vertex_1)) - vertex_2.splitter_object = SplitterSliceLegacy() - vertex_3.splitter_object = SplitterSliceLegacy() - 
graph.add_vertices([vertex_1, vertex_2, vertex_3]) - machine = virtual_machine(width=2, height=2) - splitter_partitioner(graph, machine, plan_n_time_steps=None) - subvertices_1 = list(vertex_1.machine_vertices) - subvertices_2 = list(vertex_2.machine_vertices) - subvertices_3 = list(vertex_3.machine_vertices) - self.assertEqual(len(subvertices_1), len(subvertices_2)) - self.assertEqual(len(subvertices_2), len(subvertices_3)) - - def test_partitioning_with_2_massive_pops(self): - constrained_vertex = SimpleTestVertex(16000, "Constrained") - constrained_vertex.splitter = SplitterSliceLegacy() - self.graph.add_vertex(constrained_vertex) - constrained_vertex = SimpleTestVertex(16000, "Constrained") - constrained_vertex.splitter = SplitterSliceLegacy() - self.graph.add_vertex(constrained_vertex) - splitter_partitioner( - self.graph, self.machine, 3000, PreAllocatedResourceContainer()) - - -if __name__ == '__main__': - unittest.main() diff --git a/unittests/operations_tests/partition_algorithms_tests/test_partitioner_with_pre_allocated_resources.py b/unittests/operations_tests/partition_algorithms_tests/test_partitioner_with_pre_allocated_resources.py deleted file mode 100644 index 3759b3477..000000000 --- a/unittests/operations_tests/partition_algorithms_tests/test_partitioner_with_pre_allocated_resources.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright (c) 2017-2019 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
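
For reference, a minimal sketch of the slimmed-down key-allocator constraint construction checked in test_key_allocator.py above; the expected repr string is copied directly from that test:

# Sketch: the surviving key allocator constraint, whose repr now shows
# a partition field (None here) instead of a key_list_function.
from pacman.model.routing_info import BaseKeyAndMask
from pacman.model.constraints.key_allocator_constraints import (
    FixedKeyAndMaskConstraint)

c = FixedKeyAndMaskConstraint([BaseKeyAndMask(0xFF0, 0xFF8)])
assert str(c) == ("FixedKeyAndMaskConstraint(keys_and_masks="
                  "[KeyAndMask:0xff0:0xff8], partition=None)")
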
- -from spinn_machine import virtual_machine -from pacman.config_setup import unittest_setup -from pacman.model.partitioner_splitters import SplitterSliceLegacy -from pacman.operations.partition_algorithms import splitter_partitioner -from pacman.model.constraints.placer_constraints import ( - ChipAndCoreConstraint) -from pacman.model.graphs.application import ApplicationGraph -from pacman.model.resources import (PreAllocatedResourceContainer) -from pacman_test_objects import SimpleTestVertex - - -class TestPartitionerWithPreAllocatedResources(object): - """ tests the interaction of the pre allocated res with the partitioner\ - and place partitioner - """ - - def test_1_chip_no_pre_allocated_too_much_sdram(self): - unittest_setup() - machine = virtual_machine(width=8, height=8) - graph = ApplicationGraph("Test") - - eight_meg = 8 * 1024 * 1024 - - # add graph vertices which reside on 0,0 - for _ in range(0, 13): - vertex = SimpleTestVertex( - constraints=[ChipAndCoreConstraint(x=0, y=0)], - n_atoms=1, - fixed_sdram_value=eight_meg) - vertex.splitter = SplitterSliceLegacy() - graph.add_vertex(vertex) - - # add pre-allocated resources for cores on 0,0 - pre_allocated_res = PreAllocatedResourceContainer() - - # run partitioner that should go boom - try: - splitter_partitioner(graph, machine, plan_n_time_steps=None, - pre_allocated_resources=pre_allocated_res) - except Exception as e: - raise Exception("should have blown up here") from e - - -if __name__ == "__main__": - - test = TestPartitionerWithPreAllocatedResources() - test.test_1_chip_over_pre_allocated() - test.test_1_chip_under_pre_allocated() - test.test_1_chip_pre_allocated_same_core() - test.test_1_chip_pre_allocated_too_much_sdram() diff --git a/unittests/operations_tests/partition_algorithms_tests/test_splitter_partitioner.py b/unittests/operations_tests/partition_algorithms_tests/test_splitter_partitioner.py deleted file mode 100644 index d5125a557..000000000 --- a/unittests/operations_tests/partition_algorithms_tests/test_splitter_partitioner.py +++ /dev/null @@ -1,117 +0,0 @@ -# Copyright (c) 2017-2019 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
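
For reference, a minimal sketch of the reworked routing-info lookup exercised in test_routing_info.py above; it uses only classes imported in that test, and the final index argument mirrors the 0 passed there:

# Sketch: routing info is now registered per (pre-vertex, partition id)
# via MachineVertexRoutingInfo rather than per edge/partition object.
from pacman.config_setup import unittest_setup
from pacman.model.graphs.machine import SimpleMachineVertex
from pacman.model.resources import ResourceContainer
from pacman.model.routing_info import (
    RoutingInfo, BaseKeyAndMask, MachineVertexRoutingInfo)
from pacman.utilities.constants import FULL_MASK

unittest_setup()
pre = SimpleMachineVertex(resources=ResourceContainer())
info = MachineVertexRoutingInfo(
    [BaseKeyAndMask(12345, FULL_MASK)], "Test", pre, 0)
infos = RoutingInfo()
infos.add_routing_info(info)
assert infos.get_first_key_from_pre_vertex(pre, "Test") == 12345
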
- -""" -test for SplitterPartitioner functions -""" - -import unittest -from pacman.config_setup import unittest_setup -from pacman.model.partitioner_splitters import SplitterSliceLegacy -from pacman.model.partitioner_splitters.abstract_splitters import ( - AbstractDependentSplitter) -from pacman.operations.partition_algorithms.splitter_partitioner import ( - _SplitterPartitioner) -from pacman.exceptions import ( - PacmanAlreadyExistsException, PacmanPartitionException) -from pacman_test_objects import SimpleTestVertex - - -class MockDependant(AbstractDependentSplitter): - - def create_machine_vertices(self, resource_tracker, machine_graph): - raise NotImplementedError() - - def get_out_going_slices(self): - raise NotImplementedError() - - def get_in_coming_slices(self): - raise NotImplementedError() - - def get_out_going_vertices(self, edge, outgoing_edge_partition): - raise NotImplementedError() - - def get_in_coming_vertices( - self, edge, outgoing_edge_partition, src_machine_vertex): - raise NotImplementedError() - - def machine_vertices_for_recording(self, variable_to_record): - raise NotImplementedError() - - def reset_called(self): - raise NotImplementedError() - - -class TestSplitterPartitioner(unittest.TestCase): - """ - test for SplitterPartitioner functions - """ - - def setUp(self): - unittest_setup() - - def test_order_vertices_for_dependent_splitters(self): - vertices = list() - v1 = SimpleTestVertex(1, splitter=SplitterSliceLegacy(), label="v1") - vertices.append(v1) - s2 = SplitterSliceLegacy() - v2 = SimpleTestVertex(1, splitter=s2, label="v2") - s3 = SplitterSliceLegacy() - s2a = MockDependant(s2, "depends on v2") - v2a = SimpleTestVertex(1, splitter=s2a, label="A depends on v2") - s2a.set_governed_app_vertex(v2a) - v2aa = SimpleTestVertex( - 1, splitter=MockDependant(s2a, "depends on v2a"), - label="A depends on v2a") - vertices.append(v2aa) - v3a = SimpleTestVertex(1, splitter=MockDependant(s3, "depends on v3"), - label="A depends on v3") - vertices.append(v3a) - vertices.append(v2a) - v2b = SimpleTestVertex(1, splitter=MockDependant(s2, "depends on v2"), - label="B depends on v2") - vertices.append(v2b) - vertices.append(v2) - v3 = SimpleTestVertex(1, splitter=s3, label="v3") - vertices.append(v3) - v4 = SimpleTestVertex(1, splitter=SplitterSliceLegacy(), label="v4") - vertices.append(v4) - sp = _SplitterPartitioner() - sp.order_vertices_for_dependent_splitters(vertices) - self.assertLess(vertices.index(v1), vertices.index(v2)) - self.assertLess(vertices.index(v2), vertices.index(v3)) - self.assertLess(vertices.index(v3), vertices.index(v4)) - self.assertLess(vertices.index(v2), vertices.index(v2a)) - self.assertLess(vertices.index(v2a), vertices.index(v2aa)) - self.assertLess(vertices.index(v2), vertices.index(v2b)) - self.assertLess(vertices.index(v3), vertices.index(v3a)) - - def test_detect_circular(self): - s1 = MockDependant(None, "depends on s3") - SimpleTestVertex(1, splitter=s1, label="v1") - s2 = MockDependant(s1, "depends on s1") - SimpleTestVertex(1, splitter=s2, label="v2") - s3 = MockDependant(s2, "depends on s2") - SimpleTestVertex(1, splitter=s3, label="v3") - with self.assertRaises(PacmanAlreadyExistsException): - s3.other_splitter = s1 - with self.assertRaises(PacmanPartitionException): - s1.other_splitter = s3 - with self.assertRaises(PacmanPartitionException): - s1.other_splitter = s1 - - -if __name__ == '__main__': - unittest.main() diff --git a/unittests/operations_tests/placer_algorithms_tests/test_application_placer.py 
b/unittests/operations_tests/placer_algorithms_tests/test_application_placer.py new file mode 100644 index 000000000..60e37da7a --- /dev/null +++ b/unittests/operations_tests/placer_algorithms_tests/test_application_placer.py @@ -0,0 +1,93 @@ +# Copyright (c) 2022 The University of Manchester +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +from spinn_machine.virtual_machine import virtual_machine +from pacman.model.partitioner_splitters.abstract_splitters import ( + AbstractSplitterCommon) +from pacman.operations.placer_algorithms.application_placer import ( + place_application_graph) +from pacman.model.graphs.machine import SimpleMachineVertex +from pacman.model.resources import ResourceContainer, ConstantSDRAM +from pacman.model.graphs.application import ApplicationVertex, ApplicationGraph +from pacman.config_setup import unittest_setup +from pacman.model.placements.placements import Placements + + +class TestSplitter(AbstractSplitterCommon): + + def __init__(self, n_groups, n_machine_vertices): + AbstractSplitterCommon.__init__(self) + self.__n_groups = n_groups + self.__n_machine_vertices = n_machine_vertices + self.__same_chip_groups = list() + + def create_machine_vertices(self, chip_counter): + for _ in range(self.__n_groups): + m_vertices = [ + SimpleMachineVertex( + ResourceContainer(), app_vertex=self._governed_app_vertex, + label=f"{self._governed_app_vertex.label}_{i}") + for i in range(self.__n_machine_vertices)] + for m_vertex in m_vertices: + self._governed_app_vertex.remember_machine_vertex(m_vertex) + self.__same_chip_groups.append((m_vertices, ConstantSDRAM(0))) + + def get_out_going_slices(self): + return None + + def get_in_coming_slices(self): + return None + + def get_out_going_vertices(self, partition_id): + return self._governed_app_vertex.machine_vertices + + def get_in_coming_vertices(self, partition_id): + return self._governed_app_vertex.machine_vertices + + def machine_vertices_for_recording(self, variable_to_record): + return [] + + def reset_called(self): + pass + + def get_same_chip_groups(self): + return self.__same_chip_groups + + +class TestAppVertex(ApplicationVertex): + def __init__(self, n_atoms, label): + super(TestAppVertex, self).__init__(label) + self.__n_atoms = n_atoms + + @property + def n_atoms(self): + return self.__n_atoms + + +def _make_vertices(app_graph, n_atoms, n_groups, n_machine_vertices, label): + vertex = TestAppVertex(n_atoms, label) + vertex.splitter = TestSplitter(n_groups, n_machine_vertices) + app_graph.add_vertex(vertex) + vertex.splitter.create_machine_vertices(None) + return vertex + + +def test_application_placer(): + app_graph = ApplicationGraph("Test") + unittest_setup() + for i in range(56): + _make_vertices(app_graph, 1000, 14, 5, f"app_vertex_{i}") + + machine = virtual_machine(24, 12) + place_application_graph(machine, app_graph, 100, Placements()) diff --git a/unittests/operations_tests/placer_algorithms_tests/test_connecitve_placer.py 
b/unittests/operations_tests/placer_algorithms_tests/test_connecitve_placer.py deleted file mode 100644 index ef29b9bfc..000000000 --- a/unittests/operations_tests/placer_algorithms_tests/test_connecitve_placer.py +++ /dev/null @@ -1,114 +0,0 @@ -# Copyright (c) 2017-2019 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -import unittest -from spinn_machine import virtual_machine -from pacman.config_setup import unittest_setup -from pacman.model.graphs.common import Slice -from pacman.model.graphs.machine import ( - MachineGraph, MachineEdge, SimpleMachineVertex) -from pacman.model.resources import ( - ConstantSDRAM, CPUCyclesPerTickResource, DTCMResource, ResourceContainer) -from pacman.exceptions import PacmanValueError -from pacman.model.constraints.placer_constraints import ( - ChipAndCoreConstraint, RadialPlacementFromChipConstraint) -from pacman.operations.placer_algorithms import connective_based_placer -from pacman_test_objects import (get_resourced_machine_vertex) - - -class TestConnectivePlacer(unittest.TestCase): - def setUp(self): - unittest_setup() - self.machine = virtual_machine(8, 8) - self.mach_graph = MachineGraph("machine") - self.vertices = list() - self.vertex1 = get_resourced_machine_vertex(0, 1, "First vertex") - self.vertex2 = get_resourced_machine_vertex(1, 5, "Second vertex") - self.vertex3 = get_resourced_machine_vertex(5, 10, "Third vertex") - self.vertex4 = get_resourced_machine_vertex(10, 100, "Fourth vertex") - self.vertices.append(self.vertex1) - self.mach_graph.add_vertex(self.vertex1) - self.vertices.append(self.vertex2) - self.mach_graph.add_vertex(self.vertex2) - self.vertices.append(self.vertex3) - self.mach_graph.add_vertex(self.vertex3) - self.vertices.append(self.vertex4) - self.mach_graph.add_vertex(self.vertex4) - self.edges = list() - edge1 = MachineEdge(self.vertex2, self.vertex3) - self.edges.append(edge1) - self.mach_graph.add_edge(edge1, "packet") - edge2 = MachineEdge(self.vertex2, self.vertex4) - self.edges.append(edge2) - self.mach_graph.add_edge(edge2, "packet") - edge3 = MachineEdge(self.vertex3, self.vertex4) - self.edges.append(edge3) - self.mach_graph.add_edge(edge3, "packet") - edge4 = MachineEdge(self.vertex3, self.vertex1) - self.edges.append(edge4) - - self.plan_n_timesteps = 100 - - def test_simple(self): - placements = connective_based_placer( - self.mach_graph, self.machine, 100) - self.assertEqual(len(self.vertices), len(placements)) - - def test_place_vertex_too_big_with_vertex(self): - cpu_cycles = 1000 - dtcm_requirement = 1000 - sdram_requirement = self.machine.get_chip_at(0, 0).sdram.size * 20 - rc = ResourceContainer( - cpu_cycles=CPUCyclesPerTickResource(cpu_cycles), - dtcm=DTCMResource(dtcm_requirement), - sdram=ConstantSDRAM(sdram_requirement)) - - large_machine_vertex = SimpleMachineVertex( - rc, vertex_slice=Slice(0, 499), label="Second vertex") - self.mach_graph.add_vertex(large_machine_vertex) - with 
self.assertRaises(PacmanValueError): - connective_based_placer(self.mach_graph, self.machine, 100) - - def test_deal_with_constraint_placement_vertices_dont_have_vertex(self): - self.vertex2.add_constraint(ChipAndCoreConstraint(3, 5, 7)) - self.vertex3.add_constraint(RadialPlacementFromChipConstraint(2, 4)) - placements = connective_based_placer( - self.mach_graph, self.machine, 100) - for placement in placements.placements: - if placement.vertex == self.vertex2: - self.assertEqual(placement.x, 3) - self.assertEqual(placement.y, 5) - self.assertEqual(placement.p, 7) - if placement.vertex == self.vertex3: - self.assertEqual(placement.x, 2) - self.assertEqual(placement.y, 4) - self.assertEqual(len(self.vertices), len(placements)) - - def test_fill_machine(self): - graph = MachineGraph("machine") - cores = sum(chip.n_user_processors for chip in self.machine.chips) - for i in range(cores): # 50 atoms per each processor on 20 chips - graph.add_vertex(get_resourced_machine_vertex( - 0, 50, "vertex " + str(i))) - placements = connective_based_placer(graph, self.machine, 100) - self.assertEqual(len(placements), cores) - # One more vertex should be too many - graph.add_vertex(get_resourced_machine_vertex(0, 50, "toomany")) - with self.assertRaises(PacmanValueError): - connective_based_placer(graph, self.machine, 100) - - -if __name__ == '__main__': - unittest.main() diff --git a/unittests/operations_tests/placer_algorithms_tests/test_one_to_one_placer.py b/unittests/operations_tests/placer_algorithms_tests/test_one_to_one_placer.py deleted file mode 100644 index 76d11c804..000000000 --- a/unittests/operations_tests/placer_algorithms_tests/test_one_to_one_placer.py +++ /dev/null @@ -1,172 +0,0 @@ -# Copyright (c) 2017-2019 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
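
For reference, a minimal self-contained sketch of the new application-level placer entry point added in test_application_placer.py above; it is run here on an empty graph (assumed to place trivially), since a populated graph needs splitters that provide get_same_chip_groups(), as TestSplitter demonstrates; the Placements object is assumed to be populated in place, as in that test:

# Sketch: application-graph placement replacing the deleted one-to-one
# and connective machine-graph placers.
from spinn_machine.virtual_machine import virtual_machine
from pacman.config_setup import unittest_setup
from pacman.model.graphs.application import ApplicationGraph
from pacman.model.placements.placements import Placements
from pacman.operations.placer_algorithms.application_placer import (
    place_application_graph)

unittest_setup()
machine = virtual_machine(24, 12)
placements = Placements()
place_application_graph(machine, ApplicationGraph("Empty"), 100, placements)
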
-import pytest -from spinn_machine.virtual_machine import virtual_machine -from pacman.config_setup import unittest_setup -from pacman.exceptions import PacmanException -from pacman.model.graphs.machine import ( - MachineGraph, SimpleMachineVertex, MachineSpiNNakerLinkVertex, MachineEdge, - SDRAMMachineEdge) -from pacman.model.graphs.machine import ConstantSDRAMMachinePartition -from pacman.model.resources.resource_container import ResourceContainer -from pacman.model.constraints.placer_constraints import ChipAndCoreConstraint -from pacman.operations.chip_id_allocator_algorithms import ( - malloc_based_chip_id_allocator) -from pacman.operations.placer_algorithms import one_to_one_placer -from pacman_test_objects import MockMachineVertex - - -def test_virtual_vertices_one_to_one(): - """ Test that the placer works with a virtual vertex - """ - unittest_setup() - - # Create a graph with a virtual vertex - machine_graph = MachineGraph("Test") - virtual_vertex = MachineSpiNNakerLinkVertex( - spinnaker_link_id=0, label="Virtual") - machine_graph.add_vertex(virtual_vertex) - - # These vertices are fixed on 0, 0 - misc_vertices = list() - for i in range(3): - misc_vertex = SimpleMachineVertex( - resources=ResourceContainer(), constraints=[ - ChipAndCoreConstraint(0, 0)], - label="Fixed_0_0_{}".format(i)) - machine_graph.add_vertex(misc_vertex) - misc_vertices.append(misc_vertex) - - # These vertices are 1-1 connected to the virtual vertex - one_to_one_vertices = list() - for i in range(16): - one_to_one_vertex = SimpleMachineVertex( - resources=ResourceContainer(), - label="Vertex_{}".format(i)) - machine_graph.add_vertex(one_to_one_vertex) - edge = MachineEdge(virtual_vertex, one_to_one_vertex) - machine_graph.add_edge(edge, "SPIKES") - one_to_one_vertices.append(one_to_one_vertex) - - # Get and extend the machine for the virtual chip - machine = virtual_machine(width=8, height=8) - extended_machine = malloc_based_chip_id_allocator(machine, machine_graph) - - # Do placements - placements = one_to_one_placer( - machine_graph, extended_machine, plan_n_timesteps=1000) - - # The virtual vertex should be on a virtual chip - placement = placements.get_placement_of_vertex(virtual_vertex) - assert machine.get_chip_at(placement.x, placement.y).virtual - - # The 0, 0 vertices should be on 0, 0 - for vertex in misc_vertices: - placement = placements.get_placement_of_vertex(vertex) - assert placement.x == placement.y == 0 - - # The other vertices should *not* be on a virtual chip - for vertex in one_to_one_vertices: - placement = placements.get_placement_of_vertex(vertex) - assert not machine.get_chip_at(placement.x, placement.y).virtual - - -def test_one_to_one(): - """ Test normal 1-1 placement - """ - unittest_setup() - - # Create a graph - machine_graph = MachineGraph("Test") - - # Connect a set of vertices in a chain of length 3 - one_to_one_chains = list() - for i in range(10): - last_vertex = None - chain = list() - for j in range(3): - vertex = SimpleMachineVertex( - resources=ResourceContainer(), - label="Vertex_{}_{}".format(i, j)) - machine_graph.add_vertex(vertex) - if last_vertex is not None: - edge = MachineEdge(last_vertex, vertex) - machine_graph.add_edge(edge, "SPIKES") - last_vertex = vertex - chain.append(vertex) - one_to_one_chains.append(chain) - - # Connect a set of 20 vertices in a chain - too_many_vertices = list() - last_vertex = None - for i in range(20): - vertex = SimpleMachineVertex( - resources=ResourceContainer(), label="Vertex_{}".format(i)) - machine_graph.add_vertex(vertex) 
- if last_vertex is not None: - edge = MachineEdge(last_vertex, vertex) - machine_graph.add_edge(edge, "SPIKES") - too_many_vertices.append(vertex) - last_vertex = vertex - - # Do placements - machine = virtual_machine(width=8, height=8) - placements = one_to_one_placer( - machine_graph, machine, plan_n_timesteps=1000) - - # The 1-1 connected vertices should be on the same chip - for chain in one_to_one_chains: - first_placement = placements.get_placement_of_vertex(chain[0]) - for i in range(1, 3): - placement = placements.get_placement_of_vertex(chain[i]) - assert placement.x == first_placement.x - assert placement.y == first_placement.y - - # The other vertices should be on more than one chip - too_many_chips = set() - for vertex in too_many_vertices: - placement = placements.get_placement_of_vertex(vertex) - too_many_chips.add((placement.x, placement.y)) - assert len(too_many_chips) > 1 - - -def test_sdram_links(): - """ Test sdram edges which should explode - """ - unittest_setup() - - # Create a graph - machine_graph = MachineGraph("Test") - - # Connect a set of vertices in a chain of length 3 - last_vertex = None - for x in range(20): - vertex = MockMachineVertex( - resources=ResourceContainer(), - label="Vertex_{}".format(x), sdram_requirement=20) - machine_graph.add_vertex(vertex) - last_vertex = vertex - - for vertex in machine_graph.vertices: - machine_graph.add_outgoing_edge_partition( - ConstantSDRAMMachinePartition( - identifier="SDRAM", pre_vertex=vertex, label="bacon")) - edge = SDRAMMachineEdge(vertex, last_vertex, "bacon", app_edge=None) - machine_graph.add_edge(edge, "SDRAM") - - # Do placements - machine = virtual_machine(width=8, height=8) - with pytest.raises(PacmanException): - one_to_one_placer(machine_graph, machine, plan_n_timesteps=1000) diff --git a/unittests/operations_tests/placer_algorithms_tests/test_radial_placer.py b/unittests/operations_tests/placer_algorithms_tests/test_radial_placer.py deleted file mode 100644 index 9769b070d..000000000 --- a/unittests/operations_tests/placer_algorithms_tests/test_radial_placer.py +++ /dev/null @@ -1,100 +0,0 @@ -# Copyright (c) 2017-2019 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
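The radial-placer test deleted below and the connective-placer test above provoke the same failure in the same way; condensed, and assuming the virtual machine the tests themselves build, the over-allocation that any placer must reject with PacmanValueError is:

from spinn_machine import virtual_machine
from pacman.config_setup import unittest_setup
from pacman.model.resources import (
    ConstantSDRAM, CPUCyclesPerTickResource, DTCMResource, ResourceContainer)

unittest_setup()
machine = virtual_machine(8, 8)
# Twenty times a chip's total SDRAM cannot fit on any chip, so a vertex
# carrying this container can never be placed
rc = ResourceContainer(
    cpu_cycles=CPUCyclesPerTickResource(1000),
    dtcm=DTCMResource(1000),
    sdram=ConstantSDRAM(machine.get_chip_at(0, 0).sdram.size * 20))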
- -import unittest -from spinn_machine import virtual_machine -from pacman.config_setup import unittest_setup -from pacman.model.graphs.common import Slice -from pacman.model.graphs.machine import MachineGraph, SimpleMachineVertex -from pacman.model.resources import ( - ConstantSDRAM, CPUCyclesPerTickResource, DTCMResource, ResourceContainer) -from pacman.exceptions import PacmanValueError -from pacman.model.constraints.placer_constraints import ( - ChipAndCoreConstraint, RadialPlacementFromChipConstraint) -from pacman.operations.placer_algorithms import radial_placer -from pacman_test_objects import (get_resourced_machine_vertex) - - -class TestRadialPlacer(unittest.TestCase): - def setUp(self): - unittest_setup() - self.machine = virtual_machine(8, 8) - self.mach_graph = MachineGraph("machine") - self.vertices = list() - self.vertex1 = get_resourced_machine_vertex(0, 1, "First vertex") - self.vertex2 = get_resourced_machine_vertex(1, 5, "Second vertex") - self.vertex3 = get_resourced_machine_vertex(5, 10, "Third vertex") - self.vertex4 = get_resourced_machine_vertex(10, 100, "Fourth vertex") - self.vertices.append(self.vertex1) - self.mach_graph.add_vertex(self.vertex1) - self.vertices.append(self.vertex2) - self.mach_graph.add_vertex(self.vertex2) - self.vertices.append(self.vertex3) - self.mach_graph.add_vertex(self.vertex3) - self.vertices.append(self.vertex4) - self.mach_graph.add_vertex(self.vertex4) - self.edges = list() - - self.plan_n_timesteps = 100 - - def test_simple(self): - placements = radial_placer(self.mach_graph, self.machine, 100) - self.assertEqual(len(self.vertices), len(placements)) - - def test_place_vertex_too_big_with_vertex(self): - cpu_cycles = 1000 - dtcm_requirement = 1000 - sdram_requirement = self.machine.get_chip_at(0, 0).sdram.size * 20 - rc = ResourceContainer( - cpu_cycles=CPUCyclesPerTickResource(cpu_cycles), - dtcm=DTCMResource(dtcm_requirement), - sdram=ConstantSDRAM(sdram_requirement)) - - large_machine_vertex = SimpleMachineVertex( - rc, vertex_slice=Slice(0, 499), label="Second vertex") - self.mach_graph.add_vertex(large_machine_vertex) - with self.assertRaises(PacmanValueError): - radial_placer(self.mach_graph, self.machine, 100) - - def test_deal_with_constraint_placement_vertices_dont_have_vertex(self): - self.vertex2.add_constraint(ChipAndCoreConstraint(3, 5, 7)) - self.vertex3.add_constraint(RadialPlacementFromChipConstraint(2, 4)) - placements = radial_placer(self.mach_graph, self.machine, 100) - for placement in placements.placements: - if placement.vertex == self.vertex2: - self.assertEqual(placement.x, 3) - self.assertEqual(placement.y, 5) - self.assertEqual(placement.p, 7) - if placement.vertex == self.vertex3: - self.assertEqual(placement.x, 2) - self.assertEqual(placement.y, 4) - self.assertEqual(len(self.vertices), len(placements)) - - def test_fill_machine(self): - graph = MachineGraph("machine") - cores = sum(chip.n_user_processors for chip in self.machine.chips) - for i in range(cores): # 50 atoms per each processor on 20 chips - graph.add_vertex(get_resourced_machine_vertex( - 0, 50, "vertex " + str(i))) - placements = radial_placer(graph, self.machine, 100) - self.assertEqual(len(placements), cores) - # One more vertex should be too many - graph.add_vertex(get_resourced_machine_vertex(0, 50, "toomany")) - with self.assertRaises(PacmanValueError): - radial_placer(graph, self.machine, 100) - - -if __name__ == '__main__': - unittest.main() diff --git a/unittests/operations_tests/placer_algorithms_tests/test_same_chip_constraint.py 
b/unittests/operations_tests/placer_algorithms_tests/test_same_chip_constraint.py deleted file mode 100644 index f47493175..000000000 --- a/unittests/operations_tests/placer_algorithms_tests/test_same_chip_constraint.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright (c) 2017-2019 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -import random -import unittest -from spinn_machine import virtual_machine -from pacman.config_setup import unittest_setup -from pacman.model.graphs.machine import MachineGraph, SimpleMachineVertex -from pacman.model.resources import ResourceContainer -from pacman.model.constraints.placer_constraints import SameChipAsConstraint -from pacman.model.routing_info import DictBasedMachinePartitionNKeysMap -from pacman.operations.placer_algorithms import ( - connective_based_placer, one_to_one_placer, radial_placer, spreader_placer) -from pacman.exceptions import PacmanInvalidParameterException - - -class TestSameChipConstraint(unittest.TestCase): - - def setUp(self): - unittest_setup() - - def _do_test(self, placer): - machine = virtual_machine(width=8, height=8) - graph = MachineGraph("Test") - - vertices = [ - SimpleMachineVertex(ResourceContainer(), label="v{}".format(i)) - for i in range(100) - ] - for vertex in vertices: - graph.add_vertex(vertex) - - same_vertices = [ - SimpleMachineVertex(ResourceContainer(), label="same{}".format(i)) - for i in range(10) - ] - random.seed(12345) - for vertex in same_vertices: - graph.add_vertex(vertex) - for _i in range(0, random.randint(1, 5)): - vertex.add_constraint( - SameChipAsConstraint( - vertices[random.randint(0, 99)])) - - n_keys_map = DictBasedMachinePartitionNKeysMap() - - if placer == "ConnectiveBasedPlacer": - placements = connective_based_placer(graph, machine, None) - elif placer == "OneToOnePlacer": - placements = one_to_one_placer(graph, machine, None) - elif placer == "RadialPlacer": - placements = radial_placer(graph, machine, None) - elif placer == "SpreaderPlacer": - placements = spreader_placer(graph, machine, n_keys_map, None) - else: - raise NotImplementedError(placer) - - for same in same_vertices: - print("{0.vertex.label}, {0.x}, {0.y}, {0.p}: {1}".format( - placements.get_placement_of_vertex(same), - ["{0.vertex.label}, {0.x}, {0.y}, {0.p}".format( - placements.get_placement_of_vertex(constraint.vertex)) - for constraint in same.constraints])) - placement = placements.get_placement_of_vertex(same) - for constraint in same.constraints: - if isinstance(constraint, SameChipAsConstraint): - other_placement = placements.get_placement_of_vertex( - constraint.vertex) - self.assertTrue( - other_placement.x == placement.x and - other_placement.y == placement.y, - "Vertex was not placed on the same chip as requested") - - def test_connective_based(self): - with self.assertRaises(PacmanInvalidParameterException): - self._do_test("ConnectiveBasedPlacer") - - def test_one_to_one(self): - self._do_test("OneToOnePlacer") - - def 
test_radial(self): - self._do_test("RadialPlacer") - - def test_spreader(self): - self._do_test("SpreaderPlacer") diff --git a/unittests/operations_tests/placer_algorithms_tests/test_sdram_edge_placement.py b/unittests/operations_tests/placer_algorithms_tests/test_sdram_edge_placement.py deleted file mode 100644 index 4331e8765..000000000 --- a/unittests/operations_tests/placer_algorithms_tests/test_sdram_edge_placement.py +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright (c) 2017-2019 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -import random -import unittest -from spinn_machine import virtual_machine -from pacman.config_setup import unittest_setup -from pacman.exceptions import PacmanAlreadyPlacedError -from pacman.model.graphs.machine import ( - MachineGraph, SDRAMMachineEdge) -from pacman.model.graphs.machine import ConstantSDRAMMachinePartition -from pacman.model.resources import ResourceContainer -from pacman.model.routing_info import DictBasedMachinePartitionNKeysMap -from pacman.operations.placer_algorithms import ( - connective_based_placer, one_to_one_placer, radial_placer, spreader_placer) -from pacman_test_objects import MockMachineVertex - - -class TestSameChipConstraint(unittest.TestCase): - - def setUp(cls): - unittest_setup() - - def _do_test(self, placer): - machine = virtual_machine(width=8, height=8) - graph = MachineGraph("Test") - - vertices = [ - MockMachineVertex( - ResourceContainer(), label="v{}".format(i), - sdram_requirement=20) - for i in range(100) - ] - for vertex in vertices: - graph.add_vertex(vertex) - - same_vertices = [ - MockMachineVertex(ResourceContainer(), label="same{}".format(i), - sdram_requirement=20) - for i in range(10) - ] - random.seed(12345) - sdram_edges = list() - for vertex in same_vertices: - graph.add_vertex(vertex) - graph.add_outgoing_edge_partition( - ConstantSDRAMMachinePartition( - identifier="Test", pre_vertex=vertex, label="bacon")) - for _i in range(0, random.randint(1, 5)): - sdram_edge = SDRAMMachineEdge( - vertex, vertices[random.randint(0, 99)], label="bacon", - app_edge=None) - sdram_edges.append(sdram_edge) - graph.add_edge(sdram_edge, "Test") - n_keys_map = DictBasedMachinePartitionNKeysMap() - - if placer == "ConnectiveBasedPlacer": - placements = connective_based_placer(graph, machine, None) - elif placer == "OneToOnePlacer": - placements = one_to_one_placer(graph, machine, None) - elif placer == "RadialPlacer": - placements = radial_placer(graph, machine, None) - elif placer == "SpreaderPlacer": - placements = spreader_placer(graph, machine, n_keys_map, None) - else: - raise NotImplementedError(placer) - for edge in sdram_edges: - pre_place = placements.get_placement_of_vertex(edge.pre_vertex) - post_place = placements.get_placement_of_vertex(edge.post_vertex) - assert pre_place.x == post_place.x - assert pre_place.y == post_place.y - - def test_connective_based(self): - try: - self._do_test("ConnectiveBasedPlacer") - except 
PacmanAlreadyPlacedError: - raise unittest.SkipTest( - "https://github.com/SpiNNakerManchester/PACMAN/issues/406") - - def test_one_to_one(self): - self._do_test("OneToOnePlacer") - - def test_radial(self): - self._do_test("RadialPlacer") - - def test_spreader(self): - self._do_test("SpreaderPlacer") diff --git a/unittests/operations_tests/placer_algorithms_tests/test_spreader_placer.py b/unittests/operations_tests/placer_algorithms_tests/test_spreader_placer.py deleted file mode 100644 index 7fcbd33e4..000000000 --- a/unittests/operations_tests/placer_algorithms_tests/test_spreader_placer.py +++ /dev/null @@ -1,189 +0,0 @@ -# Copyright (c) 2017-2019 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -import pytest -from spinn_machine.virtual_machine import virtual_machine -from pacman.config_setup import unittest_setup -from pacman.exceptions import PacmanException -from pacman.model.graphs.machine import ( - MachineGraph, SimpleMachineVertex, MachineSpiNNakerLinkVertex, - MachineEdge, SDRAMMachineEdge) -from pacman.model.graphs.machine import ConstantSDRAMMachinePartition -from pacman.model.resources.resource_container import ResourceContainer -from pacman.model.constraints.placer_constraints import ChipAndCoreConstraint -from pacman.operations.placer_algorithms import spreader_placer -from pacman.model.routing_info import DictBasedMachinePartitionNKeysMap -from pacman.operations.chip_id_allocator_algorithms import ( - malloc_based_chip_id_allocator) -from pacman_test_objects import MockMachineVertex - - -def test_virtual_vertices_spreader(): - """ Test that the placer works with a virtual vertex - """ - unittest_setup() - - # Create a graph with a virtual vertex - machine_graph = MachineGraph("Test") - virtual_vertex = MachineSpiNNakerLinkVertex( - spinnaker_link_id=0, label="Virtual") - machine_graph.add_vertex(virtual_vertex) - - # These vertices are fixed on 0, 0 - misc_vertices = list() - for i in range(3): - misc_vertex = SimpleMachineVertex( - resources=ResourceContainer(), constraints=[ - ChipAndCoreConstraint(0, 0)], - label="Fixed_0_0_{}".format(i)) - machine_graph.add_vertex(misc_vertex) - misc_vertices.append(misc_vertex) - - # These vertices are 1-1 connected to the virtual vertex - one_to_one_vertices = list() - for i in range(16): - one_to_one_vertex = SimpleMachineVertex( - resources=ResourceContainer(), - label="Vertex_{}".format(i)) - machine_graph.add_vertex(one_to_one_vertex) - edge = MachineEdge(virtual_vertex, one_to_one_vertex) - machine_graph.add_edge(edge, "SPIKES") - one_to_one_vertices.append(one_to_one_vertex) - - n_keys_map = DictBasedMachinePartitionNKeysMap() - partition = machine_graph.get_outgoing_edge_partition_starting_at_vertex( - virtual_vertex, "SPIKES") - n_keys_map.set_n_keys_for_partition(partition, 1) - - # Get and extend the machine for the virtual chip - machine = virtual_machine(width=8, height=8) - extended_machine = malloc_based_chip_id_allocator(machine, 
machine_graph) - - # Do placements - placements = spreader_placer( - machine_graph, extended_machine, n_keys_map, plan_n_timesteps=1000) - - # The virtual vertex should be on a virtual chip - placement = placements.get_placement_of_vertex(virtual_vertex) - assert machine.get_chip_at(placement.x, placement.y).virtual - - # The 0, 0 vertices should be on 0, 0 - for vertex in misc_vertices: - placement = placements.get_placement_of_vertex(vertex) - assert placement.x == placement.y == 0 - - # The other vertices should *not* be on a virtual chip - for vertex in one_to_one_vertices: - placement = placements.get_placement_of_vertex(vertex) - assert not machine.get_chip_at(placement.x, placement.y).virtual - - -def test_one_to_one(): - """ Test normal 1-1 placement - """ - unittest_setup() - - # Create a graph - machine_graph = MachineGraph("Test") - - # Connect a set of vertices in a chain of length 3 - n_keys_map = DictBasedMachinePartitionNKeysMap() - one_to_one_chains = list() - for i in range(10): - last_vertex = None - chain = list() - for j in range(3): - vertex = SimpleMachineVertex( - resources=ResourceContainer(), - label="Vertex_{}_{}".format(i, j)) - machine_graph.add_vertex(vertex) - if last_vertex is not None: - edge = MachineEdge(last_vertex, vertex) - machine_graph.add_edge(edge, "SPIKES") - partition = machine_graph\ - .get_outgoing_edge_partition_starting_at_vertex( - last_vertex, "SPIKES") - n_keys_map.set_n_keys_for_partition(partition, 1) - last_vertex = vertex - chain.append(vertex) - one_to_one_chains.append(chain) - - # Connect a set of 20 vertices in a chain - too_many_vertices = list() - last_vertex = None - for i in range(20): - vertex = SimpleMachineVertex( - resources=ResourceContainer(), label="Vertex_{}".format(i)) - machine_graph.add_vertex(vertex) - if last_vertex is not None: - edge = MachineEdge(last_vertex, vertex) - machine_graph.add_edge(edge, "SPIKES") - partition = machine_graph\ - .get_outgoing_edge_partition_starting_at_vertex( - last_vertex, "SPIKES") - n_keys_map.set_n_keys_for_partition(partition, 1) - too_many_vertices.append(vertex) - last_vertex = vertex - - # Do placements - machine = virtual_machine(width=8, height=8) - placements = spreader_placer( - machine_graph, machine, n_keys_map, plan_n_timesteps=1000) - - # The 1-1 connected vertices should be on the same chip - for chain in one_to_one_chains: - first_placement = placements.get_placement_of_vertex(chain[0]) - for i in range(1, 3): - placement = placements.get_placement_of_vertex(chain[i]) - assert placement.x == first_placement.x - assert placement.y == first_placement.y - - # The other vertices should be on more than one chip - too_many_chips = set() - for vertex in too_many_vertices: - placement = placements.get_placement_of_vertex(vertex) - too_many_chips.add((placement.x, placement.y)) - assert len(too_many_chips) > 1 - - -def test_sdram_links(): - """ Test sdram edges which should explode - """ - unittest_setup() - - # Create a graph - machine_graph = MachineGraph("Test") - - # Connect a set of vertices in a chain of length 3 - last_vertex = None - for x in range(20): - vertex = MockMachineVertex( - resources=ResourceContainer(), - label="Vertex_{}".format(x), sdram_requirement=20) - machine_graph.add_vertex(vertex) - last_vertex = vertex - - for vertex in machine_graph.vertices: - machine_graph.add_outgoing_edge_partition( - ConstantSDRAMMachinePartition( - identifier="SDRAM", pre_vertex=vertex, label="bacon")) - edge = SDRAMMachineEdge(vertex, last_vertex, "bacon", app_edge=None) - 
machine_graph.add_edge(edge, "SDRAM") - n_keys_map = DictBasedMachinePartitionNKeysMap() - - # Do placements - machine = virtual_machine(width=8, height=8) - with pytest.raises(PacmanException): - spreader_placer( - machine_graph, machine, n_keys_map, plan_n_timesteps=1000) diff --git a/unittests/operations_tests/placer_algorithms_tests/test_virtual_placement.py b/unittests/operations_tests/placer_algorithms_tests/test_virtual_placement.py deleted file mode 100644 index 4a77672e6..000000000 --- a/unittests/operations_tests/placer_algorithms_tests/test_virtual_placement.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright (c) 2017-2019 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -import pytest -from spinn_machine import virtual_machine -from pacman.config_setup import unittest_setup -from pacman.model.graphs.machine import ( - MachineGraph, MachineSpiNNakerLinkVertex) -from pacman.operations.chip_id_allocator_algorithms import ( - malloc_based_chip_id_allocator) -from pacman.model.routing_info import DictBasedMachinePartitionNKeysMap -from pacman.operations.placer_algorithms import ( - connective_based_placer, one_to_one_placer, radial_placer, spreader_placer) - - -@pytest.mark.parametrize( - "placer", - ["ConnectiveBasedPlacer", "OneToOnePlacer", "RadialPlacer", - "SpreaderPlacer"]) -def test_virtual_placement(placer): - unittest_setup() - machine = virtual_machine(width=8, height=8) - graph = MachineGraph("Test") - virtual_vertex = MachineSpiNNakerLinkVertex(spinnaker_link_id=0) - graph.add_vertex(virtual_vertex) - extended_machine = malloc_based_chip_id_allocator(machine, graph) - n_keys_map = DictBasedMachinePartitionNKeysMap() - - if placer == "ConnectiveBasedPlacer": - placements = connective_based_placer(graph, machine, None) - elif placer == "OneToOnePlacer": - placements = one_to_one_placer(graph, machine, None) - elif placer == "RadialPlacer": - placements = radial_placer(graph, machine, None) - elif placer == "SpreaderPlacer": - placements = spreader_placer(graph, machine, n_keys_map, None) - else: - raise NotImplementedError(placer) - - placement = placements.get_placement_of_vertex(virtual_vertex) - chip = extended_machine.get_chip_at(placement.x, placement.y) - assert chip.virtual diff --git a/unittests/operations_tests/router_algorithms_tests/test_basic_dijkstra_routing.py b/unittests/operations_tests/router_algorithms_tests/test_basic_dijkstra_routing.py deleted file mode 100644 index 2059b3c43..000000000 --- a/unittests/operations_tests/router_algorithms_tests/test_basic_dijkstra_routing.py +++ /dev/null @@ -1,94 +0,0 @@ -# Copyright (c) 2017-2019 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -import unittest -from collections import deque - -from pacman.config_setup import unittest_setup -from pacman.model.graphs.machine import MulticastEdgePartition -from spinn_machine.virtual_machine import virtual_machine -from pacman.model.graphs.machine import ( - MachineGraph, MachineEdge, SimpleMachineVertex) -from pacman.operations.router_algorithms import basic_dijkstra_routing -from pacman.model.resources import ResourceContainer -from pacman.model.placements import Placements, Placement - - -class TestBasicDijkstraRouting(unittest.TestCase): - - def setUp(self): - unittest_setup() - - def test_routing(self): - graph = MachineGraph("Test") - machine = virtual_machine(2, 2) - placements = Placements() - vertices = list() - - for chip in machine.chips: - for processor in chip.processors: - if not processor.is_monitor: - vertex = SimpleMachineVertex(resources=ResourceContainer()) - graph.add_vertex(vertex) - placements.add_placement(Placement( - vertex, chip.x, chip.y, processor.processor_id)) - vertices.append(vertex) - - for vertex in vertices: - graph.add_outgoing_edge_partition( - MulticastEdgePartition( - identifier="Test", pre_vertex=vertex)) - for vertex_to in vertices: - if vertex != vertex_to: - graph.add_edge(MachineEdge(vertex, vertex_to), "Test") - - routing_paths = basic_dijkstra_routing(placements, machine, graph) - - for vertex in vertices: - vertices_reached = set() - queue = deque() - seen_entries = set() - placement = placements.get_placement_of_vertex(vertex) - partition = graph.get_outgoing_edge_partition_starting_at_vertex( - vertex, "Test") - entry = routing_paths.get_entry_on_coords_for_edge( - partition, placement.x, placement.y) - self.assertEqual(entry.incoming_processor, placement.p) - queue.append((placement.x, placement.y)) - while len(queue) > 0: - x, y = queue.pop() - entry = routing_paths.get_entry_on_coords_for_edge( - partition, x, y) - self.assertIsNotNone(entry) - chip = machine.get_chip_at(x, y) - for p in entry.processor_ids: - self.assertIsNotNone(chip.get_processor_with_id(p)) - vertex_found = placements.get_vertex_on_processor(x, y, p) - vertices_reached.add(vertex_found) - seen_entries.add((x, y)) - for link_id in entry.link_ids: - link = chip.router.get_link(link_id) - self.assertIsNotNone(link) - dest_x, dest_y = link.destination_x, link.destination_y - if (dest_x, dest_y) not in seen_entries: - queue.append((dest_x, dest_y)) - - for vertex_to in vertices: - if vertex != vertex_to: - self.assertIn(vertex_to, vertices_reached) - - -if __name__ == '__main__': - unittest.main() diff --git a/unittests/operations_tests/router_algorithms_tests/test_ner_route_default.py b/unittests/operations_tests/router_algorithms_tests/test_ner_route_default.py deleted file mode 100644 index cd0ad9886..000000000 --- a/unittests/operations_tests/router_algorithms_tests/test_ner_route_default.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (c) 2021 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) 
any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -from pacman.config_setup import unittest_setup -from pacman.model.graphs.machine import ( - MachineGraph, MachineEdge, SimpleMachineVertex) -from spinn_machine import virtual_machine -from pacman.model.placements import Placement, Placements -from pacman.operations.router_algorithms.ner_route import ner_route - - -def test_ner_route_default(): - unittest_setup() - graph = MachineGraph("Test") - machine = virtual_machine(8, 8) - placements = Placements() - - source_vertex = SimpleMachineVertex(None) - graph.add_vertex(source_vertex) - placements.add_placement(Placement(source_vertex, 0, 0, 1)) - target_vertex = SimpleMachineVertex(None) - graph.add_vertex(target_vertex) - placements.add_placement(Placement(target_vertex, 0, 2, 1)) - edge = MachineEdge(source_vertex, target_vertex) - graph.add_edge(edge, "Test") - partition = graph.get_outgoing_partition_for_edge(edge) - - routes = ner_route(graph, machine, placements) - - source_route = routes.get_entries_for_router(0, 0)[partition] - assert(not source_route.defaultable) - mid_route = routes.get_entries_for_router(0, 1)[partition] - print(mid_route.incoming_link, mid_route.link_ids) - assert(mid_route.defaultable) - end_route = routes.get_entries_for_router(0, 2)[partition] - assert(not end_route.defaultable) diff --git a/unittests/operations_tests/router_algorithms_tests/test_ner_route_traffic_aware.py b/unittests/operations_tests/router_algorithms_tests/test_ner_route_traffic_aware.py deleted file mode 100644 index 33dd6e3aa..000000000 --- a/unittests/operations_tests/router_algorithms_tests/test_ner_route_traffic_aware.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright (c) 2017-2019 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
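The Dijkstra test above and the traffic-aware NER test deleted below verify routes identically: walk the routing entries outward from each source placement with a work list and record the chips reached. Stripped of the per-processor assertions, and with routing_paths, partition, placement and machine set up as in those tests, the walk is:

from collections import deque

queue = deque([(placement.x, placement.y)])
seen = set()
while queue:
    x, y = queue.pop()
    # one routing entry per (partition, chip) along the route
    entry = routing_paths.get_entry_on_coords_for_edge(partition, x, y)
    seen.add((x, y))
    for link_id in entry.link_ids:
        # follow each outgoing link to the neighbouring chip
        link = machine.get_chip_at(x, y).router.get_link(link_id)
        dest = (link.destination_x, link.destination_y)
        if dest not in seen:
            queue.append(dest)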
- -import unittest -from collections import deque - -from spinn_utilities.config_holder import set_config -from spinn_machine.virtual_machine import virtual_machine -from pacman.config_setup import unittest_setup -from pacman.model.graphs.machine import ( - MachineGraph, MachineEdge, MulticastEdgePartition, SimpleMachineVertex) -from pacman.operations.router_algorithms import ner_route_traffic_aware -from pacman.model.resources import ResourceContainer -from pacman.model.placements import Placements, Placement - - -class TestNerRouteTrafficAware(unittest.TestCase): - - def setUp(cls): - unittest_setup() - - def test_routing(self): - graph = MachineGraph("Test") - set_config("Machine", "down_chips", "1,2:5,4:3,3") - machine = virtual_machine(8, 8) - placements = Placements() - vertices = list() - - for chip in machine.chips: - for processor in chip.processors: - if not processor.is_monitor: - vertex = SimpleMachineVertex(resources=ResourceContainer()) - graph.add_vertex(vertex) - placements.add_placement(Placement( - vertex, chip.x, chip.y, processor.processor_id)) - vertices.append(vertex) - - for vertex in vertices: - graph.add_outgoing_edge_partition( - MulticastEdgePartition(identifier="Test", pre_vertex=vertex)) - for vertex_to in vertices: - graph.add_edge(MachineEdge(vertex, vertex_to), "Test") - - routing_paths = ner_route_traffic_aware(graph, machine, placements) - - for vertex in vertices: - vertices_reached = set() - queue = deque() - seen_entries = set() - placement = placements.get_placement_of_vertex(vertex) - partition = graph.get_outgoing_edge_partition_starting_at_vertex( - vertex, "Test") - entry = routing_paths.get_entry_on_coords_for_edge( - partition, placement.x, placement.y) - self.assertEqual(entry.incoming_processor, placement.p) - queue.append((placement.x, placement.y)) - while len(queue) > 0: - x, y = queue.pop() - entry = routing_paths.get_entry_on_coords_for_edge( - partition, x, y) - self.assertIsNotNone(entry) - chip = machine.get_chip_at(x, y) - for p in entry.processor_ids: - self.assertIsNotNone(chip.get_processor_with_id(p)) - vertex_found = placements.get_vertex_on_processor(x, y, p) - vertices_reached.add(vertex_found) - seen_entries.add((x, y)) - for link_id in entry.link_ids: - link = chip.router.get_link(link_id) - self.assertIsNotNone(link) - dest_x, dest_y = link.destination_x, link.destination_y - if (dest_x, dest_y) not in seen_entries: - queue.append((dest_x, dest_y)) - - for vertex_to in vertices: - self.assertIn(vertex_to, vertices_reached) - - -if __name__ == '__main__': - unittest.main() diff --git a/unittests/operations_tests/router_algorithms_tests/test_routers.py b/unittests/operations_tests/router_algorithms_tests/test_routers.py new file mode 100644 index 000000000..a256df8e6 --- /dev/null +++ b/unittests/operations_tests/router_algorithms_tests/test_routers.py @@ -0,0 +1,708 @@ +# Copyright (c) 2022 The University of Manchester +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
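The module added below replaces the per-algorithm test files deleted above with a single parametrised suite: a pytest fixture yields an (algorithm, n_vertices, n_m_vertices) triple, so every test body runs once per routing algorithm. In sketch form (the tuples here are placeholders; the real fixture below lists the four imported routing functions):

import pytest

@pytest.fixture(params=[
    ("router_a", 10, 50),   # placeholder tuples; the real fixture below
    ("router_b", 10, 10)])  # passes the routing functions themselves
def params(request):
    return request.param

def test_shape(params):
    # every test in the module unpacks the same triple
    algorithm, n_vertices, n_m_vertices = params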
+from spinn_utilities.timer import Timer +from spinn_utilities.config_holder import set_config +from spinn_machine import virtual_machine +from pacman.model.graphs.application import ( + ApplicationVertex, ApplicationGraph, ApplicationEdge, + ApplicationSpiNNakerLinkVertex, ApplicationFPGAVertex) +from pacman.model.graphs.machine import MulticastEdgePartition, MachineEdge +from pacman.model.partitioner_splitters import SplitterFixedLegacy +from pacman.utilities.utility_objs import ChipCounter +from pacman.operations.placer_algorithms.application_placer import ( + place_application_graph) +from pacman.operations.router_algorithms.application_router import ( + route_application_graph, _path_without_errors) +from pacman.operations.router_algorithms.basic_dijkstra_routing import ( + basic_dijkstra_routing) +from pacman.operations.router_algorithms.ner_route import ( + ner_route, ner_route_traffic_aware) +from pacman.utilities.algorithm_utilities.routing_algorithm_utilities import ( + longest_dimension_first, get_app_partitions, vertex_xy, + vertex_xy_and_route) +from pacman.model.partitioner_splitters.abstract_splitters import ( + AbstractSplitterCommon) +from pacman.config_setup import unittest_setup +from pacman.model.graphs.machine import SimpleMachineVertex +from pacman.model.placements import Placements, Placement +from pacman.model.resources import ResourceContainer, ConstantSDRAM +from pacman.model.graphs import AbstractFPGA, AbstractSpiNNakerLink + +from collections import defaultdict +import math +import pytest + + +@pytest.fixture(params=[ + (route_application_graph, 10, 50), + (basic_dijkstra_routing, 10, 10), + (ner_route, 10, 50), + (ner_route_traffic_aware, 10, 50)]) +def params(request): + return request.param + + +class TestSplitter(AbstractSplitterCommon): + + def __init__(self, n_machine_vertices): + AbstractSplitterCommon.__init__(self) + self.__n_machine_vertices = n_machine_vertices + + def create_machine_vertices(self, chip_counter): + m_vertices = [ + SimpleMachineVertex( + ResourceContainer(), app_vertex=self._governed_app_vertex, + label=f"{self._governed_app_vertex.label}_{i}") + for i in range(self.__n_machine_vertices)] + for m_vertex in m_vertices: + self._governed_app_vertex.remember_machine_vertex(m_vertex) + + def get_out_going_slices(self): + return None + + def get_in_coming_slices(self): + return None + + def get_out_going_vertices(self, partition_id): + return self._governed_app_vertex.machine_vertices + + def get_in_coming_vertices(self, partition_id): + return self._governed_app_vertex.machine_vertices + + def machine_vertices_for_recording(self, variable_to_record): + return [] + + def reset_called(self): + pass + + +class TestMultiInputSplitter(AbstractSplitterCommon): + + def __init__(self, n_incoming_machine_vertices, + n_outgoing_machine_vertices, n_groups, + internal_multicast=False): + AbstractSplitterCommon.__init__(self) + self.__n_incoming_machine_vertices = n_incoming_machine_vertices + self.__n_outgoing_machine_vertices = n_outgoing_machine_vertices + self.__n_groups = n_groups + self.__internal_multicast = internal_multicast + self.__same_chip_groups = list() + self.__incoming_machine_vertices = [ + list() for _ in range(n_incoming_machine_vertices)] + self.__outgoing_machine_vertices = list() + self.__internal_multicast_partitions = list() + + def create_machine_vertices(self, chip_counter): + last_incoming = None + for i in range(self.__n_groups): + incoming = [ + SimpleMachineVertex( + ResourceContainer(), 
app_vertex=self._governed_app_vertex, + label=f"{self._governed_app_vertex.label}_{i}_{j}") + for j in range(self.__n_incoming_machine_vertices)] + outgoing = [ + SimpleMachineVertex( + ResourceContainer(), app_vertex=self._governed_app_vertex, + label=f"{self._governed_app_vertex.label}_{i}_{j}") + for j in range(self.__n_outgoing_machine_vertices)] + self.__same_chip_groups.append( + (incoming + outgoing, ConstantSDRAM(0))) + self.__outgoing_machine_vertices.extend(outgoing) + for out in outgoing: + self._governed_app_vertex.remember_machine_vertex(out) + for j in range(self.__n_incoming_machine_vertices): + self._governed_app_vertex.remember_machine_vertex(incoming[j]) + self.__incoming_machine_vertices[j].append(incoming[j]) + if self.__internal_multicast: + if last_incoming is not None: + for this_in in incoming: + in_part = MulticastEdgePartition(this_in, "internal") + self.__internal_multicast_partitions.append(in_part) + for last_in in last_incoming: + in_part.add_edge( + MachineEdge(this_in, last_in)) + last_incoming = incoming + + def get_out_going_slices(self): + return None + + def get_in_coming_slices(self): + return None + + def get_out_going_vertices(self, partition_id): + return self.__outgoing_machine_vertices + + def get_in_coming_vertices(self, partition_id): + return [v for lst in self.__incoming_machine_vertices for v in lst] + + def get_source_specific_in_coming_vertices( + self, source_vertex, partition_id): + sources = source_vertex.splitter.get_out_going_vertices(partition_id) + n_sources = len(sources) + sources_per_incoming = int(math.ceil( + n_sources / self.__n_incoming_machine_vertices)) + result = list() + for i in range(self.__n_incoming_machine_vertices): + start = sources_per_incoming * i + end = start + sources_per_incoming + if (i + 1) == self.__n_incoming_machine_vertices: + end = n_sources + source_range = sources[start:end] + for i_vertex in self.__incoming_machine_vertices[i]: + result.append((i_vertex, source_range)) + return result + + def machine_vertices_for_recording(self, variable_to_record): + return [] + + def get_internal_multicast_partitions(self): + return self.__internal_multicast_partitions + + def reset_called(self): + pass + + def get_same_chip_groups(self): + return self.__same_chip_groups + + +class TestOneToOneSplitter(AbstractSplitterCommon): + + def __init__(self, n_machine_vertices): + AbstractSplitterCommon.__init__(self) + self.__n_machine_vertices = n_machine_vertices + + def create_machine_vertices(self, chip_counter): + m_vertices = [ + SimpleMachineVertex( + ResourceContainer(), app_vertex=self._governed_app_vertex, + label=f"{self._governed_app_vertex.label}_{i}") + for i in range(self.__n_machine_vertices)] + for m_vertex in m_vertices: + self._governed_app_vertex.remember_machine_vertex(m_vertex) + + def get_out_going_slices(self): + return None + + def get_in_coming_slices(self): + return None + + def get_out_going_vertices(self, partition_id): + return self._governed_app_vertex.machine_vertices + + def get_in_coming_vertices(self, partition_id): + return self._governed_app_vertex.machine_vertices + + def machine_vertices_for_recording(self, variable_to_record): + return [] + + def reset_called(self): + pass + + def get_source_specific_in_coming_vertices( + self, source_vertex, partition_id): + return [ + (target, [source]) + for source, target in zip( + source_vertex.splitter.get_out_going_vertices(partition_id), + self._governed_app_vertex.machine_vertices)] + + +class TestAppVertex(ApplicationVertex): + def 
__init__(self, n_atoms, label): + super(TestAppVertex, self).__init__(label) + self.__n_atoms = n_atoms + + @property + def n_atoms(self): + return self.__n_atoms + + +def _make_vertices(app_graph, n_atoms, n_machine_vertices, label): + vertex = TestAppVertex(n_atoms, label) + vertex.splitter = TestSplitter(n_machine_vertices) + app_graph.add_vertex(vertex) + vertex.splitter.create_machine_vertices(None) + return vertex + + +def _make_one_to_one_vertices(app_graph, n_atoms, n_machine_vertices, label): + vertex = TestAppVertex(n_atoms, label) + vertex.splitter = TestOneToOneSplitter(n_machine_vertices) + app_graph.add_vertex(vertex) + vertex.splitter.create_machine_vertices(None) + return vertex + + +def _make_vertices_split( + app_graph, n_atoms, n_incoming, n_outgoing, n_groups, label, + internal_multicast=False): + vertex = TestAppVertex(n_atoms, label) + vertex.splitter = TestMultiInputSplitter( + n_incoming, n_outgoing, n_groups, internal_multicast) + app_graph.add_vertex(vertex) + vertex.splitter.create_machine_vertices(None) + return vertex + + +def _get_entry(routing_tables, x, y, source_vertex, partition_id, allow_none): + app_entry = routing_tables.get_entry_on_coords_for_edge( + source_vertex.app_vertex, partition_id, x, y) + entry = routing_tables.get_entry_on_coords_for_edge( + source_vertex, partition_id, x, y) + + if entry is None and app_entry is None: + if allow_none: + return None + raise Exception( + f"No entry found on {x}, {y} for {source_vertex}, {partition_id}") + if entry is not None and app_entry is not None: + raise Exception( + f"App-entry and non-app-entry found on {x}, {y} for" + f" {source_vertex}, {partition_id}: {app_entry}: {entry}") + if app_entry is not None: + return app_entry + return entry + + +def _find_targets( + routing_tables, machine, placements, expected_virtual, source_vertex, + partition_id): + found_targets = set() + to_follow = list() + x, y = vertex_xy(source_vertex, placements, machine) + first_entry = _get_entry( + routing_tables, x, y, source_vertex, partition_id, True) + if first_entry is None: + return found_targets + to_follow.append((x, y, first_entry)) + visited = set() + while to_follow: + x, y, next_to_follow = to_follow.pop() + if not machine.is_chip_at(x, y): + raise Exception( + f"Route goes through {x}, {y} but that doesn't exist!") + if (x, y) in visited: + raise Exception( + f"Potential loop found when going through {x}, {y}") + visited.add((x, y)) + for p in next_to_follow.processor_ids: + if (x, y, p) in found_targets: + raise Exception( + f"Potential Loop found when adding routes at {x}, {y}") + found_targets.add(((x, y), p, None)) + for link in next_to_follow.link_ids: + if (x, y, link) in expected_virtual: + found_targets.add(((x, y), None, link)) + else: + if not machine.is_link_at(x, y, link): + raise Exception( + f"Route from {source_vertex}, {partition_id} uses link" + f" {x}, {y}, {link} but that doesn't exist!") + next_x, next_y = machine.xy_over_link(x, y, link) + to_follow.append((next_x, next_y, _get_entry( + routing_tables, next_x, next_y, source_vertex, + partition_id, False))) + return found_targets + + +def _add_virtual(expected_virtual, vertex, machine): + link_data = None + if isinstance(vertex, AbstractFPGA): + link_data = machine.get_fpga_link_with_id( + vertex.fpga_id, vertex.fpga_link_id, vertex.board_address) + elif isinstance(vertex, AbstractSpiNNakerLink): + link_data = machine.get_spinnaker_link_with_id( + vertex.spinnaker_link_id, vertex.board_address) + if link_data is not None: + 
expected_virtual.add(( + link_data.connected_chip_x, link_data.connected_chip_y, + link_data.connected_link)) + + +def _check_edges(routing_tables, machine, placements, app_graph): + for part in get_app_partitions(app_graph): + + # Find the required targets + required_targets = defaultdict(set) + expected_virtual = set() + for edge in part.edges: + post = edge.post_vertex + targets = post.splitter.get_source_specific_in_coming_vertices( + edge.pre_vertex, part.identifier) + for tgt, srcs in targets: + _add_virtual(expected_virtual, tgt, machine) + xy, (m_vertex, core, link) = vertex_xy_and_route( + tgt, placements, machine) + for src in srcs: + if isinstance(src, ApplicationVertex): + for m_vertex in src.splitter.get_out_going_vertices( + part.identifier): + required_targets[m_vertex].add((xy, core, link)) + else: + required_targets[src].add((xy, core, link)) + + splitter = part.pre_vertex.splitter + outgoing = set(splitter.get_out_going_vertices(part.identifier)) + for in_part in splitter.get_internal_multicast_partitions(): + if in_part.identifier == part.identifier: + outgoing.add(in_part.pre_vertex) + for edge in in_part.edges: + xy, (m_vertex, core, link) = vertex_xy_and_route( + edge.post_vertex, placements, machine) + required_targets[in_part.pre_vertex].add((xy, core, link)) + _add_virtual(expected_virtual, edge.post_vertex, machine) + + for m_vertex in outgoing: + actual_targets = _find_targets( + routing_tables, machine, placements, expected_virtual, + m_vertex, part.identifier) + assert(not actual_targets.difference(required_targets[m_vertex])) + + +def _route_and_time(machine, app_graph, placements, algorithm): + timer = Timer() + with timer: + result = algorithm(machine, app_graph, placements) + print(f"Routing took {timer.measured_interval}") + return result + + +def test_simple(params): + algorithm, _n_vertices, n_m_vertices = params + unittest_setup() + app_graph = ApplicationGraph("Test") + source_vertex = _make_vertices(app_graph, 1000, n_m_vertices, "source") + target_vertex = _make_vertices(app_graph, 1000, n_m_vertices, "target") + app_graph.add_edge(ApplicationEdge(source_vertex, target_vertex), "Test") + + machine = virtual_machine(8, 8) + placements = place_application_graph(machine, app_graph, 100, Placements()) + routing_tables = _route_and_time(machine, app_graph, placements, algorithm) + _check_edges(routing_tables, machine, placements, app_graph) + + +def test_self(params): + algorithm, _n_vertices, n_m_vertices = params + unittest_setup() + app_graph = ApplicationGraph("Test") + source_vertex = _make_vertices(app_graph, 1000, n_m_vertices, "self") + app_graph.add_edge(ApplicationEdge(source_vertex, source_vertex), "Test") + + machine = virtual_machine(8, 8) + placements = place_application_graph(machine, app_graph, 100, Placements()) + routing_tables = _route_and_time(machine, app_graph, placements, algorithm) + _check_edges(routing_tables, machine, placements, app_graph) + + +def test_simple_self(params): + algorithm, _n_vertices, n_m_vertices = params + unittest_setup() + app_graph = ApplicationGraph("Test") + source_vertex = _make_vertices(app_graph, 1000, n_m_vertices, "source") + target_vertex = _make_vertices(app_graph, 1000, n_m_vertices, "target") + app_graph.add_edge(ApplicationEdge(source_vertex, source_vertex), "Test") + app_graph.add_edge(ApplicationEdge(target_vertex, target_vertex), "Test") + app_graph.add_edge(ApplicationEdge(source_vertex, target_vertex), "Test") + + machine = virtual_machine(8, 8) + placements = 
place_application_graph(machine, app_graph, 100, Placements()) + routing_tables = _route_and_time(machine, app_graph, placements, algorithm) + _check_edges(routing_tables, machine, placements, app_graph) + + +def test_multi(params): + algorithm, n_vertices, n_m_vertices = params + unittest_setup() + app_graph = ApplicationGraph("Test") + for i in range(n_vertices): + _make_vertices(app_graph, 1000, n_m_vertices, f"app_vertex_{i}") + for source in app_graph.vertices: + for target in app_graph.vertices: + if source != target: + app_graph.add_edge(ApplicationEdge(source, target), "Test") + + machine = virtual_machine(8, 8) + placements = place_application_graph(machine, app_graph, 100, Placements()) + routing_tables = _route_and_time(machine, app_graph, placements, algorithm) + _check_edges(routing_tables, machine, placements, app_graph) + + +def test_multi_self(params): + algorithm, n_vertices, n_m_vertices = params + unittest_setup() + app_graph = ApplicationGraph("Test") + for i in range(n_vertices): + _make_vertices(app_graph, 1000, n_m_vertices, f"app_vertex_{i}") + for source in app_graph.vertices: + for target in app_graph.vertices: + app_graph.add_edge(ApplicationEdge(source, target), "Test") + + machine = virtual_machine(8, 8) + placements = place_application_graph(machine, app_graph, 100, Placements()) + routing_tables = _route_and_time(machine, app_graph, placements, algorithm) + _check_edges(routing_tables, machine, placements, app_graph) + + +def test_multi_split(params): + algorithm, n_vertices, n_m_vertices = params + unittest_setup() + app_graph = ApplicationGraph("Test") + for i in range(n_vertices): + _make_vertices_split(app_graph, 1000, 3, 2, n_m_vertices, + f"app_vertex_{i}") + for source in app_graph.vertices: + for target in app_graph.vertices: + if source != target: + app_graph.add_edge(ApplicationEdge(source, target), "Test") + + machine = virtual_machine(24, 24) + placements = place_application_graph(machine, app_graph, 100, Placements()) + routing_tables = _route_and_time(machine, app_graph, placements, algorithm) + _check_edges(routing_tables, machine, placements, app_graph) + + +def test_multi_self_split(params): + algorithm, n_vertices, n_m_vertices = params + unittest_setup() + app_graph = ApplicationGraph("Test") + for i in range(n_vertices): + _make_vertices_split(app_graph, 1000, 3, 2, n_m_vertices, + f"app_vertex_{i}") + for source in app_graph.vertices: + for target in app_graph.vertices: + app_graph.add_edge(ApplicationEdge(source, target), "Test") + + machine = virtual_machine(24, 24) + placements = place_application_graph(machine, app_graph, 100, Placements()) + routing_tables = _route_and_time(machine, app_graph, placements, algorithm) + _check_edges(routing_tables, machine, placements, app_graph) + + +def test_multi_down_chips_and_links(params): + algorithm, n_vertices, n_m_vertices = params + unittest_setup() + app_graph = ApplicationGraph("Test") + for i in range(n_vertices): + _make_vertices(app_graph, 1000, n_m_vertices, f"app_vertex_{i}") + for source in app_graph.vertices: + for target in app_graph.vertices: + app_graph.add_edge(ApplicationEdge(source, target), "Test") + + machine = virtual_machine(8, 8) + placements = place_application_graph(machine, app_graph, 100, Placements()) + routing_tables = _route_and_time(machine, app_graph, placements, algorithm) + + # Pick a few of the chips and links used and take them out + chosen_entries = list() + count = 2 + for x, y in routing_tables.get_routers(): + if len(chosen_entries) >= 10: + break + + if 
count != 0: + count -= 1 + else: + entries = routing_tables.get_entries_for_router(x, y) + count = 11 - len(chosen_entries) + for entry in entries.values(): + if count != 0: + count -= 1 + else: + chosen_entries.append((x, y, entry)) + break + count = 2 + + down_links = "" + down_chips = "" + for i, (x, y, entry) in enumerate(chosen_entries): + if entry.link_ids: + link = list(entry.link_ids)[i % len(entry.link_ids)] + t_x, t_y = machine.xy_over_link(x, y, link) + t_l = (link + 3) % 6 + down_links += f"{x},{y},{link}:" + down_links += f"{t_x},{t_y},{t_l}:" + else: + down_chips += f"{x},{y}:" + + print("Down chips:", down_chips[:-1].split(":")) + print("Down links:", down_links[:-1].split(":")) + set_config("Machine", "down_chips", down_chips[:-1]) + set_config("Machine", "down_links", down_links[:-1]) + machine_down = virtual_machine(8, 8) + placements = place_application_graph( + machine_down, app_graph, 100, Placements()) + routing_tables = _route_and_time( + machine_down, app_graph, placements, algorithm) + _check_edges(routing_tables, machine_down, placements, app_graph) + + +def test_internal_only(params): + algorithm, _n_vertices, _n_m_vertices = params + unittest_setup() + app_graph = ApplicationGraph("Test") + _make_vertices_split( + app_graph, 1000, 3, 2, 2, "app_vertex", + internal_multicast=True) + + machine = virtual_machine(24, 24) + placements = place_application_graph(machine, app_graph, 100, Placements()) + routing_tables = _route_and_time(machine, app_graph, placements, algorithm) + _check_edges(routing_tables, machine, placements, app_graph) + + +def test_internal_and_split(params): + algorithm, n_vertices, n_m_vertices = params + unittest_setup() + app_graph = ApplicationGraph("Test") + for i in range(n_vertices): + _make_vertices_split( + app_graph, 1000, 3, 2, n_m_vertices, f"app_vertex_{i}", + internal_multicast=True) + for source in app_graph.vertices: + for target in app_graph.vertices: + if source != target: + app_graph.add_edge(ApplicationEdge(source, target), "Test") + + machine = virtual_machine(24, 24) + placements = place_application_graph(machine, app_graph, 100, Placements()) + routing_tables = _route_and_time(machine, app_graph, placements, algorithm) + _check_edges(routing_tables, machine, placements, app_graph) + + +def test_spinnaker_link(params): + algorithm, n_vertices, n_m_vertices = params + unittest_setup() + app_graph = ApplicationGraph("Test") + in_device = ApplicationSpiNNakerLinkVertex(100, 0) + in_device.splitter = SplitterFixedLegacy() + in_device.splitter.create_machine_vertices(ChipCounter()) + app_graph.add_vertex(in_device) + out_device = ApplicationSpiNNakerLinkVertex(100, 0) + out_device.splitter = SplitterFixedLegacy() + out_device.splitter.create_machine_vertices(ChipCounter()) + app_graph.add_vertex(out_device) + for i in range(n_vertices): + app_vertex = _make_vertices( + app_graph, 1000, n_m_vertices, f"app_vertex_{i}") + app_graph.add_edge(ApplicationEdge(in_device, app_vertex), "Test") + app_graph.add_edge(ApplicationEdge(app_vertex, out_device), "Test") + + machine = virtual_machine(8, 8) + placements = place_application_graph(machine, app_graph, 100, Placements()) + routing_tables = _route_and_time(machine, app_graph, placements, algorithm) + _check_edges(routing_tables, machine, placements, app_graph) + + +def test_fpga_link(params): + algorithm, n_vertices, n_m_vertices = params + unittest_setup() + app_graph = ApplicationGraph("Test") + in_device = ApplicationFPGAVertex(100, 0, 0) + in_device.splitter = SplitterFixedLegacy() 
+ in_device.splitter.create_machine_vertices(ChipCounter()) + app_graph.add_vertex(in_device) + out_device = ApplicationFPGAVertex(100, 0, 1) + out_device.splitter = SplitterFixedLegacy() + out_device.splitter.create_machine_vertices(ChipCounter()) + app_graph.add_vertex(out_device) + for i in range(n_vertices): + app_vertex = _make_vertices( + app_graph, 1000, n_m_vertices, f"app_vertex_{i}") + app_graph.add_edge(ApplicationEdge(in_device, app_vertex), "Test") + app_graph.add_edge(ApplicationEdge(app_vertex, out_device), "Test") + + machine = virtual_machine(8, 8) + placements = place_application_graph(machine, app_graph, 100, Placements()) + routing_tables = _route_and_time(machine, app_graph, placements, algorithm) + _check_edges(routing_tables, machine, placements, app_graph) + + +def test_odd_case(params): + algorithm, _n_vertices, _n_m_vertices = params + unittest_setup() + app_graph = ApplicationGraph("Test") + target_vertex = _make_vertices(app_graph, 200, 20, "app_vertex") + delay_vertex = _make_one_to_one_vertices(app_graph, 200, 20, "delay_vtx") + app_graph.add_edge(ApplicationEdge(target_vertex, target_vertex), "Test") + app_graph.add_edge(ApplicationEdge(target_vertex, delay_vertex), "Test") + app_graph.add_edge(ApplicationEdge(delay_vertex, target_vertex), "Test") + + machine = virtual_machine(8, 8) + placements = Placements() + cores = [(x, y, p) for x, y in [(0, 3), (1, 3)] for p in range(3, 18)] + core_iter = iter(cores) + for m_vertex in delay_vertex.machine_vertices: + x, y, p = next(core_iter) + placements.add_placement(Placement(m_vertex, x, y, p)) + cores = [(0, 0, 3)] + cores.extend( + [(x, y, p) + for x, y in [(1, 0), (1, 1), (0, 1), (2, 0), (2, 1), (2, 2), + (1, 2), (0, 2), (3, 0), (3, 1), (3, 2)] + for p in range(2, 4)]) + core_iter = iter(cores) + for m_vertex in target_vertex.machine_vertices: + x, y, p = next(core_iter) + placements.add_placement(Placement(m_vertex, x, y, p)) + + routing_tables = _route_and_time(machine, app_graph, placements, algorithm) + _check_edges(routing_tables, machine, placements, app_graph) + + +def _check_path(source, nodes_fixed, machine, target): + c_x, c_y = source + seen = set() + for direction, (n_x, n_y) in nodes_fixed: + if (c_x, c_y) in seen: + raise Exception(f"Loop detected at {c_x}, {c_y}: {nodes_fixed}") + if not machine.is_chip_at(c_x, c_y): + raise Exception( + f"Route through down chip {c_x}, {c_y}: {nodes_fixed}") + if not machine.is_link_at(c_x, c_y, direction): + raise Exception( + f"Route through down link {c_x}, {c_y}, {direction}:" + f" {nodes_fixed}") + if machine.xy_over_link(c_x, c_y, direction) != (n_x, n_y): + raise Exception( + f"Invalid route from {c_x}, {c_y}, {direction} to {n_x}, {n_y}" + f": {nodes_fixed}") + seen.add((c_x, c_y)) + c_x, c_y = n_x, n_y + + if (c_x, c_y) != target: + raise Exception(f"Route doesn't end at {target}: {nodes_fixed}") + + +def test_route_around(): + unittest_setup() + # Take out all the chips around 3,3 except one, then make a path that goes + # through it + # 3,4 4,4 + # 2,3 3,3 4,3 + # 2,2 3,2 + set_config("Machine", "down_chips", "2,3:3,2:3,4:4,4:4,3") + machine = virtual_machine(8, 8) + vector = machine.get_vector((0, 0), (6, 6)) + nodes = longest_dimension_first(vector, (0, 0), machine) + nodes_fixed = _path_without_errors((0, 0), nodes, machine) + _check_path((0, 0), nodes_fixed, machine, (6, 6)) + + vector = machine.get_vector((2, 2), (6, 6)) + nodes = longest_dimension_first(vector, (2, 2), machine) + nodes_fixed = _path_without_errors((2, 2), nodes, machine) + 
_check_path((2, 2), nodes_fixed, machine, (6, 6)) + + print(nodes) + print(nodes_fixed) diff --git a/unittests/operations_tests/routing_info_algorithms_tests/test_malloc_routing_info_allocator.py b/unittests/operations_tests/routing_info_algorithms_tests/test_malloc_routing_info_allocator.py deleted file mode 100644 index a7e9ca746..000000000 --- a/unittests/operations_tests/routing_info_algorithms_tests/test_malloc_routing_info_allocator.py +++ /dev/null @@ -1,347 +0,0 @@ -# Copyright (c) 2017-2019 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -import unittest -from pacman.config_setup import unittest_setup -from pacman.exceptions import PacmanRouteInfoAllocationException -from pacman.model.constraints.key_allocator_constraints import ( - FixedKeyAndMaskConstraint, ShareKeyConstraint) -from pacman.model.graphs.machine import ( - MachineGraph, SimpleMachineVertex, MachineEdge) -from pacman.model.graphs.machine import MulticastEdgePartition -from pacman.model.resources import ResourceContainer -from pacman.operations.routing_info_allocator_algorithms\ - .malloc_based_routing_allocator.malloc_based_routing_info_allocator\ - import (_MallocBasedRoutingInfoAllocator, - malloc_based_routing_info_allocator) -from pacman.model.routing_info import ( - BaseKeyAndMask, DictBasedMachinePartitionNKeysMap) - - -class MyTestCase(unittest.TestCase): - - def setUp(self): - unittest_setup() - - def test_allocate_fixed_key_and_mask(self): - allocator = _MallocBasedRoutingInfoAllocator(None) - allocator._allocate_fixed_keys_and_masks( - [BaseKeyAndMask(0x800, 0xFFFFF800)], None) - error = ("Allocation has not resulted in the expected free space" - " being available") - print(allocator._free_space_tracker) - self.assertEqual(len(allocator._free_space_tracker), 2, error) - self.assertEqual(allocator._free_space_tracker[0].start_address, 0, - error) - self.assertEqual(allocator._free_space_tracker[0].size, 2048, - error) - self.assertEqual(allocator._free_space_tracker[1].start_address, - 0x1000, error) - self.assertEqual(allocator._free_space_tracker[1].size, 0xFFFFF000, - error) - - def _print_keys_and_masks(self, keys_and_masks): - for key_and_mask in keys_and_masks: - print("key =", hex(key_and_mask.key), - "mask =", hex(key_and_mask.mask)) - - def test_allocate_fixed_mask(self): - allocator = _MallocBasedRoutingInfoAllocator(None) - self._print_keys_and_masks(allocator._allocate_keys_and_masks( - 0xFFFFFF00, None, 20)) - error = ("Allocation has not resulted in the expected free space" - " being available") - print(allocator._free_space_tracker) - self.assertEqual(len(allocator._free_space_tracker), 1, error) - self.assertEqual(allocator._free_space_tracker[0].start_address, 0x100, - error) - self.assertEqual(allocator._free_space_tracker[0].size, 0xFFFFFF00, - error) - - def test_allocate_n_keys(self): - allocator = _MallocBasedRoutingInfoAllocator(None) - 
self._print_keys_and_masks(allocator._allocate_keys_and_masks( - None, None, 20)) - error = ("Allocation has not resulted in the expected free space" - " being available") - print(allocator._free_space_tracker) - self.assertEqual(len(allocator._free_space_tracker), 1, error) - self.assertEqual(allocator._free_space_tracker[0].start_address, 32, - error) - self.assertEqual(allocator._free_space_tracker[0].size, - 0x100000000 - 32, error) - - def test_allocate_mixed_keys(self): - fixed_masks = [None, None, 0xFFFFFF00, 0xFFFFF800] - n_keys = [200, 20, 20, 256] - - allocator = _MallocBasedRoutingInfoAllocator(None) - - allocator._allocate_fixed_keys_and_masks( - [BaseKeyAndMask(0x800, 0xFFFFF800)], None) - - print(allocator._free_space_tracker) - - for mask, keys in zip(fixed_masks, n_keys): - self._print_keys_and_masks( - allocator._allocate_keys_and_masks(mask, None, keys)) - print(allocator._free_space_tracker) - - print(allocator._free_space_tracker) - - error = ("Allocation has not resulted in the expected free space" - " being available") - self.assertEqual(len(allocator._free_space_tracker), 3, error) - self.assertEqual(allocator._free_space_tracker[0].start_address, - 0x120, error) - self.assertEqual(allocator._free_space_tracker[0].size, - 224, error) - self.assertEqual(allocator._free_space_tracker[1].start_address, - 0x300, error) - self.assertEqual(allocator._free_space_tracker[1].size, - 1280, error) - self.assertEqual(allocator._free_space_tracker[2].start_address, - 0x1800, error) - self.assertEqual(allocator._free_space_tracker[2].size, - 0x100000000 - 0x1800, error) - - def _integration_setup(self): - machine_graph = MachineGraph(label="test me you git") - n_keys_map = DictBasedMachinePartitionNKeysMap() - v1 = SimpleMachineVertex(ResourceContainer()) - v2 = SimpleMachineVertex(ResourceContainer()) - v3 = SimpleMachineVertex(ResourceContainer()) - v4 = SimpleMachineVertex(ResourceContainer()) - machine_graph.add_vertex(v1) - machine_graph.add_vertex(v2) - machine_graph.add_vertex(v3) - machine_graph.add_vertex(v4) - - e1 = MachineEdge(v1, v2, label="e1") - e2 = MachineEdge(v1, v3, label="e2") - e3 = MachineEdge(v2, v3, label="e3") - e4 = MachineEdge(v1, v4, label="e4") - - machine_graph.add_outgoing_edge_partition( - MulticastEdgePartition(identifier="part1", pre_vertex=v1)) - machine_graph.add_outgoing_edge_partition( - MulticastEdgePartition(identifier="part2", pre_vertex=v2)) - machine_graph.add_outgoing_edge_partition( - MulticastEdgePartition(identifier="part2", pre_vertex=v1)) - - machine_graph.add_edge(e1, "part1") - machine_graph.add_edge(e2, "part1") - machine_graph.add_edge(e3, "part2") - machine_graph.add_edge(e4, "part2") - - for partition in machine_graph.outgoing_edge_partitions: - n_keys_map.set_n_keys_for_partition(partition, 24) - - return machine_graph, n_keys_map, v1, v2, v3, v4, e1, e2, e3, e4 - - def test_share_key_with_2_nests(self): - machine_graph, n_keys_map, v1, v2, _v3, v4, e1, e2, e3, e4 = ( - self._integration_setup()) - e5 = MachineEdge(v4, v2, label="e1") - machine_graph.add_outgoing_edge_partition( - MulticastEdgePartition(identifier="part3", pre_vertex=v4)) - - machine_graph.add_edge(e5, "part3") - partition2 = machine_graph.\ - get_outgoing_edge_partition_starting_at_vertex(v4, "part3") - n_keys_map.set_n_keys_for_partition(partition2, 24) - - partition1 = machine_graph.\ - get_outgoing_edge_partition_starting_at_vertex(v1, "part1") - partition4 = machine_graph.\ - get_outgoing_edge_partition_starting_at_vertex(v1, "part2") - partition3 = 
machine_graph.\ - get_outgoing_edge_partition_starting_at_vertex(v2, "part2") - - partition1.add_constraint(ShareKeyConstraint([partition4])) - partition2.add_constraint(ShareKeyConstraint([partition3])) - partition3.add_constraint(ShareKeyConstraint([partition1])) - - results = malloc_based_routing_info_allocator( - machine_graph, n_keys_map) - - key = results.get_first_key_from_partition( - machine_graph.get_outgoing_edge_partition_starting_at_vertex( - v1, "part1")) - - edge1_key = results.get_first_key_for_edge(e1) - edge2_key = results.get_first_key_for_edge(e2) - edge3_key = results.get_first_key_for_edge(e3) - edge4_key = results.get_first_key_for_edge(e4) - edge5_key = results.get_first_key_for_edge(e5) - - self.assertEqual(edge1_key, key) - self.assertEqual(edge2_key, key) - self.assertEqual(edge3_key, key) - self.assertEqual(edge4_key, key) - self.assertEqual(edge5_key, key) - - def test_share_key_with_conflicting_fixed_key_on_partitions(self): - machine_graph, n_keys_map, v1, v2, _v3, _v4, _e1, _e2, _e3, _e4 = \ - self._integration_setup() - - partition = machine_graph.\ - get_outgoing_edge_partition_starting_at_vertex(v1, "part1") - other_partition = machine_graph.\ - get_outgoing_edge_partition_starting_at_vertex(v2, "part2") - other_partition.add_constraint(ShareKeyConstraint([partition])) - - other_partition.add_constraint(FixedKeyAndMaskConstraint( - [BaseKeyAndMask(base_key=30, mask=0xFFFFFFF)])) - partition.add_constraint(FixedKeyAndMaskConstraint( - [BaseKeyAndMask(base_key=25, mask=0xFFFFFFF)])) - - with self.assertRaises(PacmanRouteInfoAllocationException): - malloc_based_routing_info_allocator(machine_graph, n_keys_map) - - def test_share_key_with_fixed_key_on_new_partitions_other_order(self): - machine_graph, n_keys_map, v1, v2, _v3, _v4, e1, e2, e3, e4 = \ - self._integration_setup() - - partition = machine_graph.\ - get_outgoing_edge_partition_starting_at_vertex(v1, "part1") - other_partition = machine_graph.\ - get_outgoing_edge_partition_starting_at_vertex(v2, "part2") - other_partition.add_constraint(ShareKeyConstraint([partition])) - partition.add_constraint(FixedKeyAndMaskConstraint( - [BaseKeyAndMask(base_key=25, mask=0xFFFFFFF)])) - - results = malloc_based_routing_info_allocator( - machine_graph, n_keys_map) - - key = results.get_first_key_from_partition( - machine_graph.get_outgoing_edge_partition_starting_at_vertex( - v1, "part1")) - edge1_key = results.get_first_key_for_edge(e1) - edge2_key = results.get_first_key_for_edge(e2) - edge3_key = results.get_first_key_for_edge(e3) - edge4_key = results.get_first_key_for_edge(e4) - - self.assertEqual(key, 25) - self.assertEqual(edge1_key, key) - self.assertEqual(edge2_key, key) - self.assertEqual(edge3_key, key) - self.assertNotEqual(edge4_key, key) - - def test_share_key_with_fixed_key_on_new_partitions(self): - machine_graph, n_keys_map, v1, v2, _v3, _v4, e1, e2, e3, e4 = \ - self._integration_setup() - - partition = machine_graph.\ - get_outgoing_edge_partition_starting_at_vertex(v1, "part1") - other_partition = machine_graph.\ - get_outgoing_edge_partition_starting_at_vertex(v2, "part2") - partition.add_constraint(ShareKeyConstraint([other_partition])) - other_partition.add_constraint(FixedKeyAndMaskConstraint( - [BaseKeyAndMask(base_key=25, mask=0xFFFFFFF)])) - - results = malloc_based_routing_info_allocator( - machine_graph, n_keys_map) - - key = results.get_first_key_from_partition( - machine_graph.get_outgoing_edge_partition_starting_at_vertex( - v1, "part1")) - edge1_key = 
results.get_first_key_for_edge(e1) - edge2_key = results.get_first_key_for_edge(e2) - edge3_key = results.get_first_key_for_edge(e3) - edge4_key = results.get_first_key_for_edge(e4) - - self.assertEqual(key, 25) - self.assertEqual(edge1_key, key) - self.assertEqual(edge2_key, key) - self.assertEqual(edge3_key, key) - self.assertNotEqual(edge4_key, key) - - def test_share_key_on_own_partition(self): - machine_graph, n_keys_map, v1, _v2, _v3, _v4, e1, e2, e3, e4 = \ - self._integration_setup() - - partition = machine_graph.\ - get_outgoing_edge_partition_starting_at_vertex(v1, "part1") - other_partition = machine_graph.\ - get_outgoing_edge_partition_starting_at_vertex(v1, "part2") - partition.add_constraint(ShareKeyConstraint([other_partition])) - - results = malloc_based_routing_info_allocator( - machine_graph, n_keys_map) - - key = results.get_first_key_from_partition( - machine_graph.get_outgoing_edge_partition_starting_at_vertex( - v1, "part1")) - edge1_key = results.get_first_key_for_edge(e1) - edge2_key = results.get_first_key_for_edge(e2) - edge3_key = results.get_first_key_for_edge(e3) - edge4_key = results.get_first_key_for_edge(e4) - - self.assertEqual(edge1_key, key) - self.assertEqual(edge2_key, key) - self.assertNotEqual(edge3_key, key) - self.assertEqual(edge4_key, key) - - def test_share_key_on_new_partitions(self): - machine_graph, n_keys_map, v1, v2, _v3, _v4, e1, e2, e3, e4 = \ - self._integration_setup() - - partition = machine_graph.\ - get_outgoing_edge_partition_starting_at_vertex(v1, "part1") - other_partition = machine_graph.\ - get_outgoing_edge_partition_starting_at_vertex(v2, "part2") - partition.add_constraint(ShareKeyConstraint([other_partition])) - - results = malloc_based_routing_info_allocator( - machine_graph, n_keys_map) - - key = results.get_first_key_from_partition( - machine_graph.get_outgoing_edge_partition_starting_at_vertex( - v1, "part1")) - edge1_key = results.get_first_key_for_edge(e1) - edge2_key = results.get_first_key_for_edge(e2) - edge3_key = results.get_first_key_for_edge(e3) - edge4_key = results.get_first_key_for_edge(e4) - - self.assertEqual(edge1_key, key) - self.assertEqual(edge2_key, key) - self.assertEqual(edge3_key, key) - self.assertNotEqual(edge4_key, key) - - def test_no_share_key_on_partitions(self): - machine_graph, n_keys_map, v1, _v2, _v3, _v4, e1, e2, e3, e4 = \ - self._integration_setup() - - results = malloc_based_routing_info_allocator( - machine_graph, n_keys_map) - - key = results.get_first_key_from_partition( - machine_graph.get_outgoing_edge_partition_starting_at_vertex( - v1, "part1")) - - edge1_key = results.get_first_key_for_edge(e1) - edge2_key = results.get_first_key_for_edge(e2) - edge3_key = results.get_first_key_for_edge(e3) - edge4_key = results.get_first_key_for_edge(e4) - - self.assertEqual(edge1_key, key) - self.assertEqual(edge2_key, key) - self.assertNotEqual(edge3_key, key) - self.assertNotEqual(edge4_key, key) - - -if __name__ == '__main__': - unittest.main() diff --git a/unittests/operations_tests/routing_info_algorithms_tests/test_mask_generator.py b/unittests/operations_tests/routing_info_algorithms_tests/test_mask_generator.py deleted file mode 100644 index 1e4e247d4..000000000 --- a/unittests/operations_tests/routing_info_algorithms_tests/test_mask_generator.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright (c) 2017-2019 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the 
Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -import pytest -from pacman.config_setup import unittest_setup -from pacman.operations.routing_info_allocator_algorithms.\ - malloc_based_routing_allocator.utils import ( - get_possible_masks) - - -def test_mask_generator(): - """ This checks behaviour, but with restricted bitfield sizes. - """ - unittest_setup() - assert frozenset(get_possible_masks(2, 4, False)) == frozenset( - {14, 13, 11, 7}) - assert frozenset(get_possible_masks(2, 4, True)) == frozenset( - {14}) - assert frozenset(get_possible_masks(5, 4, False)) == frozenset( - {1, 2, 4, 8}) - assert frozenset(get_possible_masks(5, 5, False)) == frozenset( - {3, 5, 6, 9, 10, 12, 17, 18, 20, 24}) - assert frozenset(get_possible_masks(7, 5, False)) == frozenset( - {3, 5, 6, 9, 10, 12, 17, 18, 20, 24}) - assert frozenset(get_possible_masks(7, 3, False)) == frozenset( - {0}) - with pytest.raises(AssertionError): - # Can't fit - get_possible_masks(7, 2, False) diff --git a/unittests/operations_tests/routing_info_algorithms_tests/test_zoned_routing_allocator.py b/unittests/operations_tests/routing_info_algorithms_tests/test_zoned_routing_allocator.py index 1eb2f8aba..1016f12b3 100644 --- a/unittests/operations_tests/routing_info_algorithms_tests/test_zoned_routing_allocator.py +++ b/unittests/operations_tests/routing_info_algorithms_tests/test_zoned_routing_allocator.py @@ -15,345 +15,324 @@ from pacman.config_setup import unittest_setup from pacman.operations.routing_info_allocator_algorithms.\ zoned_routing_info_allocator import (flexible_allocate, global_allocate) -from pacman.model.graphs.application.application_graph import ApplicationGraph -from pacman.model.graphs.machine import SimpleMachineVertex -from pacman.model.graphs.machine.machine_graph import MachineGraph -from pacman.model.routing_info import DictBasedMachinePartitionNKeysMap -from pacman.model.graphs.machine.machine_edge import MachineEdge +from pacman.model.graphs.application import ( + ApplicationGraph, ApplicationEdge, ApplicationVertex) from pacman.model.routing_info.base_key_and_mask import BaseKeyAndMask +from pacman.model.graphs.machine.machine_vertex import MachineVertex +from pacman.model.partitioner_splitters.abstract_splitters import ( + AbstractSplitterCommon) from pacman.model.constraints.key_allocator_constraints \ import FixedKeyAndMaskConstraint -from pacman_test_objects import SimpleTestVertex + + +class TestSplitter(AbstractSplitterCommon): + + def create_machine_vertices(self, chip_counter): + return 1 + + def get_out_going_vertices(self, partition_id): + return self._governed_app_vertex.machine_vertices + + def get_in_coming_vertices(self, partition_id): + return self._governed_app_vertex.machine_vertices + + def machine_vertices_for_recording(self, variable_to_record): + return list(self._governed_app_vertex.machine_vertices) + + def get_out_going_slices(self): + return [m.slice for m in self._governed_app_vertex.machine_vertices] + + def get_in_coming_slices(self): + return [m.slice for m in self._governed_app_vertex.machine_vertices] + + def reset_called(self): + pass + + +class 
TestAppVertex(ApplicationVertex): + + @property + def n_atoms(self): + return 10 + + +class TestMacVertex(MachineVertex): + + def __init__( + self, label=None, constraints=None, app_vertex=None, + vertex_slice=None, n_keys_required=None): + super(TestMacVertex, self).__init__( + label=label, constraints=constraints, app_vertex=app_vertex, + vertex_slice=vertex_slice) + self.__n_keys_required = n_keys_required + + def get_n_keys_for_partition(self, partition_id): + return self.__n_keys_required[partition_id] + + @property + def resources_required(self): + # Not needed for test + return None def create_graphs1(with_fixed): app_graph = ApplicationGraph("Test") # An output vertex to aim things at (to make keys required) - out_app_vertex = SimpleTestVertex(1) + out_app_vertex = TestAppVertex(splitter=TestSplitter()) app_graph.add_vertex(out_app_vertex) # Create 5 application vertices (3 bits) app_vertices = list() for app_index in range(5): - app_vertices.append(SimpleTestVertex(1)) + app_vertices.append(TestAppVertex(splitter=TestSplitter())) app_graph.add_vertices(app_vertices) - mac_graph = MachineGraph("Test", app_graph) - n_keys_map = DictBasedMachinePartitionNKeysMap() - # An output vertex to aim things at (to make keys required) - out_mac_vertex = out_app_vertex.create_machine_vertex(None, None) - mac_graph.add_vertex(out_mac_vertex) + out_mac_vertex = TestMacVertex(label="out_vertex") + out_app_vertex.remember_machine_vertex(out_mac_vertex) - # Create 5 application vertices (3 bits) for app_index, app_vertex in enumerate(app_vertices): - # For each, create up to (5 x 4) + 1 = 21 machine vertices (5 bits) - for mac_index in range((app_index * 4) + 1): - mac_vertex = app_vertex.create_machine_vertex(None, None) - mac_graph.add_vertex(mac_vertex) - - # For each machine vertex create up to - # (20 x 2) + 1 = 81(!) 
partitions (6 bits) - for mac_edge_index in range((mac_index * 2) + 1): - mac_edge = MachineEdge(mac_vertex, out_mac_vertex) - part_name = "Part{}".format(mac_edge_index) - mac_graph.add_edge(mac_edge, part_name) - - # Give the partition up to (40 x 4) + 1 = 161 keys (8 bits) - p = mac_graph.get_outgoing_edge_partition_starting_at_vertex( - mac_vertex, part_name) - if with_fixed: - if (app_index == 2 and mac_index == 4 and - part_name == "Part7"): - p.add_constraint(FixedKeyAndMaskConstraint( - [BaseKeyAndMask(0xFE00000, 0xFFFFFFC0)])) - if (app_index == 2 and mac_index == 0 and - part_name == "Part1"): - p.add_constraint(FixedKeyAndMaskConstraint( - [BaseKeyAndMask(0x4c00000, 0xFFFFFFFE)])) - if (app_index == 2 and mac_index == 0 and - part_name == "Part1"): - p.add_constraint(FixedKeyAndMaskConstraint( - [BaseKeyAndMask(0x4c00000, 0xFFFFFFFF)])) - if (app_index == 3 and mac_index == 0 and - part_name == "Part1"): - p.add_constraint(FixedKeyAndMaskConstraint( - [BaseKeyAndMask(0x3300000, 0xFFFFFFFF)])) - if (app_index == 3 and mac_index == 0 and - part_name == "Part1"): - p.add_constraint(FixedKeyAndMaskConstraint( - [BaseKeyAndMask(0x3300001, 0)])) - n_keys_map.set_n_keys_for_partition( - p, (mac_edge_index * 4) + 1) - - return app_graph, mac_graph, n_keys_map + # Create up to (10 * 4) + 1 = 41 partitions (6 bits) + for i in range((app_index * 10) + 1): + app_graph.add_edge( + ApplicationEdge(app_vertex, out_app_vertex), f"Part{i}") + + # For each, create up to (40 * 2) + 1 = 81 machine vertices (7 bits) + for mac_index in range((app_index * 2 * 10) + 1): + + # Give the vertex up to (80 * 2) + 1 = 161 keys (8 bits) + mac_vertex = TestMacVertex( + label=f"Part{i}_vertex", + n_keys_required={f"Part{i}": (mac_index * 2) + 1 + for i in range((app_index * 10) + 1)}) + app_vertex.remember_machine_vertex(mac_vertex) + + if with_fixed: + if app_index == 2: + app_vertex.add_constraint(FixedKeyAndMaskConstraint( + [BaseKeyAndMask(0xFE00000, 0xFFFFFFC0)], + partition="Part7")) + if app_index == 2: + app_vertex.add_constraint(FixedKeyAndMaskConstraint( + [BaseKeyAndMask(0x4c00000, 0xFFFFFFFE)], + partition="Part1")) + if app_index == 3: + app_vertex.add_constraint(FixedKeyAndMaskConstraint( + [BaseKeyAndMask(0x3300000, 0xFFFFFFFF)], + partition="Part1")) + + return app_graph def create_graphs_only_fixed(): app_graph = ApplicationGraph("Test") # An output vertex to aim things at (to make keys required) - out_app_vertex = SimpleTestVertex(1) + out_app_vertex = TestAppVertex(splitter=TestSplitter()) app_graph.add_vertex(out_app_vertex) - # Create 5 application vertices (3 bits) - app_vertex = SimpleTestVertex(1) + app_vertex = TestAppVertex(splitter=TestSplitter()) app_graph.add_vertex(app_vertex) - mac_graph = MachineGraph("Test", app_graph) - n_keys_map = DictBasedMachinePartitionNKeysMap() - # An output vertex to aim things at (to make keys required) - out_mac_vertex = out_app_vertex.create_machine_vertex(None, None) - mac_graph.add_vertex(out_mac_vertex) - - mac_vertex = app_vertex.create_machine_vertex(None, None) - mac_graph.add_vertex(mac_vertex) - for mac_edge_index in range(2): - mac_edge = MachineEdge(mac_vertex, out_mac_vertex) - part_name = "Part{}".format(mac_edge_index) - mac_graph.add_edge(mac_edge, part_name) - p = mac_graph.get_outgoing_edge_partition_starting_at_vertex( - mac_vertex, part_name) - if (mac_edge_index == 0): - p.add_constraint(FixedKeyAndMaskConstraint( - [BaseKeyAndMask(0x4c00000, 0xFFFFFFFE)])) - if (mac_edge_index == 1): - p.add_constraint(FixedKeyAndMaskConstraint( - 
[BaseKeyAndMask(0x4c00000, 0xFFFFFFFF)])) - n_keys_map.set_n_keys_for_partition( - p, (mac_edge_index * 4) + 1) - - return app_graph, mac_graph, n_keys_map - - -def create_graphs_no_edge(): - app_graph = ApplicationGraph("Test") - # An output vertex to aim things at (to make keys required) - out_app_vertex = SimpleTestVertex(1) - app_graph.add_vertex(out_app_vertex) - # Create 5 application vertices (3 bits) - app_vertex = SimpleTestVertex(1) - app_graph.add_vertex(app_vertex) + out_mac_vertex = TestMacVertex(label="out_mac_vertex") + out_app_vertex.remember_machine_vertex(out_mac_vertex) - mac_graph = MachineGraph("Test", app_graph) - n_keys_map = DictBasedMachinePartitionNKeysMap() + mac_vertex = TestMacVertex(label="mac_vertex") + app_vertex.remember_machine_vertex(mac_vertex) - # An output vertex to aim things at (to make keys required) - out_mac_vertex = out_app_vertex.create_machine_vertex(None, None) - mac_graph.add_vertex(out_mac_vertex) + app_graph.add_edge(ApplicationEdge(app_vertex, out_app_vertex), "Part0") + app_graph.add_edge(ApplicationEdge(app_vertex, out_app_vertex), "Part1") - mac_vertex = app_vertex.create_machine_vertex(None, None) - mac_graph.add_vertex(mac_vertex) + app_vertex.add_constraint(FixedKeyAndMaskConstraint( + [BaseKeyAndMask(0x4c00000, 0xFFFFFFFE)], + partition="Part0")) + app_vertex.add_constraint(FixedKeyAndMaskConstraint( + [BaseKeyAndMask(0x4c00000, 0xFFFFFFFF)], + partition="Part1")) - return app_graph, mac_graph, n_keys_map + return app_graph -def create_app_less(): +def create_graphs_no_edge(): app_graph = ApplicationGraph("Test") - - mac_graph = MachineGraph("Test", app_graph) - n_keys_map = DictBasedMachinePartitionNKeysMap() + out_app_vertex = TestAppVertex(splitter=TestSplitter()) + app_graph.add_vertex(out_app_vertex) + app_vertex = TestAppVertex(splitter=TestSplitter()) + app_graph.add_vertex(app_vertex) # An output vertex to aim things at (to make keys required) - out_mac_vertex = SimpleMachineVertex(None, None) - mac_graph.add_vertex(out_mac_vertex) + out_mac_vertex = TestMacVertex() + out_app_vertex.remember_machine_vertex(out_mac_vertex) - # Create 5 application vertices (3 bits) - for app_index in range(5): - - # For each, create up to (5 x 4) + 1 = 21 machine vertices (5 bits) - for mac_index in range((app_index * 4) + 1): - mac_vertex = SimpleMachineVertex(None, None) - mac_graph.add_vertex(mac_vertex) - - # For each machine vertex create up to - # (20 x 2) + 1 = 81(!) 
partitions (6 bits) - for mac_edge_index in range((mac_index * 2) + 1): - mac_edge = MachineEdge(mac_vertex, out_mac_vertex) - part_name = "Part{}".format(mac_edge_index) - mac_graph.add_edge(mac_edge, part_name) + mac_vertex = TestMacVertex() + app_vertex.remember_machine_vertex(mac_vertex) - # Give the partition up to (40 x 4) + 1 = 161 keys (8 bits) - p = mac_graph.get_outgoing_edge_partition_starting_at_vertex( - mac_vertex, part_name) - n_keys_map.set_n_keys_for_partition( - p, (mac_edge_index * 4) + 1) - - return app_graph, mac_graph, n_keys_map + return app_graph def check_masks_all_the_same(routing_info, mask): # Check the mask is the same for all, and allows for the space required - # for the maximum number of keys in total (bottom 8 bits) + # for the maximum number of keys in total seen_keys = set() for r_info in routing_info: - assert(len(r_info.keys_and_masks) == 1) - if r_info.first_mask != mask: - label = r_info.partition.pre_vertex.label - assert(label == "RETINA") - assert(r_info.first_key not in seen_keys) - seen_keys.add(r_info.first_key) + if isinstance(r_info.vertex, MachineVertex): + assert(len(r_info.keys_and_masks) == 1) + assert(r_info.first_mask == mask or + r_info.machine_vertex.label == "RETINA") + assert(r_info.first_key not in seen_keys) + seen_keys.add(r_info.first_key) -def check_fixed(p, key): - for constraint in p.constraints: +def check_fixed(m_vertex, part_id, key): + for constraint in m_vertex.constraints: if isinstance(constraint, FixedKeyAndMaskConstraint): - assert key == constraint.keys_and_masks[0].key - return True + if constraint.applies_to_partition(part_id): + assert key == constraint.keys_and_masks[0].key + return True return False def check_keys_for_application_partition_pairs( - app_graph, m_graph, routing_info, app_mask): + app_graph, routing_info, app_mask): # Check the key for each application vertex / partition pair is the same # The bits that should be the same are those selected by app_mask - mapped_base = dict() - for app_vertex in app_graph.vertices: - for m_vertex in app_vertex.machine_vertices: - for p in m_graph.get_multicast_edge_partitions_starting_at_vertex( - m_vertex): - key = routing_info.get_first_key_from_partition(p) - if check_fixed(p, key): - continue - if (app_vertex, p.identifier) in mapped_base: - mapped_key = mapped_base[(app_vertex, p.identifier)] - assert((mapped_key & app_mask) == (key & app_mask)) - else: - mapped_base[(app_vertex, p.identifier)] = key - if key != 0: - assert((key & app_mask) != 0) + for part in app_graph.outgoing_edge_partitions: + mapped_key = None + for m_vertex in part.pre_vertex.splitter.get_out_going_vertices( + part.identifier): + key = routing_info.get_first_key_from_pre_vertex( + m_vertex, part.identifier) + if check_fixed(m_vertex, part.identifier, key): + continue + + if mapped_key is not None: + assert((mapped_key & app_mask) == (key & app_mask)) + else: + mapped_key = key + if key != 0: + assert((key & app_mask) != 0) def test_global_allocator(): unittest_setup() # Allocate something and check it does the right thing - app_graph, mac_graph, n_keys_map = create_graphs1(False) + app_graph = create_graphs1(False) - # The number of bits is 7 + 5 + 8 = 20, so it shouldn't fail - routing_info = global_allocate(mac_graph, n_keys_map) + # The number of bits is 6 + 7 + 8 = 21, so it shouldn't fail + routing_info = global_allocate(app_graph, []) - # Last 8 for buts + # Last 8 for atom id mask = 0xFFFFFF00 check_masks_all_the_same(routing_info, mask) - # all but the bottom 13 bits should be the same - 
app_mask = 0xFFFFE000 + # all but the bottom 8 + 7 = 15 bits should be the same + app_mask = 0xFFFF8000 check_keys_for_application_partition_pairs( - app_graph, mac_graph, routing_info, app_mask) + app_graph, routing_info, app_mask) def test_flexible_allocator_no_fixed(): unittest_setup() # Allocate something and check it does the right thing - app_graph, mac_graph, n_keys_map = create_graphs1(False) + app_graph = create_graphs1(False) - # The number of bits is 7 + 11 = 20, so it shouldn't fail - routing_info = flexible_allocate(mac_graph, n_keys_map) + # The number of bits is 8 + 7 + 6 = 21, so it shouldn't fail + routing_info = flexible_allocate(app_graph, []) - # all but the bottom 11 bits should be the same - app_mask = 0xFFFFF800 + # all but the bottom 8 + 7 = 15 bits should be the same + app_mask = 0xFFFF8000 check_keys_for_application_partition_pairs( - app_graph, mac_graph, routing_info, app_mask) + app_graph, routing_info, app_mask) def test_fixed_only(): unittest_setup() - app_graph, mac_graph, n_keys_map = create_graphs_only_fixed() - flexible_allocate(mac_graph, n_keys_map) - routing_info = global_allocate(mac_graph, n_keys_map) - assert len(list(routing_info)) == 2 + app_graph = create_graphs_only_fixed() + flexible_allocate(app_graph, []) + routing_info = global_allocate(app_graph, []) + assert len(list(routing_info)) == 4 def test_no_edge(): unittest_setup() - app_graph, mac_graph, n_keys_map = create_graphs_no_edge() - flexible_allocate(mac_graph, n_keys_map) - routing_info = global_allocate(mac_graph, n_keys_map) + app_graph = create_graphs_no_edge() + flexible_allocate(app_graph, []) + routing_info = global_allocate(app_graph, []) assert len(list(routing_info)) == 0 def test_flexible_allocator_with_fixed(): unittest_setup() # Allocate something and check it does the right thing - app_graph, mac_graph, n_keys_map = create_graphs1(True) + app_graph = create_graphs1(True) - # The number of bits is 7 + 11 = 20, so it shouldn't fail - routing_info = flexible_allocate(mac_graph, n_keys_map) + # The number of bits is 6 + 7 + 8 = 21, so it shouldn't fail + routing_info = flexible_allocate(app_graph, []) - # all but the bottom 11 bits should be the same - app_mask = 0xFFFFF800 + # all but the bottom 8 + 7 = 15 bits should be the same + app_mask = 0xFFFF8000 check_keys_for_application_partition_pairs( - app_graph, mac_graph, routing_info, app_mask) + app_graph, routing_info, app_mask) def create_big(with_fixed): # This test shows how easy it is to trip up the allocator with a retina app_graph = ApplicationGraph("Test") # Create a single "big" vertex - big_app_vertex = SimpleTestVertex(1, label="Retina") + big_app_vertex = TestAppVertex(label="Retina", splitter=TestSplitter()) app_graph.add_vertex(big_app_vertex) # Create a single output vertex (which won't send) - out_app_vertex = SimpleTestVertex(1, label="Destination") + out_app_vertex = TestAppVertex( + label="Destination", splitter=TestSplitter()) app_graph.add_vertex(out_app_vertex) # Create a load of middle vertices - mid_app_vertex = SimpleTestVertex(1, "Population") + mid_app_vertex = TestAppVertex(label="Population", splitter=TestSplitter()) app_graph.add_vertex(mid_app_vertex) - mac_graph = MachineGraph("Test", app_graph) - n_keys_map = DictBasedMachinePartitionNKeysMap() + app_graph.add_edge(ApplicationEdge(big_app_vertex, mid_app_vertex), "Test") + app_graph.add_edge(ApplicationEdge(mid_app_vertex, out_app_vertex), "Test") # Create a single big machine vertex - big_mac_vertex = big_app_vertex.create_machine_vertex( - 
None, None, label="RETINA") - mac_graph.add_vertex(big_mac_vertex) + big_mac_vertex = TestMacVertex( + label="RETINA", n_keys_required={"Test": 1024 * 768 * 2}) + big_app_vertex.remember_machine_vertex(big_mac_vertex) # Create a single output vertex (which won't send) - out_mac_vertex = out_app_vertex.create_machine_vertex(None, None) - mac_graph.add_vertex(out_mac_vertex) + out_mac_vertex = TestMacVertex(label="OutMacVertex") + out_app_vertex.remember_machine_vertex(out_mac_vertex) # Create a load of middle vertices and connect them up - for _ in range(2000): # 2000 needs 11 bits - mid_mac_vertex = mid_app_vertex.create_machine_vertex(None, None) - mac_graph.add_vertex(mid_mac_vertex) - edge = MachineEdge(big_mac_vertex, mid_mac_vertex) - mac_graph.add_edge(edge, "Test") - edge_2 = MachineEdge(mid_mac_vertex, out_mac_vertex) - mac_graph.add_edge(edge_2, "Test") - mid_part = mac_graph.get_outgoing_edge_partition_starting_at_vertex( - mid_mac_vertex, "Test") - n_keys_map.set_n_keys_for_partition(mid_part, 100) - - big_mac_part = mac_graph.get_outgoing_edge_partition_starting_at_vertex( - big_mac_vertex, "Test") + for i in range(2000): # 2000 needs 11 bits + mid_mac_vertex = TestMacVertex(label=f"MidMacVertex{i}", + n_keys_required={"Test": 100}) + mid_app_vertex.remember_machine_vertex(mid_mac_vertex) + if with_fixed: - big_mac_part.add_constraint(FixedKeyAndMaskConstraint([ + big_app_vertex.add_constraint(FixedKeyAndMaskConstraint([ BaseKeyAndMask(0x0, 0x180000)])) - # Make the "retina" need 21 bits, so total is now 21 + 11 = 32 bits, - # but the application vertices need some bits too - n_keys_map.set_n_keys_for_partition(big_mac_part, 1024 * 768 * 2) - return app_graph, mac_graph, n_keys_map + return app_graph def test_big_flexible_no_fixed(): unittest_setup() - app_graph, mac_graph, n_keys_map = create_big(False) + app_graph = create_big(False) # Globally the number of bits would be 1 + 11 + 21 = 33, but flexible # allocation only needs 1 + 21 = 22 bits, so it shouldn't fail - routing_info = flexible_allocate(mac_graph, n_keys_map) + routing_info = flexible_allocate(app_graph, []) # all but the bottom 21 bits should be the same app_mask = 0xFFE00000 check_keys_for_application_partition_pairs( - app_graph, mac_graph, routing_info, app_mask) + app_graph, routing_info, app_mask) def test_big_global_no_fixed(): unittest_setup() - app_graph, mac_graph, n_keys_map = create_big(False) + app_graph = create_big(False) # Make the call, and check it does the right thing - routing_info = global_allocate(mac_graph, n_keys_map) + routing_info = global_allocate(app_graph, []) # 1 bit for app, 11 for machine, so where possible use 20 for atoms mask = 0xFFF00000 @@ -365,27 +344,27 @@ def test_big_global_no_fixed(): # only the top 1 bit should be the same app_mask = 0x80000000 check_keys_for_application_partition_pairs( - app_graph, mac_graph, routing_info, app_mask) + app_graph, routing_info, app_mask) def test_big_flexible_fixed(): unittest_setup() - app_graph, mac_graph, n_keys_map = create_big(True) + app_graph = create_big(True) # The number of bits is 1 + 11 + 21 = 33, so it shouldn't fail - routing_info = flexible_allocate(mac_graph, n_keys_map) + routing_info = flexible_allocate(app_graph, []) # all but the bottom 18 bits should be the same app_mask = 0xFFFC0000 check_keys_for_application_partition_pairs( - app_graph, mac_graph, routing_info, app_mask) + app_graph, routing_info, app_mask) def test_big_global_fixed(): unittest_setup() - app_graph, mac_graph, n_keys_map = create_big(True) + app_graph = create_big(True) # Make 
the call, and check it does the right thing - routing_info = global_allocate(mac_graph, n_keys_map) + routing_info = global_allocate(app_graph, []) # Atoms get 7 bits, as the fixed-key retina is ignored mask = 0xFFFFFF80 @@ -397,31 +376,4 @@ def test_big_global_fixed(): # all but the bottom 18 bits should be the same app_mask = 0xFFFC0000 check_keys_for_application_partition_pairs( - app_graph, mac_graph, routing_info, app_mask) - - -def test_no_app_level_flexible(): - unittest_setup() - app_graph, mac_graph, n_keys_map = create_app_less() - # The number of bits is 1 + 11 + 21 = 33, so it shouldn't fail - routing_info = flexible_allocate(mac_graph, n_keys_map) - - # all but the bottom 8 bits should be the same - app_mask = 0xFFFFFF00 - check_keys_for_application_partition_pairs( - app_graph, mac_graph, routing_info, app_mask) - - -def test_no_app_level_global(): - unittest_setup() - app_graph, mac_graph, n_keys_map = create_app_less() - # The number of bits is 1 + 11 + 21 = 33, so it shouldn't fail - routing_info = global_allocate(mac_graph, n_keys_map) - # Last 8 for masks - mask = 0xFFFFFF00 - check_masks_all_the_same(routing_info, mask) - - # all but the bottom 8 bits should be the same - app_mask = 0xFFFFFF00 - check_keys_for_application_partition_pairs( - app_graph, mac_graph, routing_info, app_mask) + app_graph, routing_info, app_mask) diff --git a/unittests/operations_tests/tag_allocator_tests/test_tags_board_addresses.py b/unittests/operations_tests/tag_allocator_tests/test_tags_board_addresses.py index 7a2246482..d49e6bd82 100644 --- a/unittests/operations_tests/tag_allocator_tests/test_tags_board_addresses.py +++ b/unittests/operations_tests/tag_allocator_tests/test_tags_board_addresses.py @@ -42,8 +42,7 @@ def test_ip_tags(self): placements = Placements( Placement(vertex, chip.x, chip.y, 1) for vertex, chip in zip(vertices, eth_chips)) - tags = basic_tag_allocator( - machine, plan_n_timesteps=None, placements=placements) + tags = basic_tag_allocator(machine, placements) for vertex, chip in zip(vertices, eth_chips): iptags = tags.get_ip_tags_for_vertex(vertex) @@ -88,8 +87,7 @@ def test_too_many_ip_tags_for_1_board(self): placements.add_placements( Placement(vertex, eth_chip_2.x, eth_chip_2.y, proc) for proc, vertex in zip(eth2_procs, eth2_vertices)) - tags = basic_tag_allocator( - machine, plan_n_timesteps=None, placements=placements) + tags = basic_tag_allocator(machine, placements) tags_by_board = defaultdict(set) for vertices in (eth_vertices, eth2_vertices): diff --git a/unittests/run_tests.py b/unittests/run_tests.py index c2b591279..6b106793b 100644 --- a/unittests/run_tests.py +++ b/unittests/run_tests.py @@ -21,7 +21,6 @@ testmodules = [ 'model_tests.graph_mapper_tests.test_graph_mapping', 'model_tests.graph_mapper_tests.test_slice', - 'model_tests.machine_graph_tests.test_machine_graph_model', 'model_tests.application_graph_tests.test_application_edge', 'model_tests.application_graph_tests.test_application_graph', 'model_tests.application_graph_tests.test_application_vertex', diff --git a/unittests/utilities_tests/test_json_utils.py b/unittests/utilities_tests/test_json_utils.py index 0dfef7b8c..724d8f994 100644 --- a/unittests/utilities_tests/test_json_utils.py +++ b/unittests/utilities_tests/test_json_utils.py @@ -15,30 +15,13 @@ import unittest import json from pacman.config_setup import unittest_setup -from pacman.model.constraints.key_allocator_constraints import ( - ContiguousKeyRangeContraint, FixedKeyAndMaskConstraint, - FixedMaskConstraint) -from pacman.model.constraints.placer_constraints 
import ( - BoardConstraint, ChipAndCoreConstraint, RadialPlacementFromChipConstraint, - SameChipAsConstraint) -from pacman.model.constraints.partitioner_constraints import ( - SameAtomsAsVertexConstraint) -from pacman.model.graphs.machine import MulticastEdgePartition from pacman.model.resources import ( ConstantSDRAM, CPUCyclesPerTickResource, DTCMResource, IPtagResource, ResourceContainer) -from pacman.model.routing_info import BaseKeyAndMask -from pacman.utilities import file_format_schemas from pacman.utilities.json_utils import ( - constraint_to_json, constraint_from_json, - edge_to_json, edge_from_json, - graph_to_json, graph_from_json, resource_container_to_json, resource_container_from_json, vertex_to_json, vertex_from_json) -from pacman.model.graphs.machine import ( - MachineEdge, MachineGraph, SimpleMachineVertex) - -MACHINE_GRAPH_FILENAME = "machine_graph.json" +from pacman.model.graphs.machine import SimpleMachineVertex class TestJsonUtils(unittest.TestCase): @@ -79,13 +62,6 @@ def _compare_vertex(self, v1, v2, seen=None): # Composite JSON round-trip testing schemes # ------------------------------------------------------------------ - def constraint_there_and_back(self, there): - j_object = constraint_to_json(there) - j_str = json.dumps(j_object) - j_object2 = json.loads(j_str) - back = constraint_from_json(j_object2) - self._compare_constraint(there, back) - def resource_there_and_back(self, there): j_object = resource_container_to_json(there) j_str = json.dumps(j_object) @@ -100,73 +76,10 @@ def vertex_there_and_back(self, there): back = vertex_from_json(j_object2) self._compare_vertex(there, back) - def edge_there_and_back(self, there): - j_object = edge_to_json(there) - j_str = json.dumps(j_object) - j_object2 = json.loads(j_str) - back = edge_from_json(j_object2) - self.assertEqual(there.label, back.label) - self._compare_vertex(there.pre_vertex, back.pre_vertex) - self._compare_vertex(there.post_vertex, back.post_vertex) - self.assertEqual(there.traffic_type, back.traffic_type) - self.assertEqual(there.traffic_weight, back.traffic_weight) - - def graph_there_and_back(self, there): - j_object = graph_to_json(there) - print(j_object) - file_format_schemas.validate(j_object, MACHINE_GRAPH_FILENAME) - back = graph_from_json(j_object) - self.assertEqual(there.n_vertices, back.n_vertices) - for vertex in there.vertices: - b_vertex = back.vertex_by_label(vertex.label) - self._compare_vertex(vertex, b_vertex) - # ------------------------------------------------------------------ # Test cases # ------------------------------------------------------------------ - def test_board_constraint(self): - c1 = BoardConstraint("1.2.3.4") - self.constraint_there_and_back(c1) - - def test_chip_and_core_constraint(self): - c1 = ChipAndCoreConstraint(1, 2) - self.constraint_there_and_back(c1) - c2 = ChipAndCoreConstraint(1, 2, 3) - self.constraint_there_and_back(c2) - - def test_radial_placement_from_chip_constraint(self): - c1 = RadialPlacementFromChipConstraint(1, 2) - self.constraint_there_and_back(c1) - - def test_same_chip_as_constraint(self): - v1 = SimpleMachineVertex(None, "v1") - c1 = SameChipAsConstraint(v1) - self.constraint_there_and_back(c1) - - def test_same_atoms_as_vertex_constraint(self): - with self.assertRaises(NotImplementedError): - v1 = SimpleMachineVertex(None, "v1") - c1 = SameAtomsAsVertexConstraint(v1) - self.constraint_there_and_back(c1) - - def test_contiguous_key_range_constraint(self): - c1 = ContiguousKeyRangeContraint() - self.constraint_there_and_back(c1) - - 
def test_fixed_key_and_mask_constraint(self): - c1 = FixedKeyAndMaskConstraint([ - BaseKeyAndMask(0xFF0, 0xFF8)]) - self.constraint_there_and_back(c1) - km = BaseKeyAndMask(0xFF0, 0xFF8) - km2 = BaseKeyAndMask(0xFE0, 0xFF8) - c2 = FixedKeyAndMaskConstraint([km, km2]) - self.constraint_there_and_back(c2) - - def test_fixed_mask_constraint(self): - c1 = FixedMaskConstraint(0xFF0) - self.constraint_there_and_back(c1) - def test_tags_resources(self): t1 = IPtagResource("1", 2, True) # Minimal args r1 = ResourceContainer(iptags=[t1]) @@ -191,59 +104,3 @@ def test_vertex(self): "127.0.0.1", port=None, strip_sdp=True)]), label="Vertex") self.vertex_there_and_back(s1) - - def test_vertex2(self): - """Like test_vertex, but with constraints.""" - c1 = ContiguousKeyRangeContraint() - c2 = BoardConstraint("1.2.3.4") - s1 = SimpleMachineVertex(ResourceContainer(iptags=[IPtagResource( - "127.0.0.1", port=None, strip_sdp=True)]), - label="Vertex", constraints=[c1, c2]) - self.vertex_there_and_back(s1) - - def test_same_chip_as_constraint_plus(self): - v1 = SimpleMachineVertex(None, "v1") - c1 = SameChipAsConstraint(v1) - self.constraint_there_and_back(c1) - - def test_edge(self): - v1 = SimpleMachineVertex(None, "One") - v2 = SimpleMachineVertex(None, "Two") - e1 = MachineEdge(v1, v2) - self.edge_there_and_back(e1) - - def test_new_empty_graph(self): - """ - test that the creation of a empty machine graph works - """ - m1 = MachineGraph("foo") - self.graph_there_and_back(m1) - - def test_new_graph(self): - """ - tests that after building a machine graph, all partitined vertices - and partitioned edges are in existence - """ - vertices = list() - edges = list() - for i in range(10): - vertices.append( - SimpleMachineVertex(ResourceContainer(), "V{}".format(i))) - with self.assertRaises(NotImplementedError): - vertices[1].add_constraint(SameAtomsAsVertexConstraint( - vertices[4])) - vertices[4].add_constraint(SameAtomsAsVertexConstraint( - vertices[1])) - for i in range(5): - edges.append(MachineEdge(vertices[0], vertices[(i + 1)])) - for i in range(5, 10): - edges.append(MachineEdge( - vertices[5], vertices[(i + 1) % 10])) - graph = MachineGraph("foo") - graph.add_vertices(vertices) - graph.add_outgoing_edge_partition(MulticastEdgePartition( - identifier="bar", pre_vertex=vertices[0])) - graph.add_outgoing_edge_partition(MulticastEdgePartition( - identifier="bar", pre_vertex=vertices[5])) - graph.add_edges(edges, "bar") - self.graph_there_and_back(graph) diff --git a/unittests/utilities_tests/test_placer_algorithm_utilities.py b/unittests/utilities_tests/test_placer_algorithm_utilities.py deleted file mode 100644 index 201a5519e..000000000 --- a/unittests/utilities_tests/test_placer_algorithm_utilities.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright (c) 2017-2019 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
- -import unittest -from pacman.config_setup import unittest_setup -from pacman.utilities.algorithm_utilities.placer_algorithm_utilities import ( - add_set) - - -class TestUtilities(unittest.TestCase): - - def setUp(self): - unittest_setup() - - def test_add_join(self): - all_sets = list() - all_sets.append({1, 2}) - all_sets.append({3, 4}) - all_sets.append({5, 6}) - new_set = {2, 4} - add_set(all_sets, new_set) - self.assertEqual(2, len(all_sets)) - self.assertIn({1, 2, 3, 4}, all_sets) - self.assertIn({5, 6}, all_sets) - - def test_add_one(self): - all_sets = list() - all_sets.append({1, 2}) - all_sets.append({3, 4}) - all_sets.append({5, 6}) - new_set = {2, 7} - add_set(all_sets, new_set) - self.assertEqual(3, len(all_sets)) - self.assertIn({1, 2, 7}, all_sets) - self.assertIn({3, 4}, all_sets) - self.assertIn({5, 6}, all_sets) - - def test_add_new(self): - all_sets = list() - all_sets.append({1, 2}) - all_sets.append({3, 4}) - all_sets.append({5, 6}) - new_set = {8, 7} - add_set(all_sets, new_set) - self.assertEqual(4, len(all_sets)) - self.assertIn({1, 2}, all_sets) - self.assertIn({3, 4}, all_sets) - self.assertIn({5, 6}, all_sets) - self.assertIn({7, 8}, all_sets) diff --git a/unittests/utilities_tests/test_resource_tracker.py b/unittests/utilities_tests/test_resource_tracker.py deleted file mode 100644 index 3bddf98c1..000000000 --- a/unittests/utilities_tests/test_resource_tracker.py +++ /dev/null @@ -1,138 +0,0 @@ -# Copyright (c) 2017-2019 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
- -import unittest -from spinn_machine import ( - virtual_machine, Chip, Router, SDRAM, machine_from_chips) -from pacman.config_setup import unittest_setup -from pacman.model.resources import ( - ResourceContainer, ConstantSDRAM, PreAllocatedResourceContainer) -from pacman.exceptions import ( - PacmanInvalidParameterException, PacmanValueError) -from pacman.utilities.utility_objs import ResourceTracker - - -class TestResourceTracker(unittest.TestCase): - - def setUp(self): - unittest_setup() - - def test_n_cores_available(self): - machine = virtual_machine( - width=2, height=2, n_cpus_per_chip=18) - preallocated_resources = PreAllocatedResourceContainer() - preallocated_resources.add_cores_all(2) - preallocated_resources.add_cores_ethernet(3) - tracker = ResourceTracker( - machine, plan_n_timesteps=None, - preallocated_resources=preallocated_resources) - - # Should be 15 cores = 18 - 1 Monitor -3 ethernet -2 all cores - self.assertEqual(tracker._get_core_tracker(0, 0).n_cores_available, 12) - - # Should be 15 cores = 18 -2 other cores - self.assertEqual(tracker._get_core_tracker(0, 1).n_cores_available, 15) - - # Should be True since the core is not pre allocated - self.assertTrue(tracker._get_core_tracker(0, 0).is_core_available(2)) - - # Should be False since the core is monitor - self.assertFalse(tracker._get_core_tracker(0, 0).is_core_available(0)) - - # Allocate a core - tracker._get_core_tracker(0, 0).allocate(2) - - # Should be 11 cores as one now allocated - self.assertEqual(tracker._get_core_tracker(0, 0).n_cores_available, 11) - - with self.assertRaises(PacmanInvalidParameterException): - tracker._get_core_tracker(2, 2) - - def test_deallocation_of_resources(self): - machine = virtual_machine( - width=2, height=2, n_cpus_per_chip=18) - chip_sdram = machine.get_chip_at(1, 1).sdram.size - res_sdram = 12345 - - tracker = ResourceTracker(machine, plan_n_timesteps=None, - preallocated_resources=None) - - sdram_res = ConstantSDRAM(res_sdram) - resources = ResourceContainer(sdram=sdram_res) - chip_0 = machine.get_chip_at(0, 0) - - # verify core tracker is empty - if (0, 0) in tracker._core_tracker: - raise Exception("shouldnt exist") - - tracker._get_core_tracker(1, 1) - - # verify core tracker not empty - if (1, 1) not in tracker._core_tracker: - raise Exception("should exist") - - # verify sdram tracker - # 0, 0 in _sdram_tracker due to the get_core_tracker(0, 0) call - if tracker._sdram_tracker[1, 1] != chip_sdram: - raise Exception("incorrect sdram of {}".format( - tracker._sdram_tracker[1, 1])) - - # allocate some res - chip_x, chip_y, processor_id, ip_tags, reverse_ip_tags = \ - tracker.allocate_resources(resources, [(0, 0)]) - - # verify chips used is updated - cores = list(tracker._core_tracker[(0, 0)]._cores) - self.assertEqual(len(cores), chip_0.n_user_processors - 1) - - # verify sdram used is updated - sdram = tracker._sdram_tracker[(0, 0)] - self.assertEqual(sdram, chip_sdram-res_sdram) - - if (0, 0) not in tracker._chips_used: - raise Exception("should exist") - - # deallocate res - tracker.unallocate_resources( - chip_x, chip_y, processor_id, resources, ip_tags, reverse_ip_tags) - - # verify chips used is updated - if tracker._core_tracker[(0, 0)].n_cores_available != \ - chip_0.n_user_processors: - raise Exception("shouldn't exist or should be right size") - - # if (0, 0) in tracker._chips_used: - # raise Exception("shouldnt exist") - - # verify sdram tracker - if tracker._sdram_tracker[0, 0] != chip_sdram: - raise Exception("incorrect sdram of {}".format( - 
tracker._sdram_tracker[0, 0])) - - def test_allocate_resources_when_chip_used(self): - router = Router([]) - sdram = SDRAM() - empty_chip = Chip( - 0, 0, 1, router, sdram, 0, 0, "127.0.0.1", - virtual=False, tag_ids=[1]) - machine = machine_from_chips([empty_chip]) - resource_tracker = ResourceTracker(machine, plan_n_timesteps=None) - with self.assertRaises(PacmanValueError): - resource_tracker.allocate_resources( - ResourceContainer(sdram=ConstantSDRAM(1024))) - - -if __name__ == '__main__': - unittest.main()
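
For reference, a minimal sketch of the calling convention the reworked tests above exercise, reusing the TestAppVertex, TestSplitter and TestMacVertex helpers from test_zoned_routing_allocator.py; the graph shape and labels here are illustrative assumptions, not part of the patch:

    from pacman.config_setup import unittest_setup
    from pacman.model.graphs.application import (
        ApplicationEdge, ApplicationGraph)
    from pacman.operations.routing_info_allocator_algorithms.\
        zoned_routing_info_allocator import flexible_allocate

    unittest_setup()
    app_graph = ApplicationGraph("Example")
    src = TestAppVertex(splitter=TestSplitter())
    dst = TestAppVertex(splitter=TestSplitter())
    app_graph.add_vertex(src)
    app_graph.add_vertex(dst)
    # Machine vertices now hang off their application vertex (there is no
    # separate MachineGraph), and key counts live on the machine vertex
    src.remember_machine_vertex(
        TestMacVertex(label="src_m", n_keys_required={"Part0": 100}))
    dst.remember_machine_vertex(TestMacVertex(label="dst_m"))
    app_graph.add_edge(ApplicationEdge(src, dst), "Part0")

    # Allocation is driven from the application graph; the second argument
    # is passed as an empty list throughout the tests above
    routing_info = flexible_allocate(app_graph, [])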