From 8c3bc0b668ad7d16c1665d19d161c9f130ccf6b2 Mon Sep 17 00:00:00 2001 From: Dmitrii Beliakov Date: Wed, 4 Oct 2023 18:34:25 +0200 Subject: [PATCH 1/3] Add support of HA-flow endpoints update. --- .../java/org/openkilda/wfm/HaFlowHelper.java | 157 +++++++ .../mappers/HaFlowHistoryMapperTest.java | 398 +++++++----------- .../actions/DeallocateResourcesAction.java | 2 +- .../fsm/haflow/update/HaFlowUpdateFsm.java | 26 +- ...urceManagementOnEndpointsUpdateAction.java | 48 +++ .../update/actions/UpdateHaFlowAction.java | 15 +- .../flowhs/fsm/update/FlowUpdateFsm.java | 20 +- .../HandleNotCompletedCommandsAction.java | 2 +- .../actions/InstallIngressRulesAction.java | 4 +- .../actions/InstallNonIngressRulesAction.java | 4 +- .../update/actions/RemoveOldRulesAction.java | 6 +- .../update/actions/RevertNewRulesAction.java | 8 +- .../update/actions/RevertPathsSwapAction.java | 2 +- ...ipPathsAndResourcesDeallocationAction.java | 2 +- .../fsm/update/actions/UpdateFlowAction.java | 64 +-- .../flowhs/utils/EndpointUpdateType.java | 179 ++++++++ .../flowhs/utils/EndpointUpdateTypeTest.java | 157 +++++++ 17 files changed, 757 insertions(+), 337 deletions(-) create mode 100644 src-java/base-topology/base-storm-topology/src/test/java/org/openkilda/wfm/HaFlowHelper.java create mode 100644 src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/update/actions/SkipResourceManagementOnEndpointsUpdateAction.java create mode 100644 src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/utils/EndpointUpdateType.java create mode 100644 src-java/flowhs-topology/flowhs-storm-topology/src/test/java/org/openkilda/wfm/topology/flowhs/utils/EndpointUpdateTypeTest.java diff --git a/src-java/base-topology/base-storm-topology/src/test/java/org/openkilda/wfm/HaFlowHelper.java b/src-java/base-topology/base-storm-topology/src/test/java/org/openkilda/wfm/HaFlowHelper.java new file mode 100644 index 
00000000000..bede0bb0408 --- /dev/null +++ b/src-java/base-topology/base-storm-topology/src/test/java/org/openkilda/wfm/HaFlowHelper.java @@ -0,0 +1,157 @@ +/* Copyright 2023 Telstra Open Source + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.openkilda.wfm; + +import org.openkilda.model.FlowEncapsulationType; +import org.openkilda.model.FlowPathDirection; +import org.openkilda.model.FlowPathStatus; +import org.openkilda.model.FlowStatus; +import org.openkilda.model.HaFlow; +import org.openkilda.model.HaFlowPath; +import org.openkilda.model.HaSubFlow; +import org.openkilda.model.PathComputationStrategy; +import org.openkilda.model.PathId; +import org.openkilda.model.Switch; +import org.openkilda.model.SwitchId; +import org.openkilda.model.cookie.FlowSegmentCookie; +import org.openkilda.wfm.share.history.model.HaFlowEventData; + +import com.google.common.collect.Sets; + +import java.time.Instant; +import java.time.temporal.ChronoUnit; + +public final class HaFlowHelper { + public static final String DETAILS = "details"; + public static final HaFlowEventData.Event EVENT_CREATE = HaFlowEventData.Event.CREATE; + public static final String ACTOR = "actor"; + public static final String CORRELATION_ID = "correlation id"; + public static final String FAKE_TIMESTAMP = "fake timestamp"; + public static final String HA_FLOW_ID = "HA flow ID"; + public static final HaFlowEventData.Initiator INITIATOR = HaFlowEventData.Initiator.NB; + public static 
final String HISTORY_ACTION = "Ha flow history action"; + public static final Integer SHARED_PORT = 1; + public static final Integer SHARED_OUTER_VLAN = 1000; + public static final Integer SHARED_INNER_VLAN = 2000; + public static final Long MAXIMUM_BANDWIDTH = 100_000L; + public static final PathComputationStrategy PATH_COMPUTATION_STRATEGY = PathComputationStrategy.COST; + public static final FlowEncapsulationType FLOW_ENCAPSULATION_TYPE = FlowEncapsulationType.VXLAN; + public static final Long MAX_LATENCY = 42L; + public static final Long MAX_LATENCY_TIER_2 = 84L; + public static final Boolean IGNORE_BANDWIDTH = false; + public static final Boolean PERIODIC_PINGS = false; + public static final Boolean PINNED = false; + public static final Integer PRIORITY = 455; + public static final String STATUS_INFO = "Status info"; + public static final Boolean STRICT_BANDWIDTH = false; + public static final String DESCRIPTION = "HA flow description"; + public static final Boolean ALLOCATE_PROTECTED_PATH = true; + public static final FlowStatus FLOW_STATUS = FlowStatus.UP; + public static final String AFFINITY_GROUP_ID = "affinity group ID"; + public static final String DIVERSE_GROUP_ID = "diverse group ID"; + public static final Switch SHARED_SWITCH = Switch.builder().switchId(new SwitchId("00:00:01")).build(); + public static final Switch SUBFLOW_SWITCH_A = Switch.builder().switchId(new SwitchId("00:00:02")).build(); + public static final Switch SUBFLOW_SWITCH_B = Switch.builder().switchId(new SwitchId("00:00:03")).build(); + public static final Instant TIME_CREATE = Instant.now().minus(1, ChronoUnit.HOURS); + public static final Instant TIME_MODIFY = Instant.now(); + public static final FlowPathStatus FLOW_PATH_STATUS = FlowPathStatus.ACTIVE; + public static final String HA_SUB_FLOW_ID = "HA sub flow ID"; + public static final SwitchId ENDPOINT_SWITCH_ID = new SwitchId("00:01"); + public static final Integer ENDPOINT_PORT = 1; + public static final Integer ENDPOINT_VLAN = 
1000; + public static final Integer ENDPOINT_INNER_VLAN = 2000; + public static final long PATH_BANDWIDTH = 50_000L; + public static final FlowStatus SUBFLOW_STATUS = FlowStatus.UP; + public static final int SUBFLOW_INNER_VLAN_A = 555; + public static final int SUBFLOW_VLAN_A = 666; + public static final int SUBFLOW_PORT_A = 42; + public static final int SUBFLOW_INNER_VLAN_B = 777; + public static final int SUBFLOW_VLAN_B = 888; + public static final int SUBFLOW_PORT_B = 24; + public static final String HA_SUBFLOW_DESCRIPTION_B = "HA subflow description B"; + public static final String HA_SUBFLOW_DESCRIPTION_A = "HA subflow description A"; + + private HaFlowHelper() { + } + + /** + * Creates an HA-flow with paths and subflows configured. + * @return an HA-flow suitable for testing with all parameters set. + */ + public static HaFlow createHaFlow() { + HaFlow result = new HaFlow(HaFlowHelper.HA_FLOW_ID, + HaFlowHelper.SHARED_SWITCH, + HaFlowHelper.SHARED_PORT, + HaFlowHelper.SHARED_OUTER_VLAN, + HaFlowHelper.SHARED_INNER_VLAN, + HaFlowHelper.MAXIMUM_BANDWIDTH, + HaFlowHelper.PATH_COMPUTATION_STRATEGY, + HaFlowHelper.FLOW_ENCAPSULATION_TYPE, + HaFlowHelper.MAX_LATENCY, + HaFlowHelper.MAX_LATENCY_TIER_2, + HaFlowHelper.IGNORE_BANDWIDTH, + HaFlowHelper.PERIODIC_PINGS, + HaFlowHelper.PINNED, + HaFlowHelper.PRIORITY, + HaFlowHelper.STRICT_BANDWIDTH, + HaFlowHelper.DESCRIPTION, + HaFlowHelper.ALLOCATE_PROTECTED_PATH, + HaFlowHelper.FLOW_STATUS, + HaFlowHelper.STATUS_INFO, + HaFlowHelper.AFFINITY_GROUP_ID, + HaFlowHelper.DIVERSE_GROUP_ID); + + HaFlowPath forward = createHaFlowPath("forward ID", FlowPathDirection.FORWARD); + HaFlowPath reverse = createHaFlowPath("reverse ID", FlowPathDirection.REVERSE); + + result.setForwardPath(forward); + result.setReversePath(reverse); + result.addPaths(forward, reverse); + + HaSubFlow a = HaSubFlow.builder() + .description(HA_SUBFLOW_DESCRIPTION_A) + .status(SUBFLOW_STATUS) + .endpointInnerVlan(SUBFLOW_INNER_VLAN_A) + 
.endpointVlan(SUBFLOW_VLAN_A) + .endpointSwitch(SUBFLOW_SWITCH_A) + .endpointPort(SUBFLOW_PORT_A) + .haSubFlowId(result.getHaFlowId() + "_a") + .build(); + + HaSubFlow b = HaSubFlow.builder() + .description(HA_SUBFLOW_DESCRIPTION_B) + .status(SUBFLOW_STATUS) + .endpointInnerVlan(SUBFLOW_INNER_VLAN_B) + .endpointVlan(SUBFLOW_VLAN_B) + .endpointSwitch(SUBFLOW_SWITCH_B) + .endpointPort(SUBFLOW_PORT_B) + .haSubFlowId(result.getHaFlowId() + "_b") + .build(); + + result.setHaSubFlows(Sets.newHashSet(a, b)); + + return result; + } + + private static HaFlowPath createHaFlowPath(String pathId, FlowPathDirection direction) { + return HaFlowPath.builder() + .haPathId(new PathId(pathId)) + .sharedSwitch(HaFlowHelper.SHARED_SWITCH) + .bandwidth(HaFlowHelper.PATH_BANDWIDTH) + .cookie(FlowSegmentCookie.builder().direction(direction).build()) + .build(); + } +} diff --git a/src-java/base-topology/base-storm-topology/src/test/java/org/openkilda/wfm/share/mappers/HaFlowHistoryMapperTest.java b/src-java/base-topology/base-storm-topology/src/test/java/org/openkilda/wfm/share/mappers/HaFlowHistoryMapperTest.java index 6773edcc6ad..71b5a5bbf4e 100644 --- a/src-java/base-topology/base-storm-topology/src/test/java/org/openkilda/wfm/share/mappers/HaFlowHistoryMapperTest.java +++ b/src-java/base-topology/base-storm-topology/src/test/java/org/openkilda/wfm/share/mappers/HaFlowHistoryMapperTest.java @@ -17,25 +17,19 @@ import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertTrue; import org.openkilda.messaging.payload.history.HaFlowDumpPayload; import org.openkilda.messaging.payload.history.HaFlowHistoryEntry; import org.openkilda.messaging.payload.history.HaFlowHistoryPayload; import org.openkilda.messaging.payload.history.HaFlowPathPayload; import org.openkilda.messaging.payload.history.HaSubFlowPayload; -import 
org.openkilda.model.FlowEncapsulationType; import org.openkilda.model.FlowPathDirection; -import org.openkilda.model.FlowPathStatus; -import org.openkilda.model.FlowStatus; import org.openkilda.model.GroupId; import org.openkilda.model.HaFlow; -import org.openkilda.model.HaFlowPath; import org.openkilda.model.MeterId; -import org.openkilda.model.PathComputationStrategy; import org.openkilda.model.PathId; -import org.openkilda.model.Switch; import org.openkilda.model.SwitchId; import org.openkilda.model.cookie.FlowSegmentCookie; import org.openkilda.model.history.HaFlowEvent; @@ -44,11 +38,10 @@ import org.openkilda.model.history.HaFlowEventDump.HaFlowEventDumpDataImpl; import org.openkilda.model.history.HaFlowEventDump.HaSubFlowDumpWrapper; import org.openkilda.model.history.HaFlowEventDump.PathNodePayload; +import org.openkilda.wfm.HaFlowHelper; import org.openkilda.wfm.share.history.model.DumpType; import org.openkilda.wfm.share.history.model.HaFlowDumpData; import org.openkilda.wfm.share.history.model.HaFlowEventData; -import org.openkilda.wfm.share.history.model.HaFlowEventData.Event; -import org.openkilda.wfm.share.history.model.HaFlowEventData.Initiator; import org.openkilda.wfm.share.history.model.HaFlowHistoryData; import org.openkilda.wfm.share.history.model.HaFlowPathDump; import org.openkilda.wfm.share.history.model.HaSubFlowDump; @@ -56,61 +49,22 @@ import com.google.common.collect.Lists; import org.junit.jupiter.api.Test; -import java.time.Instant; import java.time.ZoneOffset; -import java.time.temporal.ChronoUnit; import java.util.ArrayList; import java.util.List; public class HaFlowHistoryMapperTest { - public static final String DETAILS = "details"; - public static final Event EVENT_CREATE = Event.CREATE; - public static final String ACTOR = "actor"; - public static final String CORRELATION_ID = "correlation id"; - public static final String FAKE_TIMESTAMP = "fake timestamp"; - private static final String HA_FLOW_ID = "HA flow ID"; - public static 
final Initiator INITIATOR = Initiator.NB; - public static final String HISTORY_ACTION = "Ha flow history action"; - private static final Integer SHARED_PORT = 1; - private static final Integer SHARED_OUTER_VLAN = 1000; - private static final Integer SHARED_INNER_VLAN = 2000; - private static final Long MAXIMUM_BANDWIDTH = 100_000L; - private static final PathComputationStrategy PATH_COMPUTATION_STRATEGY = PathComputationStrategy.COST; - private static final FlowEncapsulationType FLOW_ENCAPSULATION_TYPE = FlowEncapsulationType.VXLAN; - private static final Long MAX_LATENCY = 42L; - private static final Long MAX_LATENCY_TIER_2 = 84L; - private static final Boolean IGNORE_BANDWIDTH = false; - private static final Boolean PERIODIC_PINGS = false; - private static final Boolean PINNED = false; - private static final Integer PRIORITY = 455; - public static final String STATUS_INFO = "Status info"; - private static final Boolean STRICT_BANDWIDTH = false; - private static final String DESCRIPTION = "HA flow description"; - private static final Boolean ALLOCATE_PROTECTED_PATH = true; - private static final FlowStatus FLOW_STATUS = FlowStatus.UP; - private static final String AFFINITY_GROUP_ID = "affinity group ID"; - private static final String DIVERSE_GROUP_ID = "diverse group ID"; - private static final Switch SHARED_SWITCH = Switch.builder().switchId(new SwitchId("00:00:01")).build(); - private static final Instant TIME_CREATE = Instant.now().minus(1, ChronoUnit.HOURS); - private static final Instant TIME_MODIFY = Instant.now(); - private static final FlowPathStatus FLOW_PATH_STATUS = FlowPathStatus.ACTIVE; - private static final String HA_SUB_FLOW_ID = "HA sub flow ID"; - private static final SwitchId ENDPOINT_SWITCH_ID = new SwitchId("00:01"); - private static final Integer ENDPOINT_PORT = 1; - private static final Integer ENDPOINT_VLAN = 1000; - private static final Integer ENDPOINT_INNER_VLAN = 2000; - private final HaFlowHistoryMapper mapper = 
HaFlowHistoryMapper.INSTANCE; @Test public void createHaFlowEvent() { HaFlowEventData source = HaFlowEventData.builder() - .event(EVENT_CREATE) - .details(DETAILS) - .initiator(INITIATOR) - .haFlowId(HA_FLOW_ID) - .time(TIME_CREATE) + .event(HaFlowHelper.EVENT_CREATE) + .details(HaFlowHelper.DETAILS) + .initiator(HaFlowHelper.INITIATOR) + .haFlowId(HaFlowHelper.HA_FLOW_ID) + .time(HaFlowHelper.TIME_CREATE) .build(); String taskId = "task ID"; HaFlowEvent result = mapper.createHaFlowEvent(source, taskId); @@ -126,10 +80,10 @@ public void createHaFlowEvent() { @Test public void createHaFlowEventAction() { HaFlowHistoryData source = HaFlowHistoryData.builder() - .action(HISTORY_ACTION) - .description(DESCRIPTION) - .haFlowId(HA_FLOW_ID) - .time(TIME_CREATE) + .action(HaFlowHelper.HISTORY_ACTION) + .description(HaFlowHelper.DESCRIPTION) + .haFlowId(HaFlowHelper.HA_FLOW_ID) + .time(HaFlowHelper.TIME_CREATE) .build(); String taskId = "taskID"; @@ -144,43 +98,43 @@ public void createHaFlowEventAction() { @Test public void createHaFlowHistoryEntries() { HaFlowEvent haFlowEvent = HaFlowEvent.builder() - .haFlowId(HA_FLOW_ID) - .actor(ACTOR) - .timestamp(TIME_CREATE) - .taskId(CORRELATION_ID) - .details(DETAILS) + .haFlowId(HaFlowHelper.HA_FLOW_ID) + .actor(HaFlowHelper.ACTOR) + .timestamp(HaFlowHelper.TIME_CREATE) + .taskId(HaFlowHelper.CORRELATION_ID) + .details(HaFlowHelper.DETAILS) .build(); List payloads = Lists.newArrayList(HaFlowHistoryPayload.builder() - .details(DETAILS) - .timestampIso(FAKE_TIMESTAMP) - .action(HISTORY_ACTION) - .timestamp(TIME_CREATE) + .details(HaFlowHelper.DETAILS) + .timestampIso(HaFlowHelper.FAKE_TIMESTAMP) + .action(HaFlowHelper.HISTORY_ACTION) + .timestamp(HaFlowHelper.TIME_CREATE) .build()); List dumps = Lists.newArrayList(HaFlowDumpPayload.builder() - .affinityGroupId(AFFINITY_GROUP_ID) - .allocateProtectedPath(ALLOCATE_PROTECTED_PATH) - .description(DESCRIPTION) - .diverseGroupId(DIVERSE_GROUP_ID) + 
.affinityGroupId(HaFlowHelper.AFFINITY_GROUP_ID) + .allocateProtectedPath(HaFlowHelper.ALLOCATE_PROTECTED_PATH) + .description(HaFlowHelper.DESCRIPTION) + .diverseGroupId(HaFlowHelper.DIVERSE_GROUP_ID) .dumpType(org.openkilda.model.history.DumpType.STATE_BEFORE) - .encapsulationType(FLOW_ENCAPSULATION_TYPE) - .flowTimeCreate(TIME_CREATE.toString()) - .flowTimeModify(TIME_MODIFY.toString()) - .haFlowId(HA_FLOW_ID) - .ignoreBandwidth(IGNORE_BANDWIDTH) - .maximumBandwidth(MAXIMUM_BANDWIDTH) - .maxLatency(MAX_LATENCY) - .maxLatencyTier2(MAX_LATENCY_TIER_2) - .pathComputationStrategy(PATH_COMPUTATION_STRATEGY) - .pinned(PINNED) - .priority(PRIORITY) - .periodicPings(PERIODIC_PINGS) - .status(FLOW_STATUS) - .sharedPort(SHARED_PORT) - .sharedInnerVlan(SHARED_INNER_VLAN) - .sharedOuterVlan(SHARED_OUTER_VLAN) - .sharedSwitchId(SHARED_SWITCH.getSwitchId().toString()) - .strictBandwidth(STRICT_BANDWIDTH) - .taskId(CORRELATION_ID) + .encapsulationType(HaFlowHelper.FLOW_ENCAPSULATION_TYPE) + .flowTimeCreate(HaFlowHelper.TIME_CREATE.toString()) + .flowTimeModify(HaFlowHelper.TIME_MODIFY.toString()) + .haFlowId(HaFlowHelper.HA_FLOW_ID) + .ignoreBandwidth(HaFlowHelper.IGNORE_BANDWIDTH) + .maximumBandwidth(HaFlowHelper.MAXIMUM_BANDWIDTH) + .maxLatency(HaFlowHelper.MAX_LATENCY) + .maxLatencyTier2(HaFlowHelper.MAX_LATENCY_TIER_2) + .pathComputationStrategy(HaFlowHelper.PATH_COMPUTATION_STRATEGY) + .pinned(HaFlowHelper.PINNED) + .priority(HaFlowHelper.PRIORITY) + .periodicPings(HaFlowHelper.PERIODIC_PINGS) + .status(HaFlowHelper.FLOW_STATUS) + .sharedPort(HaFlowHelper.SHARED_PORT) + .sharedInnerVlan(HaFlowHelper.SHARED_INNER_VLAN) + .sharedOuterVlan(HaFlowHelper.SHARED_OUTER_VLAN) + .sharedSwitchId(HaFlowHelper.SHARED_SWITCH.getSwitchId().toString()) + .strictBandwidth(HaFlowHelper.STRICT_BANDWIDTH) + .taskId(HaFlowHelper.CORRELATION_ID) .haSubFlows(Lists.newArrayList(createHaSubFlowPayload())) .protectedForwardPath(createHaFlowPathPayload(FlowPathDirection.FORWARD)) 
.protectedReversePath(createHaFlowPathPayload(FlowPathDirection.REVERSE)) @@ -196,40 +150,40 @@ public void createHaFlowHistoryEntries() { assertEquals(haFlowEvent.getActor(), result.getActor()); assertEquals(haFlowEvent.getAction(), result.getAction()); assertEquals(haFlowEvent.getTimestamp(), result.getTime()); - assertEquals(TIME_CREATE.atOffset(ZoneOffset.UTC).toString(), result.getTimestampIso()); + assertEquals(HaFlowHelper.TIME_CREATE.atOffset(ZoneOffset.UTC).toString(), result.getTimestampIso()); assertNotNull(result.getDumps()); - assertTrue(result.getDumps().size() > 0); + assertFalse(result.getDumps().isEmpty()); HaFlowDumpPayload resultDump = result.getDumps().get(0); - assertEquals(HA_FLOW_ID, resultDump.getHaFlowId()); - assertEquals(SHARED_PORT, resultDump.getSharedPort()); - assertEquals(SHARED_OUTER_VLAN, resultDump.getSharedOuterVlan()); - assertEquals(SHARED_INNER_VLAN, resultDump.getSharedInnerVlan()); - assertEquals(MAXIMUM_BANDWIDTH, resultDump.getMaximumBandwidth()); - assertEquals(PATH_COMPUTATION_STRATEGY, resultDump.getPathComputationStrategy()); - assertEquals(FLOW_ENCAPSULATION_TYPE, resultDump.getEncapsulationType()); - assertEquals(MAX_LATENCY, resultDump.getMaxLatency()); - assertEquals(MAX_LATENCY_TIER_2, resultDump.getMaxLatencyTier2()); - assertEquals(IGNORE_BANDWIDTH, resultDump.getIgnoreBandwidth()); - assertEquals(PERIODIC_PINGS, resultDump.getPeriodicPings()); - assertEquals(PINNED, resultDump.getPinned()); - assertEquals(PRIORITY, resultDump.getPriority()); - assertEquals(STRICT_BANDWIDTH, resultDump.getStrictBandwidth()); - assertEquals(DESCRIPTION, resultDump.getDescription()); - assertEquals(ALLOCATE_PROTECTED_PATH, resultDump.getAllocateProtectedPath()); - assertEquals(FLOW_STATUS, resultDump.getStatus()); - assertEquals(AFFINITY_GROUP_ID, resultDump.getAffinityGroupId()); - assertEquals(DIVERSE_GROUP_ID, resultDump.getDiverseGroupId()); + assertEquals(HaFlowHelper.HA_FLOW_ID, resultDump.getHaFlowId()); + 
assertEquals(HaFlowHelper.SHARED_PORT, resultDump.getSharedPort()); + assertEquals(HaFlowHelper.SHARED_OUTER_VLAN, resultDump.getSharedOuterVlan()); + assertEquals(HaFlowHelper.SHARED_INNER_VLAN, resultDump.getSharedInnerVlan()); + assertEquals(HaFlowHelper.MAXIMUM_BANDWIDTH, resultDump.getMaximumBandwidth()); + assertEquals(HaFlowHelper.PATH_COMPUTATION_STRATEGY, resultDump.getPathComputationStrategy()); + assertEquals(HaFlowHelper.FLOW_ENCAPSULATION_TYPE, resultDump.getEncapsulationType()); + assertEquals(HaFlowHelper.MAX_LATENCY, resultDump.getMaxLatency()); + assertEquals(HaFlowHelper.MAX_LATENCY_TIER_2, resultDump.getMaxLatencyTier2()); + assertEquals(HaFlowHelper.IGNORE_BANDWIDTH, resultDump.getIgnoreBandwidth()); + assertEquals(HaFlowHelper.PERIODIC_PINGS, resultDump.getPeriodicPings()); + assertEquals(HaFlowHelper.PINNED, resultDump.getPinned()); + assertEquals(HaFlowHelper.PRIORITY, resultDump.getPriority()); + assertEquals(HaFlowHelper.STRICT_BANDWIDTH, resultDump.getStrictBandwidth()); + assertEquals(HaFlowHelper.DESCRIPTION, resultDump.getDescription()); + assertEquals(HaFlowHelper.ALLOCATE_PROTECTED_PATH, resultDump.getAllocateProtectedPath()); + assertEquals(HaFlowHelper.FLOW_STATUS, resultDump.getStatus()); + assertEquals(HaFlowHelper.AFFINITY_GROUP_ID, resultDump.getAffinityGroupId()); + assertEquals(HaFlowHelper.DIVERSE_GROUP_ID, resultDump.getDiverseGroupId()); assertNotNull(result.getPayloads()); - assertTrue(result.getPayloads().size() > 0); + assertFalse(result.getPayloads().isEmpty()); HaFlowHistoryPayload resultPayload = result.getPayloads().get(0); - assertEquals(FAKE_TIMESTAMP, resultPayload.getTimestampIso()); - assertEquals(DETAILS, resultPayload.getDetails()); - assertEquals(TIME_CREATE, resultPayload.getTimestamp()); - assertEquals(HISTORY_ACTION, resultPayload.getAction()); + assertEquals(HaFlowHelper.FAKE_TIMESTAMP, resultPayload.getTimestampIso()); + assertEquals(HaFlowHelper.DETAILS, resultPayload.getDetails()); + 
assertEquals(HaFlowHelper.TIME_CREATE, resultPayload.getTimestamp()); + assertEquals(HaFlowHelper.HISTORY_ACTION, resultPayload.getAction()); } private HaFlowPathPayload createHaFlowPathPayload(FlowPathDirection direction) { @@ -247,12 +201,12 @@ private HaFlowPathPayload createHaFlowPathPayload(FlowPathDirection direction) { .yPointGroupId(GroupId.MIN_FLOW_GROUP_ID.toString()) .yPointSwitchId(new SwitchId("00:03").toString()) .yPointMeterId(MeterId.LACP_REPLY_METER_ID.toString()) - .timeCreate(TIME_CREATE.toString()) - .timeModify(TIME_MODIFY.toString()) + .timeCreate(HaFlowHelper.TIME_CREATE.toString()) + .timeModify(HaFlowHelper.TIME_MODIFY.toString()) .sharedPointMeterId(MeterId.LACP_REPLY_METER_ID.toString()) .cookie(FlowSegmentCookie.builder().direction(direction).build().toString()) - .ignoreBandwidth(IGNORE_BANDWIDTH) - .status(FLOW_PATH_STATUS) + .ignoreBandwidth(HaFlowHelper.IGNORE_BANDWIDTH) + .status(HaFlowHelper.FLOW_PATH_STATUS) .paths(pathNodesList) .haSubFlows(haSubFlowDump) .build(); @@ -260,16 +214,16 @@ private HaFlowPathPayload createHaFlowPathPayload(FlowPathDirection direction) { private HaSubFlowPayload createHaSubFlowPayload() { return HaSubFlowPayload.builder() - .haSubFlowId(HA_SUB_FLOW_ID) - .haFlowId(HA_FLOW_ID) - .status(FLOW_STATUS) - .endpointSwitchId(ENDPOINT_SWITCH_ID.toString()) - .endpointPort(ENDPOINT_PORT) - .endpointVlan(ENDPOINT_VLAN) - .endpointInnerVlan(ENDPOINT_INNER_VLAN) - .description(DESCRIPTION) - .timeCreate(TIME_CREATE.toString()) - .timeModify(TIME_MODIFY.toString()) + .haSubFlowId(HaFlowHelper.HA_SUB_FLOW_ID) + .haFlowId(HaFlowHelper.HA_FLOW_ID) + .status(HaFlowHelper.FLOW_STATUS) + .endpointSwitchId(HaFlowHelper.ENDPOINT_SWITCH_ID.toString()) + .endpointPort(HaFlowHelper.ENDPOINT_PORT) + .endpointVlan(HaFlowHelper.ENDPOINT_VLAN) + .endpointInnerVlan(HaFlowHelper.ENDPOINT_INNER_VLAN) + .description(HaFlowHelper.DESCRIPTION) + .timeCreate(HaFlowHelper.TIME_CREATE.toString()) + 
.timeModify(HaFlowHelper.TIME_MODIFY.toString()) .build(); } @@ -400,7 +354,7 @@ public void messagingToPersistence() { @Test public void createHaFlowDump() { - HaFlow source = createHaFlow(); + HaFlow source = HaFlowHelper.createHaFlow(); HaFlowDumpData dump = mapper.toHaFlowDumpData(source, "correlation ID", DumpType.STATE_AFTER); @@ -454,29 +408,29 @@ public void createHaFlowDump() { private HaFlowEventDump createHaFlowEventDump(org.openkilda.model.history.DumpType dumpType, String correlationId) { return new HaFlowEventDump(HaFlowEventDumpDataImpl.builder() - .affinityGroupId(AFFINITY_GROUP_ID) - .allocateProtectedPath(ALLOCATE_PROTECTED_PATH) - .description(DESCRIPTION) - .diverseGroupId(DIVERSE_GROUP_ID) + .affinityGroupId(HaFlowHelper.AFFINITY_GROUP_ID) + .allocateProtectedPath(HaFlowHelper.ALLOCATE_PROTECTED_PATH) + .description(HaFlowHelper.DESCRIPTION) + .diverseGroupId(HaFlowHelper.DIVERSE_GROUP_ID) .dumpType(dumpType) - .encapsulationType(FLOW_ENCAPSULATION_TYPE) - .flowTimeCreate(TIME_CREATE.toString()) - .flowTimeModify(TIME_MODIFY.toString()) - .haFlowId(HA_FLOW_ID) - .ignoreBandwidth(IGNORE_BANDWIDTH) - .maximumBandwidth(MAXIMUM_BANDWIDTH) - .maxLatency(MAX_LATENCY) - .maxLatencyTier2(MAX_LATENCY_TIER_2) - .pathComputationStrategy(PATH_COMPUTATION_STRATEGY) - .pinned(PINNED) - .priority(PRIORITY) - .periodicPings(PERIODIC_PINGS) - .status(FLOW_STATUS) - .sharedPort(SHARED_PORT) - .sharedInnerVlan(SHARED_INNER_VLAN) - .sharedOuterVlan(SHARED_OUTER_VLAN) - .sharedSwitchId(SHARED_SWITCH.getSwitchId().toString()) - .strictBandwidth(STRICT_BANDWIDTH) + .encapsulationType(HaFlowHelper.FLOW_ENCAPSULATION_TYPE) + .flowTimeCreate(HaFlowHelper.TIME_CREATE.toString()) + .flowTimeModify(HaFlowHelper.TIME_MODIFY.toString()) + .haFlowId(HaFlowHelper.HA_FLOW_ID) + .ignoreBandwidth(HaFlowHelper.IGNORE_BANDWIDTH) + .maximumBandwidth(HaFlowHelper.MAXIMUM_BANDWIDTH) + .maxLatency(HaFlowHelper.MAX_LATENCY) + .maxLatencyTier2(HaFlowHelper.MAX_LATENCY_TIER_2) + 
.pathComputationStrategy(HaFlowHelper.PATH_COMPUTATION_STRATEGY) + .pinned(HaFlowHelper.PINNED) + .priority(HaFlowHelper.PRIORITY) + .periodicPings(HaFlowHelper.PERIODIC_PINGS) + .status(HaFlowHelper.FLOW_STATUS) + .sharedPort(HaFlowHelper.SHARED_PORT) + .sharedInnerVlan(HaFlowHelper.SHARED_INNER_VLAN) + .sharedOuterVlan(HaFlowHelper.SHARED_OUTER_VLAN) + .sharedSwitchId(HaFlowHelper.SHARED_SWITCH.getSwitchId().toString()) + .strictBandwidth(HaFlowHelper.STRICT_BANDWIDTH) .taskId(correlationId) .haSubFlows(createPersistenceHaSubFlowDumpWrapper()) .protectedForwardPath(createPersistenceHaFlowPathDump(FlowPathDirection.FORWARD)) @@ -499,12 +453,12 @@ private HaFlowEventDump.HaFlowPathDump createPersistenceHaFlowPathDump(FlowPathD .yPointGroupId(GroupId.MIN_FLOW_GROUP_ID.toString()) .yPointSwitchId("00:03") .yPointMeterId(MeterId.LACP_REPLY_METER_ID.toString()) - .timeCreate(TIME_CREATE.toString()) - .timeModify(TIME_MODIFY.toString()) + .timeCreate(HaFlowHelper.TIME_CREATE.toString()) + .timeModify(HaFlowHelper.TIME_MODIFY.toString()) .sharedPointMeterId(MeterId.LACP_REPLY_METER_ID.toString()) .cookie(FlowSegmentCookie.builder().direction(direction).build().toString()) - .ignoreBandwidth(IGNORE_BANDWIDTH) - .status(FLOW_PATH_STATUS.toString()) + .ignoreBandwidth(HaFlowHelper.IGNORE_BANDWIDTH) + .status(HaFlowHelper.FLOW_PATH_STATUS.toString()) .paths(pathNodesList) .haSubFlows(Lists.newArrayList(createPersistenceHaSubFlowDump())) .build(); @@ -512,16 +466,16 @@ private HaFlowEventDump.HaFlowPathDump createPersistenceHaFlowPathDump(FlowPathD private HaFlowEventDump.HaSubFlowDump createPersistenceHaSubFlowDump() { return HaFlowEventDump.HaSubFlowDump.builder() - .haSubFlowId(HA_SUB_FLOW_ID) - .haFlowId(HA_FLOW_ID) - .status(FLOW_STATUS) - .endpointSwitchId(ENDPOINT_SWITCH_ID.toString()) - .endpointPort(ENDPOINT_PORT) - .endpointVlan(ENDPOINT_VLAN) - .endpointInnerVlan(ENDPOINT_INNER_VLAN) - .description(DESCRIPTION) - .timeCreate(TIME_CREATE.toString()) - 
.timeModify(TIME_MODIFY.toString()) + .haSubFlowId(HaFlowHelper.HA_SUB_FLOW_ID) + .haFlowId(HaFlowHelper.HA_FLOW_ID) + .status(HaFlowHelper.FLOW_STATUS) + .endpointSwitchId(HaFlowHelper.ENDPOINT_SWITCH_ID.toString()) + .endpointPort(HaFlowHelper.ENDPOINT_PORT) + .endpointVlan(HaFlowHelper.ENDPOINT_VLAN) + .endpointInnerVlan(HaFlowHelper.ENDPOINT_INNER_VLAN) + .description(HaFlowHelper.DESCRIPTION) + .timeCreate(HaFlowHelper.TIME_CREATE.toString()) + .timeModify(HaFlowHelper.TIME_MODIFY.toString()) .build(); } @@ -533,29 +487,29 @@ private HaSubFlowDumpWrapper createPersistenceHaSubFlowDumpWrapper() { private HaFlowDumpData createHaFlowDumpData(DumpType dumpType, String correlationId) { return HaFlowDumpData.builder() - .affinityGroupId(AFFINITY_GROUP_ID) - .allocateProtectedPath(ALLOCATE_PROTECTED_PATH) - .description(DESCRIPTION) - .diverseGroupId(DIVERSE_GROUP_ID) + .affinityGroupId(HaFlowHelper.AFFINITY_GROUP_ID) + .allocateProtectedPath(HaFlowHelper.ALLOCATE_PROTECTED_PATH) + .description(HaFlowHelper.DESCRIPTION) + .diverseGroupId(HaFlowHelper.DIVERSE_GROUP_ID) .dumpType(dumpType) - .encapsulationType(FLOW_ENCAPSULATION_TYPE) - .flowTimeCreate(TIME_CREATE) - .flowTimeModify(TIME_MODIFY) - .haFlowId(HA_FLOW_ID) - .ignoreBandwidth(IGNORE_BANDWIDTH) - .maximumBandwidth(MAXIMUM_BANDWIDTH) - .maxLatency(MAX_LATENCY) - .maxLatencyTier2(MAX_LATENCY_TIER_2) - .pathComputationStrategy(PATH_COMPUTATION_STRATEGY) - .pinned(PINNED) - .priority(PRIORITY) - .periodicPings(PERIODIC_PINGS) - .status(FLOW_STATUS) - .sharedPort(SHARED_PORT) - .sharedInnerVlan(SHARED_INNER_VLAN) - .sharedOuterVlan(SHARED_OUTER_VLAN) - .sharedSwitchId(SHARED_SWITCH.getSwitchId()) - .strictBandwidth(STRICT_BANDWIDTH) + .encapsulationType(HaFlowHelper.FLOW_ENCAPSULATION_TYPE) + .flowTimeCreate(HaFlowHelper.TIME_CREATE) + .flowTimeModify(HaFlowHelper.TIME_MODIFY) + .haFlowId(HaFlowHelper.HA_FLOW_ID) + .ignoreBandwidth(HaFlowHelper.IGNORE_BANDWIDTH) + .maximumBandwidth(HaFlowHelper.MAXIMUM_BANDWIDTH) 
+ .maxLatency(HaFlowHelper.MAX_LATENCY) + .maxLatencyTier2(HaFlowHelper.MAX_LATENCY_TIER_2) + .pathComputationStrategy(HaFlowHelper.PATH_COMPUTATION_STRATEGY) + .pinned(HaFlowHelper.PINNED) + .priority(HaFlowHelper.PRIORITY) + .periodicPings(HaFlowHelper.PERIODIC_PINGS) + .status(HaFlowHelper.FLOW_STATUS) + .sharedPort(HaFlowHelper.SHARED_PORT) + .sharedInnerVlan(HaFlowHelper.SHARED_INNER_VLAN) + .sharedOuterVlan(HaFlowHelper.SHARED_OUTER_VLAN) + .sharedSwitchId(HaFlowHelper.SHARED_SWITCH.getSwitchId()) + .strictBandwidth(HaFlowHelper.STRICT_BANDWIDTH) .taskId(correlationId) .haSubFlows(Lists.newArrayList(createHaSubFlowDump())) .protectedForwardPath(createHaFlowPathDump(FlowPathDirection.FORWARD)) @@ -580,13 +534,13 @@ private HaFlowPathDump createHaFlowPathDump(FlowPathDirection direction) { .yPointGroupId(GroupId.MIN_FLOW_GROUP_ID) .yPointSwitchId(new SwitchId("00:03")) .yPointMeterId(MeterId.LACP_REPLY_METER_ID) - .timeCreate(TIME_CREATE) - .timeModify(TIME_MODIFY) + .timeCreate(HaFlowHelper.TIME_CREATE) + .timeModify(HaFlowHelper.TIME_MODIFY) .sharedPointMeterId(MeterId.LACP_REPLY_METER_ID) .sharedSwitchId(new SwitchId("00:01")) .cookie(FlowSegmentCookie.builder().direction(direction).build()) - .ignoreBandwidth(IGNORE_BANDWIDTH) - .status(FLOW_PATH_STATUS) + .ignoreBandwidth(HaFlowHelper.IGNORE_BANDWIDTH) + .status(HaFlowHelper.FLOW_PATH_STATUS) .paths(pathNodesList) .haSubFlows(haSubFlowDump) .build(); @@ -594,58 +548,18 @@ private HaFlowPathDump createHaFlowPathDump(FlowPathDirection direction) { private HaSubFlowDump createHaSubFlowDump() { return HaSubFlowDump.builder() - .haSubFlowId(HA_SUB_FLOW_ID) - .haFlowId(HA_FLOW_ID) - .status(FLOW_STATUS) - .endpointSwitchId(ENDPOINT_SWITCH_ID) - .endpointPort(ENDPOINT_PORT) - .endpointVlan(ENDPOINT_VLAN) - .endpointInnerVlan(ENDPOINT_INNER_VLAN) - .description(DESCRIPTION) - .timeCreate(TIME_CREATE) - .timeModify(TIME_MODIFY) + .haSubFlowId(HaFlowHelper.HA_SUB_FLOW_ID) + .haFlowId(HaFlowHelper.HA_FLOW_ID) + 
.status(HaFlowHelper.FLOW_STATUS) + .endpointSwitchId(HaFlowHelper.ENDPOINT_SWITCH_ID) + .endpointPort(HaFlowHelper.ENDPOINT_PORT) + .endpointVlan(HaFlowHelper.ENDPOINT_VLAN) + .endpointInnerVlan(HaFlowHelper.ENDPOINT_INNER_VLAN) + .description(HaFlowHelper.DESCRIPTION) + .timeCreate(HaFlowHelper.TIME_CREATE) + .timeModify(HaFlowHelper.TIME_MODIFY) .build(); } - private HaFlow createHaFlow() { - HaFlow result = new HaFlow(HA_FLOW_ID, - SHARED_SWITCH, - SHARED_PORT, - SHARED_OUTER_VLAN, - SHARED_INNER_VLAN, - MAXIMUM_BANDWIDTH, - PATH_COMPUTATION_STRATEGY, - FLOW_ENCAPSULATION_TYPE, - MAX_LATENCY, - MAX_LATENCY_TIER_2, - IGNORE_BANDWIDTH, - PERIODIC_PINGS, - PINNED, - PRIORITY, - STRICT_BANDWIDTH, - DESCRIPTION, - ALLOCATE_PROTECTED_PATH, - FLOW_STATUS, - STATUS_INFO, - AFFINITY_GROUP_ID, - DIVERSE_GROUP_ID); - - HaFlowPath forward = createHaFlowPath("forward ID", FlowPathDirection.FORWARD); - HaFlowPath reverse = createHaFlowPath("reverse ID", FlowPathDirection.REVERSE); - - result.setForwardPath(forward); - result.setReversePath(reverse); - result.addPaths(forward, reverse); - - return result; - } - private HaFlowPath createHaFlowPath(String pathId, FlowPathDirection direction) { - return HaFlowPath.builder() - .haPathId(new PathId(pathId)) - .sharedSwitch(SHARED_SWITCH) - .bandwidth(50_000L) - .cookie(FlowSegmentCookie.builder().direction(direction).build()) - .build(); - } } diff --git a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/delete/actions/DeallocateResourcesAction.java b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/delete/actions/DeallocateResourcesAction.java index 6b875d82540..5ebecdecb6e 100644 --- a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/delete/actions/DeallocateResourcesAction.java +++ 
b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/delete/actions/DeallocateResourcesAction.java @@ -47,7 +47,7 @@ public void perform(State from, State to, Event event, HaFlowDeleteContext conte resourcesManager.deallocateHaFlowResources(resources)); FlowHistoryService.using(stateMachine.getCarrier()).save(HaFlowHistory - .of(stateMachine.getHaFlowId()) + .of(stateMachine.getCommandContext().getCorrelationId()) .withAction("Flow resources have been deallocated") .withDescription(format("The ha-flow resources for %s / %s have been deallocated", resources.getForward().getPathId(), resources.getReverse().getPathId())) diff --git a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/update/HaFlowUpdateFsm.java b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/update/HaFlowUpdateFsm.java index f803229d20f..b29ebf9b1cd 100644 --- a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/update/HaFlowUpdateFsm.java +++ b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/update/HaFlowUpdateFsm.java @@ -56,12 +56,14 @@ import org.openkilda.wfm.topology.flowhs.fsm.haflow.update.actions.RevertFlowStatusAction; import org.openkilda.wfm.topology.flowhs.fsm.haflow.update.actions.RevertNewRulesAction; import org.openkilda.wfm.topology.flowhs.fsm.haflow.update.actions.RevertPathsSwapAction; +import org.openkilda.wfm.topology.flowhs.fsm.haflow.update.actions.SkipResourceManagementOnEndpointsUpdateAction; import org.openkilda.wfm.topology.flowhs.fsm.haflow.update.actions.SwapFlowPathsAction; import org.openkilda.wfm.topology.flowhs.fsm.haflow.update.actions.UpdateFlowStatusAction; import org.openkilda.wfm.topology.flowhs.fsm.haflow.update.actions.UpdateHaFlowAction; import 
org.openkilda.wfm.topology.flowhs.fsm.haflow.update.actions.ValidateFlowAction; import org.openkilda.wfm.topology.flowhs.service.FlowProcessingEventListener; import org.openkilda.wfm.topology.flowhs.service.haflow.HaFlowGenericCarrier; +import org.openkilda.wfm.topology.flowhs.utils.EndpointUpdateType; import io.micrometer.core.instrument.LongTaskTimer; import io.micrometer.core.instrument.LongTaskTimer.Sample; @@ -85,6 +87,7 @@ public final class HaFlowUpdateFsm extends HaFlowPathSwappingFsm { private HaFlowRequest targetHaFlow; private FlowStatus newFlowStatus; + private EndpointUpdateType endpointUpdateType = EndpointUpdateType.NONE; public HaFlowUpdateFsm(@NonNull CommandContext commandContext, @NonNull HaFlowGenericCarrier carrier, @NonNull String flowId, @@ -293,9 +296,14 @@ public Factory(@NonNull HaFlowGenericCarrier carrier, @NonNull Config config, .perform(new HandleNotCompletedCommandsAction<>()); builder.transition().from(State.NEW_RULES_REVERTED) - .to(State.REVERTING_ALLOCATED_RESOURCES) + .to(State.SKIP_REVERTING_RESOURCES_ALLOCATION) .on(Event.NEXT); + builder.transitions().from(State.SKIP_REVERTING_RESOURCES_ALLOCATION) + .toAmong(State.REVERTING_ALLOCATED_RESOURCES, State.REVERTING_FLOW) + .onEach(Event.NEXT, Event.UPDATE_ENDPOINTS_ONLY) + .perform(new SkipResourceManagementOnEndpointsUpdateAction(persistenceManager)); + builder.onEntry(State.REVERTING_ALLOCATED_RESOURCES) .perform(reportErrorAction); builder.transitions().from(State.REVERTING_ALLOCATED_RESOURCES) @@ -335,6 +343,18 @@ public Factory(@NonNull HaFlowGenericCarrier carrier, @NonNull Config config, .on(Event.NEXT) .perform(new NotifyHaFlowMonitorAction<>(persistenceManager, carrier)); + builder.transition() + .from(State.FLOW_UPDATED) + .to(State.RESOURCE_ALLOCATION_COMPLETED) + .on(Event.UPDATE_ENDPOINTS_ONLY) + .perform(new PostResourceAllocationAction(persistenceManager)); + + builder.transition() + .from(State.OLD_RULES_REMOVED) + .to(State.UPDATING_FLOW_STATUS) + 
.on(Event.UPDATE_ENDPOINTS_ONLY) + .perform(new SkipResourceManagementOnEndpointsUpdateAction(persistenceManager)); + builder.defineFinalState(State.FINISHED) .addEntryAction(new OnFinishedAction(dashboardLogger)); builder.defineFinalState(State.FINISHED_WITH_ERROR) @@ -417,11 +437,13 @@ public enum State { NOTIFY_FLOW_MONITOR_WITH_ERROR, NOTIFY_FLOW_STATS_ON_NEW_PATHS, - NOTIFY_FLOW_STATS_ON_REMOVED_PATHS + NOTIFY_FLOW_STATS_ON_REMOVED_PATHS, + SKIP_REVERTING_RESOURCES_ALLOCATION } public enum Event { NEXT, + UPDATE_ENDPOINTS_ONLY, NO_PATH_FOUND, diff --git a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/update/actions/SkipResourceManagementOnEndpointsUpdateAction.java b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/update/actions/SkipResourceManagementOnEndpointsUpdateAction.java new file mode 100644 index 00000000000..3368c85c493 --- /dev/null +++ b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/update/actions/SkipResourceManagementOnEndpointsUpdateAction.java @@ -0,0 +1,48 @@ +/* Copyright 2023 Telstra Open Source + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.openkilda.wfm.topology.flowhs.fsm.haflow.update.actions; + +import org.openkilda.persistence.PersistenceManager; +import org.openkilda.wfm.topology.flowhs.fsm.common.actions.FlowProcessingWithHistorySupportAction; +import org.openkilda.wfm.topology.flowhs.fsm.haflow.update.HaFlowUpdateContext; +import org.openkilda.wfm.topology.flowhs.fsm.haflow.update.HaFlowUpdateFsm; +import org.openkilda.wfm.topology.flowhs.fsm.haflow.update.HaFlowUpdateFsm.Event; +import org.openkilda.wfm.topology.flowhs.fsm.haflow.update.HaFlowUpdateFsm.State; +import org.openkilda.wfm.topology.flowhs.service.history.FlowHistoryService; +import org.openkilda.wfm.topology.flowhs.service.history.HaFlowHistory; + +public class SkipResourceManagementOnEndpointsUpdateAction extends FlowProcessingWithHistorySupportAction< + HaFlowUpdateFsm, HaFlowUpdateFsm.State, HaFlowUpdateFsm.Event, HaFlowUpdateContext> { + + public SkipResourceManagementOnEndpointsUpdateAction(PersistenceManager persistenceManager) { + super(persistenceManager); + } + + @Override + protected void perform(State from, State to, Event event, + HaFlowUpdateContext context, HaFlowUpdateFsm stateMachine) { + + if (stateMachine.getEndpointUpdateType().isPartialUpdate()) { + + FlowHistoryService.using(stateMachine.getCarrier()).save(HaFlowHistory + .of(stateMachine.getCommandContext().getCorrelationId()) + .withHaFlowId(stateMachine.getHaFlowId()) + .withAction("HA flow endpoints update: skip allocating (deallocating) resources")); + + stateMachine.fire(Event.UPDATE_ENDPOINTS_ONLY); + } + } +} diff --git a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/update/actions/UpdateHaFlowAction.java b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/update/actions/UpdateHaFlowAction.java index 155aca0ee2e..eb2bed9727c 100644 --- 
a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/update/actions/UpdateHaFlowAction.java +++ b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/update/actions/UpdateHaFlowAction.java @@ -34,6 +34,7 @@ import org.openkilda.wfm.topology.flowhs.fsm.haflow.update.HaFlowUpdateFsm.State; import org.openkilda.wfm.topology.flowhs.service.history.FlowHistoryService; import org.openkilda.wfm.topology.flowhs.service.history.HaFlowHistory; +import org.openkilda.wfm.topology.flowhs.utils.EndpointUpdateType; import lombok.extern.slf4j.Slf4j; @@ -58,13 +59,25 @@ protected Optional performWithResponse(State from, State to, Event even log.debug("Updating the flow {} with properties: {}", haFlowId, targetHaFlow); - // Complete target ha-flow in FSM with values from original ha-flow + stateMachine.setEndpointUpdateType(EndpointUpdateType.determineUpdateType(haFlow, targetHaFlow)); + + // Complete the target HA-flow in FSM with values from original HA-flow stateMachine.setTargetHaFlow(updateFlow(haFlow, targetHaFlow)); FlowHistoryService.using(stateMachine.getCarrier()).save(HaFlowHistory .of(stateMachine.getCommandContext().getCorrelationId()) .withAction("HA-flow properties have been updated.")); }); + if (stateMachine.getEndpointUpdateType().isPartialUpdate()) { + stateMachine.setNewPrimaryPathIds(stateMachine.getOldPrimaryPathIds()); + stateMachine.setNewProtectedPathIds(stateMachine.getOldProtectedPathIds()); + + FlowHistoryService.using(stateMachine.getCarrier()).save(HaFlowHistory + .of(stateMachine.getCommandContext().getCorrelationId()) + .withAction("HA-flow endpoints update is executed.")); + stateMachine.fire(Event.UPDATE_ENDPOINTS_ONLY); + } + return Optional.empty(); } diff --git a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/update/FlowUpdateFsm.java 
b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/update/FlowUpdateFsm.java index b7cd832d87f..699bb62a219 100644 --- a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/update/FlowUpdateFsm.java +++ b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/update/FlowUpdateFsm.java @@ -70,6 +70,7 @@ import org.openkilda.wfm.topology.flowhs.model.RequestedFlow; import org.openkilda.wfm.topology.flowhs.service.FlowUpdateEventListener; import org.openkilda.wfm.topology.flowhs.service.FlowUpdateHubCarrier; +import org.openkilda.wfm.topology.flowhs.utils.EndpointUpdateType; import io.micrometer.core.instrument.LongTaskTimer; import io.micrometer.core.instrument.LongTaskTimer.Sample; @@ -106,7 +107,7 @@ public final class FlowUpdateFsm extends FlowPathSwappingFsm bulkUpdateFlowIds; private boolean doNotRevert; - private EndpointUpdate endpointUpdate = EndpointUpdate.NONE; + private EndpointUpdateType endpointUpdateType = EndpointUpdateType.NONE; private FlowLoopOperation flowLoopOperation = FlowLoopOperation.NONE; public FlowUpdateFsm(@NonNull CommandContext commandContext, @NonNull FlowUpdateHubCarrier carrier, @@ -454,23 +455,6 @@ public FlowUpdateFsm newInstance(@NonNull String flowId, @NonNull CommandContext } } - public enum EndpointUpdate { - NONE(false), - SOURCE(true), - DESTINATION(true), - BOTH(true); - - private boolean partialUpdate; - - EndpointUpdate(boolean partialUpdate) { - this.partialUpdate = partialUpdate; - } - - public boolean isPartialUpdate() { - return partialUpdate; - } - } - public enum FlowLoopOperation { NONE, CREATE, diff --git a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/update/actions/HandleNotCompletedCommandsAction.java 
b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/update/actions/HandleNotCompletedCommandsAction.java index 4f584bc2e0e..d06bc83e3ce 100644 --- a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/update/actions/HandleNotCompletedCommandsAction.java +++ b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/update/actions/HandleNotCompletedCommandsAction.java @@ -52,7 +52,7 @@ public void perform(State from, State to, Event event, FlowUpdateContext context log.debug("Abandoning all pending commands: {}", stateMachine.getPendingCommands()); stateMachine.clearPendingCommands(); - if (stateMachine.getEndpointUpdate().isPartialUpdate()) { + if (stateMachine.getEndpointUpdateType().isPartialUpdate()) { stateMachine.saveActionToHistory("Skip paths and resources allocation"); stateMachine.fire(Event.UPDATE_ENDPOINT_RULES_ONLY); } diff --git a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/update/actions/InstallIngressRulesAction.java b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/update/actions/InstallIngressRulesAction.java index fecf474bcfe..bff63d74a40 100644 --- a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/update/actions/InstallIngressRulesAction.java +++ b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/update/actions/InstallIngressRulesAction.java @@ -65,7 +65,7 @@ protected void perform(State from, State to, Event event, FlowUpdateContext cont SpeakerRequestBuildContext speakerContext = buildBaseSpeakerContextForInstall( newPrimaryForward.getSrcSwitchId(), newPrimaryReverse.getSrcSwitchId()); Collection commands = new ArrayList<>(); - switch (stateMachine.getEndpointUpdate()) { + switch 
(stateMachine.getEndpointUpdateType()) { case SOURCE: speakerContext.getForward().setUpdateMeter(false); commands.addAll(getCommandsForSourceUpdate(commandBuilder, stateMachine, flow, @@ -76,7 +76,7 @@ protected void perform(State from, State to, Event event, FlowUpdateContext cont commands.addAll(getCommandsForDestinationUpdate(commandBuilder, stateMachine, flow, newPrimaryForward, newPrimaryReverse, speakerContext)); break; - case BOTH: + case ALL: speakerContext.getForward().setUpdateMeter(false); speakerContext.getReverse().setUpdateMeter(false); if (stateMachine.getFlowLoopOperation() == FlowLoopOperation.NONE) { diff --git a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/update/actions/InstallNonIngressRulesAction.java b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/update/actions/InstallNonIngressRulesAction.java index b9038ff915f..ee2f4ba26fd 100644 --- a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/update/actions/InstallNonIngressRulesAction.java +++ b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/update/actions/InstallNonIngressRulesAction.java @@ -90,8 +90,8 @@ private Collection buildCommands(FlowCommandBuilder c FlowUpdateFsm stateMachine, Flow flow, FlowPath path, FlowPath oppositePath) { CommandContext context = stateMachine.getCommandContext(); - switch (stateMachine.getEndpointUpdate()) { - case BOTH: + switch (stateMachine.getEndpointUpdateType()) { + case ALL: return new ArrayList<>(commandBuilder.buildEgressOnly(context, flow, path, oppositePath)); case SOURCE: return buildCommandsForSourceUpdate(commandBuilder, stateMachine, flow, path, oppositePath, context); diff --git a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/update/actions/RemoveOldRulesAction.java 
b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/update/actions/RemoveOldRulesAction.java index 2afb24d5e1b..724fa7f5890 100644 --- a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/update/actions/RemoveOldRulesAction.java +++ b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/update/actions/RemoveOldRulesAction.java @@ -61,11 +61,11 @@ protected void perform(State from, State to, Event event, FlowUpdateContext cont Flow originalFlow = getOriginalFlowWithPaths(stateMachine, stateMachine.getOriginalFlow()); MirrorContext mirrorContext = MirrorContext.builder().removeFlowOperation(true).build(); - if (stateMachine.getEndpointUpdate().isPartialUpdate()) { + if (stateMachine.getEndpointUpdateType().isPartialUpdate()) { SpeakerRequestBuildContext speakerContext = getSpeakerRequestBuildContext(stateMachine, false); FlowPath forward = getFlowPath(stateMachine.getOldPrimaryForwardPath()); FlowPath reverse = getFlowPath(stateMachine.getOldPrimaryReversePath()); - switch (stateMachine.getEndpointUpdate()) { + switch (stateMachine.getEndpointUpdateType()) { case SOURCE: factories.addAll(buildCommandsForSourceUpdate(commandBuilder, stateMachine, originalFlow, forward, reverse, speakerContext, mirrorContext.toBuilder().removeGroup(false).build())); @@ -74,7 +74,7 @@ protected void perform(State from, State to, Event event, FlowUpdateContext cont factories.addAll(buildCommandsForDestinationUpdate(commandBuilder, stateMachine, originalFlow, forward, reverse, speakerContext, mirrorContext.toBuilder().removeGroup(false).build())); break; - case BOTH: + case ALL: default: switch (stateMachine.getFlowLoopOperation()) { case DELETE: diff --git a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/update/actions/RevertNewRulesAction.java 
b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/update/actions/RevertNewRulesAction.java index c74821e31cc..55d75e10304 100644 --- a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/update/actions/RevertNewRulesAction.java +++ b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/update/actions/RevertNewRulesAction.java @@ -88,11 +88,11 @@ protected void perform(State from, State to, Event event, FlowUpdateContext cont if (stateMachine.getNewPrimaryForwardPath() != null && stateMachine.getNewPrimaryReversePath() != null) { FlowPath newForward = getFlowPath(flow, stateMachine.getNewPrimaryForwardPath()); FlowPath newReverse = getFlowPath(flow, stateMachine.getNewPrimaryReversePath()); - if (stateMachine.getEndpointUpdate().isPartialUpdate()) { + if (stateMachine.getEndpointUpdateType().isPartialUpdate()) { SpeakerRequestBuildContext speakerRequestBuildContext = getSpeakerRequestBuildContextForRemoval( stateMachine, false); Flow oldFlow = RequestedFlowMapper.INSTANCE.toFlow(stateMachine.getOriginalFlow()); - switch (stateMachine.getEndpointUpdate()) { + switch (stateMachine.getEndpointUpdateType()) { case SOURCE: switch (stateMachine.getFlowLoopOperation()) { case NONE: @@ -156,8 +156,8 @@ protected void perform(State from, State to, Event event, FlowUpdateContext cont FlowPath newReverse = getFlowPath(flow, stateMachine.getNewProtectedReversePath()); Flow oldFlow = RequestedFlowMapper.INSTANCE.toFlow(stateMachine.getOriginalFlow()); - if (stateMachine.getEndpointUpdate().isPartialUpdate()) { - switch (stateMachine.getEndpointUpdate()) { + if (stateMachine.getEndpointUpdateType().isPartialUpdate()) { + switch (stateMachine.getEndpointUpdateType()) { case SOURCE: if (stateMachine.getFlowLoopOperation() == NONE) { revertCommands.addAll(commandBuilder.buildEgressOnlyOneDirection(commandContext, diff --git 
a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/update/actions/RevertPathsSwapAction.java b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/update/actions/RevertPathsSwapAction.java index 7514e7b7524..b8ffcb5b383 100644 --- a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/update/actions/RevertPathsSwapAction.java +++ b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/update/actions/RevertPathsSwapAction.java @@ -38,7 +38,7 @@ public RevertPathsSwapAction(PersistenceManager persistenceManager) { @Override protected void perform(State from, State to, Event event, FlowUpdateContext context, FlowUpdateFsm stateMachine) { - if (stateMachine.getEndpointUpdate().isPartialUpdate()) { + if (stateMachine.getEndpointUpdateType().isPartialUpdate()) { stateMachine.saveActionToHistory("Skip paths swap"); return; } diff --git a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/update/actions/SkipPathsAndResourcesDeallocationAction.java b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/update/actions/SkipPathsAndResourcesDeallocationAction.java index dbbcccfce23..9acf91080d8 100644 --- a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/update/actions/SkipPathsAndResourcesDeallocationAction.java +++ b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/update/actions/SkipPathsAndResourcesDeallocationAction.java @@ -43,7 +43,7 @@ public SkipPathsAndResourcesDeallocationAction(PersistenceManager persistenceMan @Override public void perform(State from, State to, Event event, FlowUpdateContext context, FlowUpdateFsm stateMachine) { - if (stateMachine.getEndpointUpdate().isPartialUpdate()) { 
+ if (stateMachine.getEndpointUpdateType().isPartialUpdate()) { Flow originalFlow = RequestedFlowMapper.INSTANCE.toFlow(stateMachine.getOriginalFlow()); originalFlow.setAffinityGroupId(stateMachine.getOriginalAffinityFlowGroup()); diff --git a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/update/actions/UpdateFlowAction.java b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/update/actions/UpdateFlowAction.java index 825d99ea3d0..29a55df5bea 100644 --- a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/update/actions/UpdateFlowAction.java +++ b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/update/actions/UpdateFlowAction.java @@ -33,15 +33,14 @@ import org.openkilda.wfm.topology.flowhs.fsm.common.actions.NbTrackableWithHistorySupportAction; import org.openkilda.wfm.topology.flowhs.fsm.update.FlowUpdateContext; import org.openkilda.wfm.topology.flowhs.fsm.update.FlowUpdateFsm; -import org.openkilda.wfm.topology.flowhs.fsm.update.FlowUpdateFsm.EndpointUpdate; import org.openkilda.wfm.topology.flowhs.fsm.update.FlowUpdateFsm.Event; import org.openkilda.wfm.topology.flowhs.fsm.update.FlowUpdateFsm.FlowLoopOperation; import org.openkilda.wfm.topology.flowhs.fsm.update.FlowUpdateFsm.State; import org.openkilda.wfm.topology.flowhs.mapper.RequestedFlowMapper; import org.openkilda.wfm.topology.flowhs.model.RequestedFlow; +import org.openkilda.wfm.topology.flowhs.utils.EndpointUpdateType; import lombok.extern.slf4j.Slf4j; -import org.apache.storm.shade.com.google.common.base.Objects; import java.util.Optional; @@ -77,12 +76,12 @@ protected Optional performWithResponse(State from, State to, Event even // Complete target flow in FSM with values from original flow stateMachine.setTargetFlow(updateFlow(flow, targetFlow)); - EndpointUpdate endpointUpdate = 
updateEndpointRulesOnly(originalFlow, targetFlow, + EndpointUpdateType endpointUpdateType = EndpointUpdateType.determineUpdateType(originalFlow, targetFlow, stateMachine.getOriginalDiverseFlowGroup(), flow.getDiverseGroupId(), stateMachine.getOriginalAffinityFlowGroup(), flow.getAffinityGroupId()); - stateMachine.setEndpointUpdate(endpointUpdate); + stateMachine.setEndpointUpdateType(endpointUpdateType); - if (endpointUpdate.isPartialUpdate()) { + if (endpointUpdateType.isPartialUpdate()) { FlowLoopOperation flowLoopOperation = detectFlowLoopOperation(originalFlow, targetFlow); stateMachine.setFlowLoopOperation(flowLoopOperation); @@ -101,7 +100,7 @@ protected Optional performWithResponse(State from, State to, Event even stateMachine.saveActionToHistory("The flow properties were updated"); - if (stateMachine.getEndpointUpdate().isPartialUpdate()) { + if (stateMachine.getEndpointUpdateType().isPartialUpdate()) { stateMachine.saveActionToHistory("Skip paths and resources allocation"); stateMachine.fire(Event.UPDATE_ENDPOINT_RULES_ONLY); } @@ -209,59 +208,6 @@ private String getOrCreateAffinityFlowGroupId(String flowId) throws FlowProcessi format("Flow %s not found", flowId))); } - private FlowUpdateFsm.EndpointUpdate updateEndpointRulesOnly(RequestedFlow originalFlow, RequestedFlow targetFlow, - String originalDiverseGroupId, - String targetDiverseGroupId, - String originalAffinityGroupId, - String targetAffinityGroupId) { - boolean updateEndpointOnly = originalFlow.getSrcSwitch().equals(targetFlow.getSrcSwitch()); - updateEndpointOnly &= originalFlow.getDestSwitch().equals(targetFlow.getDestSwitch()); - - updateEndpointOnly &= originalFlow.isAllocateProtectedPath() == targetFlow.isAllocateProtectedPath(); - updateEndpointOnly &= originalFlow.getBandwidth() == targetFlow.getBandwidth(); - updateEndpointOnly &= originalFlow.isIgnoreBandwidth() == targetFlow.isIgnoreBandwidth(); - updateEndpointOnly &= originalFlow.isStrictBandwidth() == targetFlow.isStrictBandwidth(); 
- - updateEndpointOnly &= Objects.equal(originalFlow.getMaxLatency(), targetFlow.getMaxLatency()); - updateEndpointOnly &= Objects.equal(originalFlow.getFlowEncapsulationType(), - targetFlow.getFlowEncapsulationType()); - updateEndpointOnly &= Objects.equal(originalFlow.getPathComputationStrategy(), - targetFlow.getPathComputationStrategy()); - - updateEndpointOnly &= Objects.equal(originalDiverseGroupId, targetDiverseGroupId); - updateEndpointOnly &= Objects.equal(originalAffinityGroupId, targetAffinityGroupId); - - // TODO(tdurakov): check connected devices as well - boolean srcEndpointChanged = originalFlow.getSrcPort() != targetFlow.getSrcPort(); - srcEndpointChanged |= originalFlow.getSrcVlan() != targetFlow.getSrcVlan(); - srcEndpointChanged |= originalFlow.getSrcInnerVlan() != targetFlow.getSrcInnerVlan(); - - // TODO(tdurakov): check connected devices as well - boolean dstEndpointChanged = originalFlow.getDestPort() != targetFlow.getDestPort(); - dstEndpointChanged |= originalFlow.getDestVlan() != targetFlow.getDestVlan(); - dstEndpointChanged |= originalFlow.getDestInnerVlan() != targetFlow.getDestInnerVlan(); - - if (originalFlow.getLoopSwitchId() != targetFlow.getLoopSwitchId()) { - srcEndpointChanged |= originalFlow.getSrcSwitch().equals(originalFlow.getLoopSwitchId()); - srcEndpointChanged |= originalFlow.getSrcSwitch().equals(targetFlow.getLoopSwitchId()); - dstEndpointChanged |= originalFlow.getDestSwitch().equals(originalFlow.getLoopSwitchId()); - dstEndpointChanged |= originalFlow.getDestSwitch().equals(targetFlow.getLoopSwitchId()); - } - - if (updateEndpointOnly) { - if (srcEndpointChanged && dstEndpointChanged) { - return EndpointUpdate.BOTH; - } else if (srcEndpointChanged) { - return EndpointUpdate.SOURCE; - } else if (dstEndpointChanged) { - return EndpointUpdate.DESTINATION; - } else { - return EndpointUpdate.NONE; - } - } - return EndpointUpdate.NONE; - } - private FlowLoopOperation detectFlowLoopOperation(RequestedFlow originalFlow, 
RequestedFlow targetFlow) { if (originalFlow.getLoopSwitchId() == null && targetFlow.getLoopSwitchId() == null) { return FlowLoopOperation.NONE; diff --git a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/utils/EndpointUpdateType.java b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/utils/EndpointUpdateType.java new file mode 100644 index 00000000000..a342768e326 --- /dev/null +++ b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/utils/EndpointUpdateType.java @@ -0,0 +1,179 @@ +/* Copyright 2023 Telstra Open Source + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
package org.openkilda.wfm.topology.flowhs.utils;

import org.openkilda.messaging.command.haflow.HaFlowRequest;
import org.openkilda.messaging.command.haflow.HaSubFlowDto;
import org.openkilda.model.FlowEndpoint;
import org.openkilda.model.HaFlow;
import org.openkilda.model.HaSubFlow;
import org.openkilda.model.SwitchId;
import org.openkilda.wfm.topology.flowhs.model.RequestedFlow;

import java.util.Collection;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;

/**
 * Describes which part of a flow's endpoints an update request touches, so the update FSMs
 * can decide between a lightweight "endpoints only" update and a full path recomputation.
 */
public enum EndpointUpdateType {
    NONE,
    SOURCE,
    DESTINATION,
    SHARED,
    SUBFLOWS,
    ALL;

    /**
     * Returns true when the update affects endpoints only (any value except {@link #NONE}).
     */
    public boolean isPartialUpdate() {
        return this != NONE;
    }

    /**
     * Determines whether an update to the target HA-flow requires only endpoints update or a full update.
     *
     * @param originalHaFlow an original HA-flow which is to be updated
     * @param targetHaFlow a target HA-flow from the update request
     * @return a type of the update required
     * @throws IllegalArgumentException if either argument is null, or sub-flow IDs are duplicated
     */
    public static EndpointUpdateType determineUpdateType(HaFlow originalHaFlow, HaFlowRequest targetHaFlow) {
        if (originalHaFlow == null || targetHaFlow == null) {
            throw new IllegalArgumentException("Original and target flow object cannot be null");
        }

        // A partial (endpoint-only) update is possible only when everything that influences
        // path computation and resource allocation stays the same.
        boolean updateEndpointOnly = originalHaFlow.getEndpointSwitchIds().equals(getEndpointSwitchIds(targetHaFlow))
                // booleans compared directly — no need to box them for Objects.equals
                && originalHaFlow.isAllocateProtectedPath() == targetHaFlow.isAllocateProtectedPath()
                && originalHaFlow.getMaximumBandwidth() == targetHaFlow.getMaximumBandwidth()
                && originalHaFlow.isIgnoreBandwidth() == targetHaFlow.isIgnoreBandwidth()
                && originalHaFlow.isStrictBandwidth() == targetHaFlow.isStrictBandwidth()
                && Objects.equals(originalHaFlow.getMaxLatency(), targetHaFlow.getMaxLatency())
                && Objects.equals(originalHaFlow.getMaxLatencyTier2(), targetHaFlow.getMaxLatencyTier2())
                && Objects.equals(originalHaFlow.getEncapsulationType(), targetHaFlow.getEncapsulationType())
                && Objects.equals(originalHaFlow.getPathComputationStrategy(),
                        targetHaFlow.getPathComputationStrategy())
                // NOTE(review): compares a diverse *group* ID with the request's diverse *flow* ID;
                // this matches the current mapper usage (the group ID is passed as the request's
                // diverse flow ID) — confirm against HaFlowMapper.toHaFlowRequest callers.
                && Objects.equals(originalHaFlow.getDiverseGroupId(), targetHaFlow.getDiverseFlowId());

        boolean sharedEndpointChanged =
                !originalHaFlow.getSharedEndpoint().isSwitchPortVlanEquals(targetHaFlow.getSharedEndpoint());

        boolean subflowEndpointChanged = isSubflowEndpointChanged(
                originalHaFlow.getHaSubFlows(), targetHaFlow.getSubFlows());

        if (!updateEndpointOnly) {
            return NONE;
        }
        if (sharedEndpointChanged && subflowEndpointChanged) {
            return ALL;
        } else if (sharedEndpointChanged) {
            return SHARED;
        } else if (subflowEndpointChanged) {
            return SUBFLOWS;
        } else {
            return NONE;
        }
    }

    /**
     * Determines whether an update to the target flow requires only endpoints update or a full update.
     *
     * @param originalFlow an original flow to be updated
     * @param targetFlow a target from the update request
     * @param originalDiverseGroupId a diverse group ID for the original flow
     * @param targetDiverseGroupId a diverse group ID for the target flow
     * @param originalAffinityGroupId an affinity group ID for the original flow
     * @param targetAffinityGroupId an affinity group ID for the target flow
     * @return a type of the update required
     * @throws IllegalArgumentException if either flow argument is null
     */
    public static EndpointUpdateType determineUpdateType(RequestedFlow originalFlow, RequestedFlow targetFlow,
                                                         String originalDiverseGroupId,
                                                         String targetDiverseGroupId,
                                                         String originalAffinityGroupId,
                                                         String targetAffinityGroupId) {
        // Null guard added for consistency with the HA-flow overload above.
        if (originalFlow == null || targetFlow == null) {
            throw new IllegalArgumentException("Original and target flow object cannot be null");
        }

        boolean updateEndpointOnly = originalFlow.getSrcSwitch().equals(targetFlow.getSrcSwitch())
                && originalFlow.getDestSwitch().equals(targetFlow.getDestSwitch())
                && originalFlow.isAllocateProtectedPath() == targetFlow.isAllocateProtectedPath()
                && originalFlow.getBandwidth() == targetFlow.getBandwidth()
                && originalFlow.isIgnoreBandwidth() == targetFlow.isIgnoreBandwidth()
                && originalFlow.isStrictBandwidth() == targetFlow.isStrictBandwidth()
                && Objects.equals(originalFlow.getMaxLatency(), targetFlow.getMaxLatency())
                && Objects.equals(originalFlow.getMaxLatencyTier2(), targetFlow.getMaxLatencyTier2())
                && Objects.equals(originalFlow.getFlowEncapsulationType(), targetFlow.getFlowEncapsulationType())
                && Objects.equals(originalFlow.getPathComputationStrategy(), targetFlow.getPathComputationStrategy())
                && Objects.equals(originalDiverseGroupId, targetDiverseGroupId)
                && Objects.equals(originalAffinityGroupId, targetAffinityGroupId);

        // TODO(tdurakov): check connected devices as well
        boolean srcEndpointChanged = originalFlow.getSrcPort() != targetFlow.getSrcPort()
                || originalFlow.getSrcVlan() != targetFlow.getSrcVlan()
                || originalFlow.getSrcInnerVlan() != targetFlow.getSrcInnerVlan();

        // TODO(tdurakov): check connected devices as well
        boolean dstEndpointChanged = originalFlow.getDestPort() != targetFlow.getDestPort()
                || originalFlow.getDestVlan() != targetFlow.getDestVlan()
                || originalFlow.getDestInnerVlan() != targetFlow.getDestInnerVlan();

        // BUG FIX: the original compared loop switch IDs with '!=' (object reference identity),
        // which almost never detects a changed loop switch; use value equality instead.
        if (!Objects.equals(originalFlow.getLoopSwitchId(), targetFlow.getLoopSwitchId())) {
            // A loop being attached to or detached from an endpoint counts as a change of that endpoint.
            srcEndpointChanged |= originalFlow.getSrcSwitch().equals(originalFlow.getLoopSwitchId());
            srcEndpointChanged |= originalFlow.getSrcSwitch().equals(targetFlow.getLoopSwitchId());
            dstEndpointChanged |= originalFlow.getDestSwitch().equals(originalFlow.getLoopSwitchId());
            dstEndpointChanged |= originalFlow.getDestSwitch().equals(targetFlow.getLoopSwitchId());
        }

        if (updateEndpointOnly) {
            if (srcEndpointChanged && dstEndpointChanged) {
                return ALL;
            } else if (srcEndpointChanged) {
                return SOURCE;
            } else if (dstEndpointChanged) {
                return DESTINATION;
            }
        }
        return NONE;
    }

    /**
     * Compares the corresponding subflows endpoints using their IDs.
     *
     * @param original HA subflows from the original HA-flow
     * @param target HA subflows from the target HA-flow
     * @return true if one of the endpoints has changed port or VLAN IDs
     */
    private static boolean isSubflowEndpointChanged(Collection<HaSubFlow> original,
                                                    Collection<HaSubFlowDto> target) {
        Map<String, FlowEndpoint> originalEndpoints = original.stream().collect(
                Collectors.toMap(HaSubFlow::getHaSubFlowId, HaSubFlow::getEndpoint,
                        EndpointUpdateType::throwOnDuplicate));

        Map<String, FlowEndpoint> targetEndpoints = target.stream().collect(
                Collectors.toMap(HaSubFlowDto::getFlowId, HaSubFlowDto::getEndpoint,
                        EndpointUpdateType::throwOnDuplicate));

        // NOTE(review): a sub-flow ID present only in the target is not detected here; in that case
        // the endpoint switch ID set differs too, so the caller falls back to a full update — confirm.
        return originalEndpoints.entrySet().stream()
                .anyMatch(entry -> !entry.getValue().isSwitchPortVlanEquals(targetEndpoints.get(entry.getKey())));
    }

    /**
     * Merge function for {@link Collectors#toMap}: duplicate sub-flow IDs are a data error.
     */
    private static <T> T throwOnDuplicate(T left, T right) {
        throw new IllegalArgumentException("HA-flow has HA sub flows with the duplicated ID. "
                + "Could not distinguish endpoints");
    }

    /**
     * Collects the switch IDs of all endpoints (shared + every sub-flow) of the request.
     */
    private static Set<SwitchId> getEndpointSwitchIds(HaFlowRequest haFlowRequest) {
        return Stream.concat(
                        Stream.of(haFlowRequest.getSharedEndpoint()),
                        haFlowRequest.getSubFlows().stream().map(HaSubFlowDto::getEndpoint))
                .map(FlowEndpoint::getSwitchId)
                .collect(Collectors.toSet());
    }
}
package org.openkilda.wfm.topology.flowhs.utils;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.openkilda.wfm.topology.flowhs.utils.EndpointUpdateType.ALL;
import static org.openkilda.wfm.topology.flowhs.utils.EndpointUpdateType.DESTINATION;
import static org.openkilda.wfm.topology.flowhs.utils.EndpointUpdateType.NONE;
import static org.openkilda.wfm.topology.flowhs.utils.EndpointUpdateType.SHARED;
import static org.openkilda.wfm.topology.flowhs.utils.EndpointUpdateType.SOURCE;
import static org.openkilda.wfm.topology.flowhs.utils.EndpointUpdateType.SUBFLOWS;

import org.openkilda.messaging.command.haflow.HaFlowRequest;
import org.openkilda.model.FlowEndpoint;
import org.openkilda.model.HaFlow;
import org.openkilda.wfm.HaFlowHelper;
import org.openkilda.wfm.topology.flowhs.mapper.HaFlowMapper;

import org.junit.jupiter.api.Test;

/**
 * Tests for {@link EndpointUpdateType#determineUpdateType(HaFlow, HaFlowRequest)}.
 * Each test builds a target request that is an exact mapping of the original HA-flow
 * and then mutates a single aspect of it to provoke the expected update type.
 */
class EndpointUpdateTypeTest {

    // Target is a verbatim mapping of the original: nothing changed, so nothing to update.
    @Test
    void whenNoDifference_updateNone() {
        HaFlow original = HaFlowHelper.createHaFlow();
        HaFlowRequest target = HaFlowMapper.INSTANCE.toHaFlowRequest(
                original, original.getDiverseGroupId(), HaFlowRequest.Type.UPDATE);

        assertEquals(NONE, EndpointUpdateType.determineUpdateType(original, target));
    }

    // Changing only the shared endpoint's inner VLAN, then only its port, must yield SHARED both times.
    @Test
    void whenOnlySharedChanged_updateShared() {
        HaFlow original = HaFlowHelper.createHaFlow();
        HaFlowRequest target = HaFlowMapper.INSTANCE.toHaFlowRequest(
                original, original.getDiverseGroupId(), HaFlowRequest.Type.UPDATE);
        // Inner VLAN differs from the original by 1; switch stays the same so the
        // endpoint-switch-ID comparison in determineUpdateType still matches.
        target.setSharedEndpoint(FlowEndpoint.builder()
                .innerVlanId(HaFlowHelper.SHARED_INNER_VLAN + 1)
                .outerVlanId(HaFlowHelper.SHARED_OUTER_VLAN)
                .portNumber(HaFlowHelper.SHARED_PORT)
                .switchId(HaFlowHelper.SHARED_SWITCH.getSwitchId())
                .build());

        assertEquals(SHARED, EndpointUpdateType.determineUpdateType(original, target));

        // Same check with a changed port instead of a changed VLAN.
        target.setSharedEndpoint(FlowEndpoint.builder()
                .innerVlanId(HaFlowHelper.SHARED_INNER_VLAN)
                .outerVlanId(HaFlowHelper.SHARED_OUTER_VLAN)
                .portNumber(HaFlowHelper.SHARED_PORT + 1)
                .switchId(HaFlowHelper.SHARED_SWITCH.getSwitchId())
                .build());

        assertEquals(SHARED, EndpointUpdateType.determineUpdateType(original, target));
    }

    // Changing a sub-flow endpoint (VLAN or port, on either sub-flow) must yield SUBFLOWS.
    @Test
    void whenOnlySubflowEndpointChanged_updateSubflows() {
        HaFlow original = HaFlowHelper.createHaFlow();
        HaFlowRequest target = HaFlowMapper.INSTANCE.toHaFlowRequest(
                original, original.getDiverseGroupId(), HaFlowRequest.Type.UPDATE);

        // Mutate sub-flow A's inner VLAN only.
        FlowEndpoint originalEndpointA = target.getSubFlows().get(0).getEndpoint();
        target.getSubFlows().get(0).setEndpoint(FlowEndpoint.builder()
                .innerVlanId(originalEndpointA.getInnerVlanId() + 1)
                .outerVlanId(originalEndpointA.getOuterVlanId())
                .portNumber(originalEndpointA.getPortNumber())
                .switchId(originalEndpointA.getSwitchId())
                .build());

        assertEquals(SUBFLOWS, EndpointUpdateType.determineUpdateType(original, target));

        // Mutate sub-flow A's port only.
        target.getSubFlows().get(0).setEndpoint(FlowEndpoint.builder()
                .innerVlanId(originalEndpointA.getInnerVlanId())
                .outerVlanId(originalEndpointA.getOuterVlanId())
                .portNumber(originalEndpointA.getPortNumber() + 1)
                .switchId(originalEndpointA.getSwitchId())
                .build());

        assertEquals(SUBFLOWS, EndpointUpdateType.determineUpdateType(original, target));

        // Restore sub-flow A to its original endpoint, then mutate sub-flow B's port instead.
        target.getSubFlows().get(0).setEndpoint(FlowEndpoint.builder()
                .innerVlanId(originalEndpointA.getInnerVlanId())
                .outerVlanId(originalEndpointA.getOuterVlanId())
                .portNumber(originalEndpointA.getPortNumber())
                .switchId(originalEndpointA.getSwitchId())
                .build());
        FlowEndpoint originalEndpointB = target.getSubFlows().get(1).getEndpoint();
        target.getSubFlows().get(1).setEndpoint(FlowEndpoint.builder()
                .innerVlanId(originalEndpointB.getInnerVlanId())
                .outerVlanId(originalEndpointB.getOuterVlanId())
                .portNumber(originalEndpointB.getPortNumber() + 1)
                .switchId(originalEndpointB.getSwitchId())
                .build());

        assertEquals(SUBFLOWS, EndpointUpdateType.determineUpdateType(original, target));
    }

    // Changing both the shared endpoint and a sub-flow endpoint must yield ALL.
    @Test
    void whenSharedAndSubflowEndpointsChanged_updateAll() {
        HaFlow original = HaFlowHelper.createHaFlow();
        HaFlowRequest target = HaFlowMapper.INSTANCE.toHaFlowRequest(
                original, original.getDiverseGroupId(), HaFlowRequest.Type.UPDATE);

        FlowEndpoint originalEndpointA = target.getSubFlows().get(0).getEndpoint();
        target.getSubFlows().get(0).setEndpoint(FlowEndpoint.builder()
                .innerVlanId(originalEndpointA.getInnerVlanId() + 1)
                .outerVlanId(originalEndpointA.getOuterVlanId())
                .portNumber(originalEndpointA.getPortNumber())
                .switchId(originalEndpointA.getSwitchId())
                .build());

        target.setSharedEndpoint(FlowEndpoint.builder()
                .innerVlanId(HaFlowHelper.SHARED_INNER_VLAN)
                .outerVlanId(HaFlowHelper.SHARED_OUTER_VLAN)
                .portNumber(HaFlowHelper.SHARED_PORT + 1)
                .switchId(HaFlowHelper.SHARED_SWITCH.getSwitchId())
                .build());

        assertEquals(ALL, EndpointUpdateType.determineUpdateType(original, target));
    }

    // A change to a non-endpoint property (protected path allocation) disqualifies
    // the partial update entirely, so NONE is expected even though nothing else changed.
    @Test
    void whenNotPartial_updateNone() {
        HaFlow original = HaFlowHelper.createHaFlow();
        HaFlowRequest target = HaFlowMapper.INSTANCE.toHaFlowRequest(
                original, original.getDiverseGroupId(), HaFlowRequest.Type.UPDATE);
        target.setAllocateProtectedPath(!target.isAllocateProtectedPath());

        assertEquals(NONE, EndpointUpdateType.determineUpdateType(original, target));
    }

    // Only NONE must report "not a partial update"; every other constant is partial.
    @Test
    void isPartialUpdateTest() {
        assertFalse(NONE.isPartialUpdate());

        assertTrue(ALL.isPartialUpdate());
        assertTrue(SUBFLOWS.isPartialUpdate());
        assertTrue(SOURCE.isPartialUpdate());
        assertTrue(DESTINATION.isPartialUpdate());
        assertTrue(SHARED.isPartialUpdate());
    }
}
.../fsm/haflow/update/HaFlowUpdateFsm.java | 24 ++++++++++++------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/update/HaFlowUpdateFsm.java b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/update/HaFlowUpdateFsm.java index b29ebf9b1cd..c135263ae16 100644 --- a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/update/HaFlowUpdateFsm.java +++ b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/update/HaFlowUpdateFsm.java @@ -238,7 +238,7 @@ public Factory(@NonNull HaFlowGenericCarrier carrier, @NonNull Config config, .onEach(Event.TIMEOUT, Event.ERROR) .perform(new HandleNotCompletedCommandsAction<>()); - builder.transition().from(State.OLD_RULES_REMOVED) + builder.transition().from(State.DETERMINE_OLD_RESOURCE_REMOVAL_REQUIRED) .to(State.NOTIFY_FLOW_STATS_ON_REMOVED_PATHS).on(Event.NEXT) .perform(new NotifyHaFlowStatsOnRemovedPathsAction<>(persistenceManager, carrier)); @@ -296,13 +296,13 @@ public Factory(@NonNull HaFlowGenericCarrier carrier, @NonNull Config config, .perform(new HandleNotCompletedCommandsAction<>()); builder.transition().from(State.NEW_RULES_REVERTED) - .to(State.SKIP_REVERTING_RESOURCES_ALLOCATION) - .on(Event.NEXT); + .to(State.DETERMINE_RESOURCE_REVERTING_REQUIRED) + .on(Event.NEXT) + .perform(new SkipResourceManagementOnEndpointsUpdateAction(persistenceManager)); - builder.transitions().from(State.SKIP_REVERTING_RESOURCES_ALLOCATION) + builder.transitions().from(State.DETERMINE_RESOURCE_REVERTING_REQUIRED) .toAmong(State.REVERTING_ALLOCATED_RESOURCES, State.REVERTING_FLOW) - .onEach(Event.NEXT, Event.UPDATE_ENDPOINTS_ONLY) - .perform(new SkipResourceManagementOnEndpointsUpdateAction(persistenceManager)); + .onEach(Event.NEXT, 
Event.UPDATE_ENDPOINTS_ONLY); builder.onEntry(State.REVERTING_ALLOCATED_RESOURCES) .perform(reportErrorAction); @@ -351,10 +351,15 @@ public Factory(@NonNull HaFlowGenericCarrier carrier, @NonNull Config config, builder.transition() .from(State.OLD_RULES_REMOVED) - .to(State.UPDATING_FLOW_STATUS) - .on(Event.UPDATE_ENDPOINTS_ONLY) + .to(State.DETERMINE_OLD_RESOURCE_REMOVAL_REQUIRED) + .on(Event.NEXT) .perform(new SkipResourceManagementOnEndpointsUpdateAction(persistenceManager)); + builder.transition() + .from(State.DETERMINE_RESOURCE_REVERTING_REQUIRED) + .to(State.UPDATING_FLOW_STATUS) + .on(Event.UPDATE_ENDPOINTS_ONLY); + builder.defineFinalState(State.FINISHED) .addEntryAction(new OnFinishedAction(dashboardLogger)); builder.defineFinalState(State.FINISHED_WITH_ERROR) @@ -438,7 +443,8 @@ public enum State { NOTIFY_FLOW_STATS_ON_NEW_PATHS, NOTIFY_FLOW_STATS_ON_REMOVED_PATHS, - SKIP_REVERTING_RESOURCES_ALLOCATION + DETERMINE_RESOURCE_REVERTING_REQUIRED, + DETERMINE_OLD_RESOURCE_REMOVAL_REQUIRED } public enum Event { From c4e25eadab5da42eb87ecc3fbaa9671ff8412a1d Mon Sep 17 00:00:00 2001 From: Dmitrii Beliakov Date: Mon, 13 Nov 2023 18:09:02 +0100 Subject: [PATCH 3/3] Fix a typo in FSM. 
--- .../wfm/topology/flowhs/fsm/haflow/update/HaFlowUpdateFsm.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/update/HaFlowUpdateFsm.java b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/update/HaFlowUpdateFsm.java index c135263ae16..20911a9df45 100644 --- a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/update/HaFlowUpdateFsm.java +++ b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/update/HaFlowUpdateFsm.java @@ -356,7 +356,7 @@ public Factory(@NonNull HaFlowGenericCarrier carrier, @NonNull Config config, .perform(new SkipResourceManagementOnEndpointsUpdateAction(persistenceManager)); builder.transition() - .from(State.DETERMINE_RESOURCE_REVERTING_REQUIRED) + .from(State.DETERMINE_OLD_RESOURCE_REMOVAL_REQUIRED) .to(State.UPDATING_FLOW_STATUS) .on(Event.UPDATE_ENDPOINTS_ONLY);