From a683db770e56432ae7696366eed775da5d371e12 Mon Sep 17 00:00:00 2001 From: Ivan Chupin Date: Tue, 16 Jul 2024 12:52:33 +0200 Subject: [PATCH 1/7] Update git hook to allow additional delimiter "-" between prefix and main part of the branch name. (mostly for the release branch naming) --- .hooks/pre-commit | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.hooks/pre-commit b/.hooks/pre-commit index 28cd3cd7996..f1885527b2e 100755 --- a/.hooks/pre-commit +++ b/.hooks/pre-commit @@ -3,7 +3,7 @@ LC_ALL=C local_branch="$(git rev-parse --abbrev-ref HEAD)" -valid_branch_regex="^(docs|feature|fix|test|release|improvement|hotfix|chore)\/[a-z0-9._-]+$" +valid_branch_regex="^(docs|feature|fix|test|release|improvement|hotfix|chore)[\/-][a-z0-9._-]+$" message="There branch name is wrong. Branch names in this project must adhere to this contract: $valid_branch_regex. You should rename your branch to a valid name and try again." From 2d429e6396bf07fdd46ce8153cf7a3f759947132 Mon Sep 17 00:00:00 2001 From: Yuliia Miroshnychenko Date: Fri, 2 Aug 2024 13:16:17 +0200 Subject: [PATCH 2/7] [TEST]: Issue 5699: Flaky test: Flow with protected path(reroute) --- .../spec/flows/ProtectedPathSpec.groovy | 25 ++++++++++--------- 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/ProtectedPathSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/ProtectedPathSpec.groovy index 4eec25f35e8..9f692d07949 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/ProtectedPathSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/ProtectedPathSpec.groovy @@ -46,7 +46,6 @@ import groovy.util.logging.Slf4j import org.springframework.beans.factory.annotation.Autowired import org.springframework.http.HttpStatus import 
org.springframework.web.client.HttpClientErrorException -import spock.lang.Issue import spock.lang.Narrative import spock.lang.See import spock.lang.Shared @@ -1251,7 +1250,6 @@ class ProtectedPathSpec extends HealthCheckSpecification { "an unmetered" | 0 } - @Issue("https://github.com/telstra/open-kilda/issues/5699") @Tags(ISL_RECOVER_ON_FAIL) def "System doesn't reroute main flow path when protected path is broken and new alt path is available\ (altPath is more preferable than mainPath)"() { @@ -1264,29 +1262,32 @@ class ProtectedPathSpec extends HealthCheckSpecification { and: "All alternative paths are unavailable (bring ports down on the source switch)" def initialFlowPathInfo = flow.retrieveAllEntityPaths() def initialMainPath = initialFlowPathInfo.getPathNodes(Direction.FORWARD, false) - def mainPathIsl = initialFlowPathInfo.flowPath.getMainPathInvolvedIsls().first() + def mainPathIsl = initialFlowPathInfo.flowPath.getMainPathInvolvedIsls() def initialProtectedPath = initialFlowPathInfo.getPathNodes(Direction.FORWARD, true) - def protectedIslToBreak = initialFlowPathInfo.flowPath.getProtectedPathInvolvedIsls().first() - def broughtDownIsls = topology.getRelatedIsls(switchPair.src) - mainPathIsl - protectedIslToBreak + def protectedIslToBreak = initialFlowPathInfo.flowPath.getProtectedPathInvolvedIsls() + def broughtDownIsls = topology.getRelatedIsls(switchPair.src) - mainPathIsl.first() - protectedIslToBreak.first() islHelper.breakIsls(broughtDownIsls) and: "ISL on a protected path is broken(bring port down) for changing the flow state to DEGRADED" - islHelper.breakIsl(protectedIslToBreak) + islHelper.breakIsl(protectedIslToBreak.first()) + flow.waitForHistoryEvent(REROUTE_FAILED) Wrappers.wait(WAIT_OFFSET) { assert flow.retrieveFlowStatus().status == FlowState.DEGRADED } - when: "Make the current path less preferable than alternative path" + when: "Make the current paths(main and protected) less preferable than alternative path" def alternativePath = 
switchPair.paths.find { it != initialMainPath && it != initialProtectedPath } - def alternativeIsl = pathHelper.getInvolvedIsls(alternativePath)[0] - + def alternativeIsl = pathHelper.getInvolvedIsls(alternativePath) switchPair.paths.findAll { it != alternativePath }.each { pathHelper.makePathMorePreferable(alternativePath, it) } - assert northbound.getLink(mainPathIsl).cost > northbound.getLink(alternativeIsl).cost + + int alternativeIslCost = alternativeIsl.sum { northbound.getLink(it).cost } + assert mainPathIsl.sum { northbound.getLink(it).cost } > alternativeIslCost and: "Make alternative path available(bring port up on the source switch)" - islHelper.restoreIsl(alternativeIsl) + islHelper.restoreIsl(alternativeIsl.first()) - then: "Flow state is changed to UP" + then: "Reroute has been executed successfully and flow state is changed to UP" + flow.waitForHistoryEvent(REROUTE) Wrappers.wait(WAIT_OFFSET) { assert flow.retrieveFlowStatus().status == FlowState.UP } and: "Protected path is recalculated only" From 55c617b31200c3f2d75c65e93e49da48668ac045 Mon Sep 17 00:00:00 2001 From: Yuliia Miroshnychenko Date: Wed, 10 Jul 2024 12:26:37 +0200 Subject: [PATCH 3/7] [TEST]: Regular Flow: New interaction approach: Func-tests: Eliminating flowHelper(v1/v2) usage --- .../functionaltests/helpers/FlowHelper.groovy | 219 +----------------- .../helpers/FlowHelperV2.groovy | 206 ++-------------- .../helpers/HaPathHelper.groovy | 38 --- .../helpers/SwitchHelper.groovy | 16 ++ .../helpers/builder/FlowBuilder.groovy | 2 +- .../helpers/builder/HaFlowBuilder.groovy | 2 +- .../helpers/builder/YFlowBuilder.groovy | 9 +- .../helpers/model/FlowExtended.groovy | 50 +++- .../functionaltests/BaseSpecification.groovy | 6 - .../spec/flows/AutoRerouteSpec.groovy | 20 +- .../spec/flows/FlowCrudSpec.groovy | 8 +- .../spec/flows/MirrorEndpointsSpec.groovy | 3 +- .../spec/flows/ThrottlingRerouteSpec.groovy | 5 +- .../spec/flows/VxlanFlowSpec.groovy | 12 +- 
.../flows/haflows/HaFlowUpdateSpec.groovy | 7 +- .../spec/flows/yflows/SubFlowSpec.groovy | 7 +- .../flows/yflows/YFlowDiversitySpec.groovy | 8 +- .../spec/switches/LagPortSpec.groovy | 2 +- .../spec/switches/SwitchesFlowsV2Spec.groovy | 9 +- 19 files changed, 135 insertions(+), 494 deletions(-) delete mode 100644 src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/HaPathHelper.groovy diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/FlowHelper.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/FlowHelper.groovy index 73cd60167c7..a33089dac90 100644 --- a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/FlowHelper.groovy +++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/FlowHelper.groovy @@ -1,7 +1,9 @@ package org.openkilda.functionaltests.helpers -import com.github.javafaker.Faker -import groovy.util.logging.Slf4j +import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.DELETE_FLOW +import static org.openkilda.testing.Constants.FLOW_CRUD_TIMEOUT +import static org.springframework.beans.factory.config.ConfigurableBeanFactory.SCOPE_PROTOTYPE + import org.openkilda.functionaltests.helpers.model.SwitchPair import org.openkilda.functionaltests.model.cleanup.CleanupAfter import org.openkilda.functionaltests.model.cleanup.CleanupManager @@ -10,33 +12,19 @@ import org.openkilda.messaging.payload.flow.FlowCreatePayload import org.openkilda.messaging.payload.flow.FlowEndpointPayload import org.openkilda.messaging.payload.flow.FlowPayload import org.openkilda.messaging.payload.flow.FlowState -import org.openkilda.messaging.payload.flow.PathNodePayload -import org.openkilda.messaging.payload.history.FlowHistoryEntry -import org.openkilda.model.SwitchId -import org.openkilda.northbound.dto.v2.flows.DetectConnectedDevicesV2 -import 
org.openkilda.northbound.dto.v2.flows.FlowEndpointV2 import org.openkilda.testing.model.topology.TopologyDefinition import org.openkilda.testing.model.topology.TopologyDefinition.Switch import org.openkilda.testing.service.database.Database import org.openkilda.testing.service.northbound.NorthboundService + +import com.github.javafaker.Faker +import groovy.util.logging.Slf4j import org.springframework.beans.factory.annotation.Autowired import org.springframework.beans.factory.annotation.Qualifier import org.springframework.context.annotation.Scope import org.springframework.stereotype.Component import java.text.SimpleDateFormat -import java.time.ZonedDateTime -import java.time.format.DateTimeFormatter - -import static groovyx.gpars.GParsPool.withPool -import static org.openkilda.functionaltests.helpers.FlowHistoryConstants.DELETE_SUCCESS -import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.DELETE_FLOW -import static org.openkilda.testing.Constants.EGRESS_RULE_MULTI_TABLE_ID -import static org.openkilda.testing.Constants.FLOW_CRUD_TIMEOUT -import static org.openkilda.testing.Constants.INGRESS_RULE_MULTI_TABLE_ID -import static org.openkilda.testing.Constants.TRANSIT_RULE_MULTI_TABLE_ID -import static org.openkilda.testing.Constants.WAIT_OFFSET -import static org.springframework.beans.factory.config.ConfigurableBeanFactory.SCOPE_PROTOTYPE /** * Holds utility methods for manipulating flows. @@ -114,25 +102,6 @@ class FlowHelper { } as FlowCreatePayload } - /** - * Creates a FlowPayload instance with random vlan and flow id suitable for a single-switch flow. - * The flow will be on the same port. 
- */ - FlowPayload singleSwitchSinglePortFlow(Switch sw) { - def srcEndpoint = getFlowEndpoint(sw, []) - def dstEndpoint = getFlowEndpoint(sw, []).tap { it.portNumber = srcEndpoint.portNumber } - if (srcEndpoint.vlanId == dstEndpoint.vlanId) { //ensure same vlan is not randomly picked - dstEndpoint.vlanId-- - } - return FlowPayload.builder() - .id(generateFlowId()) - .source(srcEndpoint) - .destination(dstEndpoint) - .maximumBandwidth(500) - .description(generateDescription()) - .build() - } - /** * Adds flow with checking flow status and rules on source and destination switches. * It is supposed if rules are installed on source and destination switches, the flow is completely created. @@ -146,76 +115,6 @@ class FlowHelper { return response } - /** - * Sends flow create request but doesn't wait for flow to go up. - */ - FlowPayload attemptToAddFlow(FlowCreatePayload flow) { - def flowId = flow.getId() - cleanupManager.addAction(DELETE_FLOW, {flowHelperV2.safeDeleteFlow(flowId)}, CleanupAfter.TEST) - return northbound.addFlow(flow) - } - - - List "get ports that flow uses on switch from path" (String flowId, SwitchId switchId) { - def response = northbound.getFlowPath(flowId) - def paths = response.forwardPath + response.reversePath + (response.protectedPath as PathNodePayload) - return paths.findAll{it != null && it.getSwitchId() == switchId} - .inject([].toSet()) {result, i -> result + [i.inputPort, i.outputPort]}.asList() - } - - /** - * Deletes flow with checking rules on source and destination switches. - * It is supposed if rules absent on source and destination switches, the flow is completely deleted. 
- */ - FlowPayload deleteFlow(String flowId) { - Wrappers.wait(WAIT_OFFSET) { assert northbound.getFlowStatus(flowId).status != FlowState.IN_PROGRESS } - log.debug("Deleting flow '$flowId'") - def response = northbound.deleteFlow(flowId) - Wrappers.wait(FLOW_CRUD_TIMEOUT) { - assert !northbound.getFlowStatus(flowId) - assert northbound.getFlowHistory(flowId).find { it.payload.last().action == DELETE_SUCCESS } - } - return response - } - - /** - * Updates flow with checking flow status and rules on source and destination switches. - * It is supposed if rules are installed on source and destination switches, the flow is completely updated. - */ - FlowPayload updateFlow(String flowId, FlowPayload flow) { - log.debug("Updating flow '${flowId}'") - def response = northbound.updateFlow(flowId, flow) - Wrappers.wait(FLOW_CRUD_TIMEOUT) { assert northbound.getFlowStatus(flowId).status == FlowState.UP } - return response - } - - /** - * Returns last (the freshest) flow history entry - * @param flowId - * @return - */ - FlowHistoryEntry getLatestHistoryEntry(String flowId) { - return northbound.getFlowHistory(flowId).last() - } - - /** - * Returns the number of entries in flow history - * @param flowId - * @return - */ - int getHistorySize(String flowId) { - return northbound.getFlowHistory(flowId).size() - } - - //TODO: Switch to enum for action - List getHistoryEntriesByAction(String flowId, String action) { - return northbound.getFlowHistory(flowId).findAll {it.getAction() == action} - } - - FlowHistoryEntry getEarliestHistoryEntryByAction(String flowId, String action) { - return getHistoryEntriesByAction(flowId, action).first() - } - /** * Check whether given potential flow is conflicting with any of flows in the given list. * Usually used to ensure that some new flow is by accident is not conflicting with any of existing flows. 
@@ -235,103 +134,6 @@ class FlowHelper { } || existingFlows*.id.contains(newFlow.id) } - /** - * Converts a given FlowEndpointPayload object to FlowEndpointV2 object. - * - * @param endpoint FlowEndpointPayload object to convert - */ - static FlowEndpointV2 toFlowEndpointV2(FlowEndpointPayload endpoint) { - new FlowEndpointV2(endpoint.datapath, endpoint.portNumber, endpoint.vlanId, (endpoint.innerVlanId ?: 0), - toFlowConnectedDevicesV2(endpoint.detectConnectedDevices)) - } - - static DetectConnectedDevicesV2 toFlowConnectedDevicesV2(DetectConnectedDevicesPayload payload) { - new DetectConnectedDevicesV2(payload.lldp, payload.arp) - } - - /** - * Check that all needed rules are created for a flow with protected path.
- * Protected path creates the 'egress' rule only on the src and dst switches - * and creates 2 rules(input/output) on the transit switches.
- * if (switchId == src/dst): 2 rules for main flow path + 1 egress for protected path = 3
- * if (switchId != src/dst): 2 rules for main flow path + 2 rules for protected path = 4
- * - * @param flowId - */ - void verifyRulesOnProtectedFlow(String flowId) { - def flowPathInfo = northbound.getFlowPath(flowId) - def mainFlowPath = flowPathInfo.forwardPath - def srcMainSwitch = mainFlowPath[0] - def dstMainSwitch = mainFlowPath[-1] - def mainFlowTransitSwitches = (mainFlowPath.size() > 2) ? mainFlowPath[1..-2] : [] - def protectedFlowPath = flowPathInfo.protectedPath.forwardPath - def protectedFlowTransitSwitches = (protectedFlowPath.size() > 2) ? protectedFlowPath[1..-2] : [] - - def commonSwitches = mainFlowPath*.switchId.intersect(protectedFlowPath*.switchId) - def commonTransitSwitches = mainFlowTransitSwitches*.switchId.intersect(protectedFlowTransitSwitches*.switchId) - - def flowInfo = db.getFlow(flowId) - def mainForwardCookie = flowInfo.forwardPath.cookie.value - def mainReverseCookie = flowInfo.reversePath.cookie.value - def protectedForwardCookie = flowInfo.protectedForwardPath.cookie.value - def protectedReverseCookie = flowInfo.protectedReversePath.cookie.value - - def rulesOnSrcSwitch = northbound.getSwitchRules(srcMainSwitch.switchId).flowEntries - assert rulesOnSrcSwitch.find { - it.cookie == mainForwardCookie - }.tableId == INGRESS_RULE_MULTI_TABLE_ID - assert rulesOnSrcSwitch.find { - it.cookie == mainReverseCookie - }.tableId == EGRESS_RULE_MULTI_TABLE_ID - assert rulesOnSrcSwitch.find { - it.cookie == protectedReverseCookie - }.tableId == EGRESS_RULE_MULTI_TABLE_ID - assert !rulesOnSrcSwitch*.cookie.contains(protectedForwardCookie) - - def rulesOnDstSwitch = northbound.getSwitchRules(dstMainSwitch.switchId).flowEntries - assert rulesOnDstSwitch.find { - it.cookie == mainForwardCookie - }.tableId == EGRESS_RULE_MULTI_TABLE_ID - assert rulesOnDstSwitch.find { - it.cookie == mainReverseCookie - }.tableId == INGRESS_RULE_MULTI_TABLE_ID - assert rulesOnDstSwitch.find { - it.cookie == protectedForwardCookie - }.tableId == EGRESS_RULE_MULTI_TABLE_ID - assert !rulesOnDstSwitch*.cookie.contains(protectedReverseCookie) - - //this loop 
checks rules on common nodes(except src and dst switches) - withPool { - commonTransitSwitches.eachParallel { sw -> - def rules = northbound.getSwitchRules(sw).flowEntries - def transitTableId = TRANSIT_RULE_MULTI_TABLE_ID - assert rules.find { it.cookie == mainForwardCookie }.tableId == transitTableId - assert rules.find { it.cookie == mainReverseCookie }.tableId == transitTableId - assert rules.find { it.cookie == protectedForwardCookie }.tableId == transitTableId - assert rules.find { it.cookie == protectedReverseCookie }.tableId == transitTableId - } - } - - //this loop checks rules on unique transit nodes - withPool { - protectedFlowTransitSwitches.findAll { !commonSwitches.contains(it.switchId) }.eachParallel { node -> - def rules = northbound.getSwitchRules(node.switchId).flowEntries - def transitTableId = TRANSIT_RULE_MULTI_TABLE_ID - assert rules.find { it.cookie == protectedForwardCookie }.tableId == transitTableId - assert rules.find { it.cookie == protectedReverseCookie }.tableId == transitTableId - } - } - - //this loop checks rules on unique main nodes - withPool { - mainFlowTransitSwitches.findAll { !commonSwitches.contains(it.switchId) }.eachParallel { node -> - def rules = northbound.getSwitchRules(node.switchId).flowEntries - def transitTableId = TRANSIT_RULE_MULTI_TABLE_ID - assert rules.find { it.cookie == mainForwardCookie }.tableId == transitTableId - assert rules.find { it.cookie == mainReverseCookie }.tableId == transitTableId - } - } - } /** * Returns flow endpoint with randomly chosen vlan. 
@@ -381,11 +183,4 @@ class FlowHelper { def r = new Random() "autotest flow: ${descpription[r.nextInt(descpription.size())]}" } - - static Long convertStringTimestampIsoToLong(String timestampIso) { - def parsedRerouteActionStart = ZonedDateTime.parse( - timestampIso, DateTimeFormatter.ISO_OFFSET_DATE_TIME) - def timestampMillis = parsedRerouteActionStart.toInstant().toEpochMilli() - return timestampMillis - } } diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/FlowHelperV2.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/FlowHelperV2.groovy index 430b12ae5da..32e38617863 100644 --- a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/FlowHelperV2.groovy +++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/FlowHelperV2.groovy @@ -1,26 +1,33 @@ package org.openkilda.functionaltests.helpers -import com.github.javafaker.Faker -import groovy.util.logging.Slf4j +import static org.openkilda.functionaltests.helpers.FlowHistoryConstants.CREATE_SUCCESS +import static org.openkilda.functionaltests.helpers.FlowHistoryConstants.DELETE_SUCCESS +import static org.openkilda.functionaltests.helpers.SwitchHelper.randomVlan +import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.DELETE_FLOW +import static org.openkilda.functionaltests.model.cleanup.CleanupAfter.TEST +import static org.openkilda.messaging.payload.flow.FlowState.IN_PROGRESS +import static org.openkilda.messaging.payload.flow.FlowState.UP +import static org.openkilda.testing.Constants.FLOW_CRUD_TIMEOUT +import static org.openkilda.testing.Constants.WAIT_OFFSET +import static org.springframework.beans.factory.config.ConfigurableBeanFactory.SCOPE_PROTOTYPE + import org.openkilda.functionaltests.helpers.model.SwitchPair import org.openkilda.functionaltests.model.cleanup.CleanupManager import 
org.openkilda.messaging.payload.flow.DetectConnectedDevicesPayload -import org.openkilda.messaging.payload.flow.FlowCreatePayload import org.openkilda.messaging.payload.flow.FlowEndpointPayload import org.openkilda.messaging.payload.flow.FlowPayload import org.openkilda.messaging.payload.flow.FlowState -import org.openkilda.model.FlowPathStatus import org.openkilda.northbound.dto.v2.flows.DetectConnectedDevicesV2 import org.openkilda.northbound.dto.v2.flows.FlowEndpointV2 -import org.openkilda.northbound.dto.v2.flows.FlowMirrorPointPayload -import org.openkilda.northbound.dto.v2.flows.FlowMirrorPointResponseV2 -import org.openkilda.northbound.dto.v2.flows.FlowPatchV2 import org.openkilda.northbound.dto.v2.flows.FlowRequestV2 import org.openkilda.northbound.dto.v2.flows.FlowResponseV2 import org.openkilda.testing.model.topology.TopologyDefinition import org.openkilda.testing.model.topology.TopologyDefinition.Switch import org.openkilda.testing.service.northbound.NorthboundService import org.openkilda.testing.service.northbound.NorthboundServiceV2 + +import com.github.javafaker.Faker +import groovy.util.logging.Slf4j import org.springframework.beans.factory.annotation.Autowired import org.springframework.beans.factory.annotation.Qualifier import org.springframework.context.annotation.Scope @@ -28,20 +35,6 @@ import org.springframework.stereotype.Component import java.text.SimpleDateFormat -import static FlowHistoryConstants.UPDATE_SUCCESS -import static org.openkilda.functionaltests.helpers.FlowHelper.KILDA_ALLOWED_VLANS -import static org.openkilda.functionaltests.helpers.FlowHistoryConstants.CREATE_MIRROR_SUCCESS -import static org.openkilda.functionaltests.helpers.FlowHistoryConstants.CREATE_SUCCESS -import static org.openkilda.functionaltests.helpers.FlowHistoryConstants.DELETE_SUCCESS -import static org.openkilda.functionaltests.helpers.FlowHistoryConstants.PARTIAL_UPDATE_ONLY_IN_DB -import static 
org.openkilda.functionaltests.model.cleanup.CleanupActionType.DELETE_FLOW -import static org.openkilda.functionaltests.model.cleanup.CleanupAfter.TEST -import static org.openkilda.messaging.payload.flow.FlowState.IN_PROGRESS -import static org.openkilda.messaging.payload.flow.FlowState.UP -import static org.openkilda.testing.Constants.FLOW_CRUD_TIMEOUT -import static org.openkilda.testing.Constants.WAIT_OFFSET -import static org.springframework.beans.factory.config.ConfigurableBeanFactory.SCOPE_PROTOTYPE - /** * Holds utility methods for manipulating flows supporting version 2 of API. */ @@ -134,30 +127,6 @@ class FlowHelperV2 { } as FlowRequestV2 } - FlowRequestV2 singleSwitchFlow(SwitchPair switchPair, boolean useTraffgenPorts = true, - List existingFlows = []) { - singleSwitchFlow(switchPair.src, useTraffgenPorts, existingFlows) - } - - /** - * Creates a FlowPayload instance with random vlan and flow id suitable for a single-switch flow. - * The flow will be on the same port. - */ - FlowRequestV2 singleSwitchSinglePortFlow(Switch sw) { - def srcEndpoint = getFlowEndpoint(sw, []) - def dstEndpoint = getFlowEndpoint(sw, []).tap { it.portNumber = srcEndpoint.portNumber } - if (srcEndpoint.vlanId == dstEndpoint.vlanId) { //ensure same vlan is not randomly picked - dstEndpoint.vlanId-- - } - return FlowRequestV2.builder() - .flowId(generateFlowId()) - .source(srcEndpoint) - .destination(dstEndpoint) - .maximumBandwidth(500) - .description(generateDescription()) - .build() - } - /** * Adds flow and waits for it to become in expected state ('Up' by default) */ @@ -175,21 +144,6 @@ class FlowHelperV2 { return response } - /** - * Adds flow and waits for it to become in expected state ('Up' by default) - */ - FlowResponseV2 addFlow(FlowCreatePayload flow, FlowState expectedFlowState = UP) { - return addFlow(toV2(flow), expectedFlowState); - } - - /** - * Sends flow create request but doesn't wait for flow to go up. 
- */ - FlowResponseV2 attemptToAddFlow(FlowRequestV2 flow) { - def flowId = flow.getFlowId() - cleanupManager.addAction(DELETE_FLOW, {safeDeleteFlow(flowId)}, TEST) - return northboundV2.addFlow(flow) - } /** * Sends delete request for flow and waits for that flow to disappear from flows list @@ -211,64 +165,11 @@ class FlowHelperV2 { } } - /** - * Updates flow and waits for it to become UP - */ - FlowResponseV2 updateFlow(String flowId, FlowRequestV2 flow) { - log.debug("Updating flow '${flowId}'") - def response = northboundV2.updateFlow(flowId, flow) - Wrappers.wait(FLOW_CRUD_TIMEOUT) { - assert northboundV2.getFlowStatus(flowId).status == UP - assert northbound.getFlowHistory(flowId).last().payload.last().action == UPDATE_SUCCESS - } - return response - } - - /** - * Updates flow and waits for it to become UP - */ - FlowResponseV2 updateFlow(String flowId, FlowCreatePayload flow) { - return updateFlow(flowId, toV2(flow)) - } - - FlowResponseV2 partialUpdate(String flowId, FlowPatchV2 flow, boolean isUpdateConsecutive = true) { - log.debug("Updating flow '${flowId}'(partial update)") - String action = isUpdateConsecutive ? 
UPDATE_SUCCESS : PARTIAL_UPDATE_ONLY_IN_DB - def response = northboundV2.partialUpdate(flowId, flow) - Wrappers.wait(FLOW_CRUD_TIMEOUT) { - assert northboundV2.getFlowStatus(flowId).status == UP - assert northbound.getFlowHistory(flowId).last().payload.last().action == action - } - return response - } - - FlowMirrorPointResponseV2 createMirrorPoint(String flowId, FlowMirrorPointPayload mirrorPoint) { - def response = northboundV2.createMirrorPoint(flowId, mirrorPoint) - Wrappers.wait(FLOW_CRUD_TIMEOUT) { - assert northboundV2.getFlow(flowId).mirrorPointStatuses[0].status == - FlowPathStatus.ACTIVE.toString().toLowerCase() - assert northbound.getFlowHistory(flowId).last().payload.last().action == CREATE_MIRROR_SUCCESS - } - return response - } - String generateFlowId() { return new SimpleDateFormat("ddMMMHHmmss_SSS", Locale.US).format(new Date()) + "_" + faker.food().ingredient().toLowerCase().replaceAll(/\W/, "") + faker.number().digits(4) } - static int randomVlan() { - return randomVlan([]) - } - - static int randomVlan(List exclusions) { - return (KILDA_ALLOWED_VLANS - exclusions).shuffled().first() - } - - static List availableVlanList(List exclusions) { - return (KILDA_ALLOWED_VLANS - exclusions).shuffled() - } - /** * Check whether given potential flow is conflicting with any of flows in the given list. * Usually used to ensure that some new flow is by accident is not conflicting with any of existing flows. 
@@ -288,27 +189,8 @@ class FlowHelperV2 { } || existingFlows*.flowId.contains(newFlow.flowId) } - static FlowPayload toV1(FlowRequestV2 flow) { - FlowPayload.builder() - .id(flow.flowId) - .description(flow.description) - .maximumBandwidth(flow.maximumBandwidth) - .ignoreBandwidth(flow.ignoreBandwidth) - .allocateProtectedPath(flow.allocateProtectedPath) - .periodicPings(flow.periodicPings) - .encapsulationType(flow.encapsulationType) - .maxLatency(flow.maxLatency) - .pinned(flow.pinned) - .priority(flow.priority) - .source(toV1(flow.source)) - .destination(toV1(flow.destination)) - .build() - } - static FlowEndpointPayload toV1(FlowEndpointV2 ep) { - new FlowEndpointPayload(ep.switchId, ep.portNumber, ep.vlanId, ep.getInnerVlanId(), - new DetectConnectedDevicesPayload(false, false)) - } + static FlowRequestV2 toV2(FlowPayload flow) { FlowRequestV2.builder() @@ -327,12 +209,6 @@ class FlowHelperV2 { .build() } - static FlowRequestV2 toV2(FlowCreatePayload flow) { - def result = toV2((FlowPayload) flow); - result.setDiverseFlowId(flow.getDiverseFlowId()); - return result; - } - static FlowEndpointV2 toV2(FlowEndpointPayload ep) { FlowEndpointV2.builder() .switchId(ep.getSwitchDpId()) @@ -346,58 +222,6 @@ class FlowHelperV2 { new DetectConnectedDevicesV2(payload.lldp, payload.arp) } - static FlowRequestV2 toRequest(FlowResponseV2 flow) { - return FlowRequestV2.builder() - .flowId(flow.flowId) - .source(flow.source) - .destination(flow.destination) - .maximumBandwidth(flow.maximumBandwidth) - .ignoreBandwidth(flow.ignoreBandwidth) - .periodicPings(flow.periodicPings) - .description(flow.description) - .maxLatency(flow.maxLatency) - .maxLatencyTier2(flow.maxLatencyTier2) - .priority(flow.priority) - .pinned(flow.pinned) - .allocateProtectedPath(flow.allocateProtectedPath) - .encapsulationType(flow.encapsulationType) - .pathComputationStrategy(flow.pathComputationStrategy) - .build() - } - - int getFlowRulesCountBySwitch(FlowResponseV2 flow, boolean isForward, int 
involvedSwitchesCount) { - def endpoint = isForward ? flow.source : flow.destination - def swProps = northbound.getSwitchProperties(endpoint.getSwitchId()) - def featureToggles = northbound.getFeatureToggles() - int count = involvedSwitchesCount-1; - def server42 = swProps.server42FlowRtt && featureToggles.server42FlowRtt - && flow.source.switchId != flow.destination.switchId - - - count++ // customer input rule - - if (endpoint.vlanId != 0) { - count++; // pre ingress rule - } - count++ // multi table ingress rule - - if (server42) { - if (endpoint.vlanId != 0) { - count++ //shared server42 rule - } - count++ // ingress server42 rule - count++ // server42 input rule - } - - if (swProps.switchLldp || endpoint.detectConnectedDevices.lldp) { - count++ // lldp rule - } - if (swProps.switchArp || endpoint.detectConnectedDevices.arp) { - count++ // arp rule - } - return count - } - /** * Returns flow endpoint with randomly chosen vlan. * diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/HaPathHelper.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/HaPathHelper.groovy deleted file mode 100644 index 3ad179097d2..00000000000 --- a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/HaPathHelper.groovy +++ /dev/null @@ -1,38 +0,0 @@ -package org.openkilda.functionaltests.helpers - -import groovy.util.logging.Slf4j -import org.openkilda.northbound.dto.v2.haflows.HaFlowPaths -import org.openkilda.testing.model.topology.TopologyDefinition.Isl -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.context.annotation.Scope -import org.springframework.stereotype.Component - -import static org.springframework.beans.factory.config.ConfigurableBeanFactory.SCOPE_PROTOTYPE -import static org.openkilda.functionaltests.helpers.PathHelper.convert - -/** - * Holds utility methods for working with flow paths. 
- */ -@Component -@Slf4j -@Scope(SCOPE_PROTOTYPE) -class HaPathHelper{ - @Autowired - PathHelper pathHelper - - Set "get common ISLs"(HaFlowPaths haFlowPaths1, HaFlowPaths haFlowPaths2) { - return getInvolvedIsls(haFlowPaths1).intersect(getInvolvedIsls(haFlowPaths2)) - } - - Set getInvolvedIsls(HaFlowPaths haFlowPaths) { - return haFlowPaths.getSubFlowPaths() - .collect { - def isls = pathHelper.getInvolvedIsls(convert(it.getForward())) - if (it.protectedPath) { - isls += pathHelper.getInvolvedIsls(convert(it.protectedPath.forward)) - } - isls - } - .flatten() as Set - } -} diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/SwitchHelper.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/SwitchHelper.groovy index 746ae920009..924c5c1b3c5 100644 --- a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/SwitchHelper.groovy +++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/SwitchHelper.groovy @@ -126,6 +126,10 @@ class SwitchHelper { static NOVIFLOW_BURST_COEFFICIENT = 1.005 // Driven by the Noviflow specification static CENTEC_MIN_BURST = 1024 // Driven by the Centec specification static CENTEC_MAX_BURST = 32000 // Driven by the Centec specification + + //Kilda allows user to pass reserved VLAN IDs 1 and 4095 if they want. 
+ static final IntRange KILDA_ALLOWED_VLANS = 1..4095 + @Value('${burst.coefficient}') double burstCoefficient @Value('${discovery.generic.interval}') @@ -927,4 +931,16 @@ class SwitchHelper { def isServer42 = swProps.server42FlowRtt && featureToggles.server42FlowRtt return isServer42 } + + static int randomVlan() { + return randomVlan([]) + } + + static int randomVlan(List exclusions) { + return (KILDA_ALLOWED_VLANS - exclusions).shuffled().first() + } + + static List availableVlanList(List exclusions) { + return (KILDA_ALLOWED_VLANS - exclusions).shuffled() + } } \ No newline at end of file diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/builder/FlowBuilder.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/builder/FlowBuilder.groovy index f0a5460a3ed..4c8991a0e5c 100644 --- a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/builder/FlowBuilder.groovy +++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/builder/FlowBuilder.groovy @@ -1,9 +1,9 @@ package org.openkilda.functionaltests.helpers.builder -import static org.openkilda.functionaltests.helpers.FlowHelperV2.randomVlan import static org.openkilda.functionaltests.helpers.FlowNameGenerator.FLOW import static org.openkilda.functionaltests.helpers.StringGenerator.generateDescription import static org.openkilda.functionaltests.helpers.SwitchHelper.getRandomAvailablePort +import static org.openkilda.functionaltests.helpers.SwitchHelper.randomVlan import org.openkilda.functionaltests.helpers.model.FlowEncapsulationType import org.openkilda.functionaltests.helpers.model.FlowExtended diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/builder/HaFlowBuilder.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/builder/HaFlowBuilder.groovy index 
8d4a566f5f0..ce0ae9b0ca6 100644 --- a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/builder/HaFlowBuilder.groovy +++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/builder/HaFlowBuilder.groovy @@ -1,9 +1,9 @@ package org.openkilda.functionaltests.helpers.builder -import static org.openkilda.functionaltests.helpers.FlowHelperV2.randomVlan import static org.openkilda.functionaltests.helpers.FlowNameGenerator.HA_FLOW import static org.openkilda.functionaltests.helpers.StringGenerator.generateDescription import static org.openkilda.functionaltests.helpers.SwitchHelper.getRandomAvailablePort +import static org.openkilda.functionaltests.helpers.SwitchHelper.randomVlan import org.openkilda.functionaltests.helpers.model.FlowEncapsulationType import org.openkilda.functionaltests.helpers.model.HaFlowExtended diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/builder/YFlowBuilder.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/builder/YFlowBuilder.groovy index ff041ece769..00f9074f65d 100644 --- a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/builder/YFlowBuilder.groovy +++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/builder/YFlowBuilder.groovy @@ -1,10 +1,11 @@ package org.openkilda.functionaltests.helpers.builder +import static org.openkilda.functionaltests.helpers.SwitchHelper.availableVlanList +import static org.openkilda.functionaltests.helpers.SwitchHelper.randomVlan + import org.openkilda.functionaltests.model.cleanup.CleanupManager import org.openkilda.northbound.dto.v2.yflows.SubFlow -import static org.openkilda.functionaltests.helpers.FlowHelperV2.availableVlanList -import static org.openkilda.functionaltests.helpers.FlowHelperV2.randomVlan import static 
org.openkilda.functionaltests.helpers.FlowNameGenerator.Y_FLOW import static org.openkilda.functionaltests.helpers.StringGenerator.generateDescription import static org.openkilda.functionaltests.helpers.SwitchHelper.getRandomAvailablePort @@ -22,7 +23,6 @@ import org.openkilda.testing.model.topology.TopologyDefinition import org.openkilda.testing.service.northbound.NorthboundService import org.openkilda.testing.service.northbound.NorthboundServiceV2 -import com.github.javafaker.Faker import groovy.util.logging.Slf4j @Slf4j @@ -30,9 +30,6 @@ class YFlowBuilder { YFlowExtended yFlow - static def random = new Random() - static def faker = new Faker() - YFlowBuilder(SwitchTriplet swT, NorthboundService northbound, NorthboundServiceV2 northboundV2, diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/FlowExtended.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/FlowExtended.groovy index 0fdc2c15b7d..1d716a1bcf1 100644 --- a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/FlowExtended.groovy +++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/FlowExtended.groovy @@ -1,8 +1,8 @@ package org.openkilda.functionaltests.helpers.model import static groovyx.gpars.GParsPool.withPool -import static org.openkilda.functionaltests.helpers.FlowHelperV2.randomVlan import static org.openkilda.functionaltests.helpers.FlowNameGenerator.FLOW +import static org.openkilda.functionaltests.helpers.SwitchHelper.randomVlan import static org.openkilda.functionaltests.helpers.Wrappers.wait import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.DELETE_FLOW import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.OTHER @@ -256,6 +256,7 @@ class FlowExtended { .diverseFlowId(diverseWith ? 
diverseWith.first() : null) .build() cleanupManager.addAction(DELETE_FLOW, { delete() }, cleanupAfter) + log.debug("Adding flow '$flowId'") def flow = northboundV2.addFlow(flowRequest) return new FlowExtended(flow, northbound, northboundV2, topologyDefinition, cleanupManager, database) } @@ -263,6 +264,7 @@ class FlowExtended { FlowExtended createV1(FlowState expectedState = FlowState.UP, CleanupAfter cleanupAfter = TEST) { def flowRequest = convertToFlowCreatePayload() cleanupManager.addAction(DELETE_FLOW, { delete() }, cleanupAfter) + log.debug("Adding flow '$flowId'") northbound.addFlow(flowRequest) waitForBeingInState(expectedState) } @@ -270,6 +272,7 @@ class FlowExtended { FlowExtended sendCreateRequestV1(CleanupAfter cleanupAfter = TEST) { def flowRequest = convertToFlowPayload() cleanupManager.addAction(DELETE_FLOW, { delete() }, cleanupAfter) + log.debug("Adding flow '$flowId'") def flow = northbound.addFlow(flowRequest) return new FlowExtended(flow, northbound, northboundV2, topologyDefinition, cleanupManager, database) } @@ -340,33 +343,35 @@ class FlowExtended { } FlowEntityPath retrieveAllEntityPaths() { + log.debug("Getting Flow path for '$flowId'") FlowPathPayload flowPath = northbound.getFlowPath(flowId) new FlowEntityPath(flowPath, topologyDefinition) } def retrieveDetails() { - log.debug("Get Flow '$flowId' details") + log.debug("Getting Flow '$flowId' details") def flow = northboundV2.getFlow(flowId) return new FlowExtended(flow, northbound, northboundV2, topologyDefinition, cleanupManager, database) } def retrieveDetailsFromDB() { + log.debug("Getting DB details for Flow '$flowId'") database.getFlow(flowId) } def retrieveDetailsV1() { - log.debug("Get Flow '$flowId' details") + log.debug("Getting Flow '$flowId' details") def flow = northbound.getFlow(flowId) return new FlowExtended(flow, northbound, northboundV2, topologyDefinition, cleanupManager, database) } FlowIdStatusPayload retrieveFlowStatus() { - log.debug("Get Flow '$flowId' status") + 
log.debug("Getting Flow '$flowId' status") return northboundV2.getFlowStatus(flowId) } FlowHistory retrieveFlowHistory(Long timeFrom = null , Long timeTo = null) { - log.debug("Get Flow '$flowId' history details") + log.debug("Getting Flow '$flowId' history details") new FlowHistory(northbound.getFlowHistory(flowId, timeFrom, timeTo)) } @@ -375,17 +380,19 @@ class FlowExtended { } List retrieveFlowHistoryStatus(Long timeFrom = null, Long timeTo = null, Integer maxCount = null) { + log.debug("Getting '$flowId' Flow history status") northboundV2.getFlowHistoryStatuses(flowId, timeFrom, timeTo, maxCount).historyStatuses.collect { new FlowHistoryStatus(it.timestamp, it.statusBecome) } } List retrieveFlowHistoryStatus(Integer maxCount) { + log.debug("Getting '$flowId' Flow history status") retrieveFlowHistoryStatus(null, null, maxCount) } FlowMirrorPointsResponseV2 retrieveMirrorPoints() { - log.debug("Get Flow '$flowId' mirror points") + log.debug("Getting Flow '$flowId' mirror points") return northboundV2.getMirrorPoints(flowId) } @@ -395,11 +402,12 @@ class FlowExtended { } List validate() { - log.debug("Validate Flow '$flowId'") + log.debug("Validating Flow '$flowId'") northbound.validateFlow(flowId) } FlowRerouteResponseV2 reroute() { + log.debug("Rerouting Flow '$flowId'") northboundV2.rerouteFlow(flowId) } @@ -412,6 +420,7 @@ class FlowExtended { } FlowReroutePayload sync() { + log.debug("Sync Flow '$flowId'") northbound.synchronizeFlow(flowId) } @@ -421,6 +430,7 @@ class FlowExtended { } FlowExtended sendUpdateRequest(FlowExtended expectedEntity) { + log.debug("Updating Flow '$flowId'") def response = northboundV2.updateFlow(flowId, expectedEntity.convertToUpdate()) return new FlowExtended(response, northbound, northboundV2, topologyDefinition, cleanupManager, database) } @@ -431,6 +441,7 @@ class FlowExtended { } FlowExtended updateV1(FlowExtended expectedEntity, FlowState flowState = FlowState.UP) { + log.debug("Updating(V1) Flow '$flowId'") 
northbound.updateFlow(flowId, expectedEntity.convertToFlowPayload()) return waitForBeingInState(flowState) } @@ -441,10 +452,12 @@ } FlowReroutePayload rerouteV1() { + log.debug("Rerouting(V1) Flow '$flowId'") return northbound.rerouteFlow(flowId) } FlowExtended sendPartialUpdateRequest(FlowPatchV2 updateRequest) { + log.debug("Partially updating Flow '$flowId'") def response = northboundV2.partialUpdate(flowId, updateRequest) return new FlowExtended(response, northbound, northboundV2, topologyDefinition, cleanupManager, database) @@ -456,16 +469,19 @@ } FlowExtended sendPartialUpdateRequestV1(FlowPatchDto updateRequest) { + log.debug("Partially updating(V1) Flow '$flowId'") def response = northbound.partialUpdate(flowId, updateRequest) return new FlowExtended(response, northbound, northboundV2, topologyDefinition, cleanupManager, database) } void updateFlowBandwidthInDB(long newBandwidth) { + log.debug("Updating Flow '$flowId' bandwidth in DB") database.updateFlowBandwidth(flowId, newBandwidth) } void updateFlowMeterIdInDB(long newMeterId) { + log.debug("Updating Flow '$flowId' meter in DB") database.updateFlowMeterId(flowId, newMeterId) } @@ -524,19 +540,23 @@ This method swaps the main path with the protected path */ FlowExtended swapFlowPath() { + log.debug("Swapping Flow '$flowId' path") def flow = northbound.swapFlowPath(flowId) new FlowExtended(flow, northbound, northboundV2, topologyDefinition, cleanupManager, database) } FlowLoopResponse createFlowLoop(SwitchId switchId) { + log.debug("Creating loop Flow '$flowId'") northboundV2.createFlowLoop(flowId, new FlowLoopPayload(switchId)) } List retrieveFlowLoop(SwitchId switchId = null) { + log.debug("Getting loop Flow '$flowId'") northboundV2.getFlowLoop(flowId, switchId) } FlowLoopResponse deleteFlowLoop() { + log.debug("Deleting loop Flow '$flowId'") northboundV2.deleteFlowLoop(flowId) } @@ -561,6 +581,7 @@ } FlowMeterEntries 
resetMeters() { + log.debug("Resetting meters Flow '$flowId'") northbound.resetMeters(flowId) } @@ -590,7 +611,7 @@ class FlowExtended { wait(WAIT_OFFSET) { assert northbound.getFlowStatus(flowId).status != FlowState.IN_PROGRESS } } - log.debug("Deleting flow '$flowId'") + log.debug("Deleting Flow '$flowId'") def response = northbound.deleteFlow(flowId) wait(FLOW_CRUD_TIMEOUT) { assert !northbound.getFlowStatus(flowId) @@ -604,6 +625,7 @@ class FlowExtended { * Sends delete request for flow without actual clarification of flow deletion */ FlowResponseV2 sendDeleteRequest() { + log.debug("Deleting Flow '$flowId'") northboundV2.deleteFlow(flowId) } @@ -611,6 +633,7 @@ class FlowExtended { * Sends delete request for flow without actual clarification of flow deletion (API.V1) */ FlowPayload sendDeleteRequestV1() { + log.debug("Deleting(V1) Flow '$flowId'") northbound.deleteFlow(flowId) } @@ -619,6 +642,7 @@ class FlowExtended { * @param since can be used to get details about connected devices starting from a specific time */ FlowConnectedDevicesResponse retrieveConnectedDevices(String since = null) { + log.debug("Getting connected devices Flow '$flowId'") northbound.getFlowConnectedDevices(flowId, since) } @@ -780,8 +804,16 @@ class FlowExtended { assertions.verify() } + /** + * Check that all needed rules are created for a flow with protected path.
+ * Protected path creates the 'egress' rule only on the src and dst switches + * and creates 2 rules(input/output) on the transit switches.
+ * if (switchId == src/dst): 2 rules for main flow path + 1 egress for protected path = 3
+ * if (switchId != src/dst): 2 rules for main flow path + 2 rules for protected path = 4
+ * + * @param flowInvolvedSwitchesWithRules (map of switch-rules data for further verification) + */ void verifyRulesForProtectedFlowOnSwitches(HashMap> flowInvolvedSwitchesWithRules) { - def flowDBInfo = retrieveDetailsFromDB() long mainForwardCookie = flowDBInfo.forwardPath.cookie.value long mainReverseCookie = flowDBInfo.reversePath.cookie.value diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/BaseSpecification.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/BaseSpecification.groovy index 242e8837f9b..0afae820daa 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/BaseSpecification.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/BaseSpecification.groovy @@ -11,8 +11,6 @@ import org.openkilda.functionaltests.model.cleanup.CleanupManager import static groovyx.gpars.GParsPool.withPool import static org.junit.jupiter.api.Assumptions.assumeTrue -import org.openkilda.functionaltests.helpers.FlowHelper -import org.openkilda.functionaltests.helpers.FlowHelperV2 import org.openkilda.functionaltests.helpers.PathHelper import org.openkilda.functionaltests.helpers.PortAntiflapHelper import org.openkilda.functionaltests.helpers.StatsHelper @@ -61,8 +59,6 @@ class BaseSpecification extends Specification { @Autowired @Shared IslUtils islUtils @Autowired @Shared - FlowHelper flowHelper - @Autowired @Shared TopologyHelper topologyHelper @Autowired @Shared PathHelper pathHelper @@ -74,8 +70,6 @@ class BaseSpecification extends Specification { @Autowired @Shared @Qualifier("islandNbV2") NorthboundServiceV2 northboundV2 @Autowired @Shared - FlowHelperV2 flowHelperV2 - @Autowired @Shared StatsHelper statsHelper @Autowired @Shared SwitchPairs switchPairs diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/AutoRerouteSpec.groovy 
b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/AutoRerouteSpec.groovy index 0f3c99bab11..7a8c92ca81c 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/AutoRerouteSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/AutoRerouteSpec.groovy @@ -25,7 +25,7 @@ import org.openkilda.functionaltests.helpers.factory.FlowFactory import org.openkilda.functionaltests.helpers.model.FlowExtended import org.openkilda.functionaltests.helpers.model.FlowHistoryEventExtension import org.openkilda.functionaltests.helpers.model.SwitchPortVlan -import org.openkilda.messaging.info.event.IslChangeType +import org.openkilda.functionaltests.model.stats.Direction import org.openkilda.messaging.info.event.PathNode import org.openkilda.messaging.info.event.SwitchChangeType import org.openkilda.messaging.payload.flow.FlowState @@ -84,18 +84,17 @@ class AutoRerouteSpec extends HealthCheckSpecification { def "Strict bandwidth true: Flow status is set to DOWN after reroute if no alternative path with enough bandwidth"() { given: "A flow with one alternative path at least" def switchPair = switchPairs.all().neighbouring().withAtLeastNPaths(2).random() - List> allFlowPaths = switchPair.paths FlowExtended flow = flowFactory.getBuilder(switchPair) .withStrictBandwidth(true).build() .create() def initialPath = flow.retrieveAllEntityPaths() + def flowIsls = initialPath.flowPath.getInvolvedIsls(Direction.FORWARD) + initialPath.flowPath.getInvolvedIsls(Direction.REVERSE) and: "Alt path ISLs have not enough bandwidth to host the flow" - def altPaths = allFlowPaths.findAll { it != initialPath.getPathNodes() } - def flowIsls = initialPath.flowPath.getInvolvedIsls() - def altIsls = altPaths.collectMany { pathHelper.getInvolvedIsls(it).findAll { !(it in flowIsls || it.reversed in flowIsls) } } - .unique { a, b -> (a == b || a == b.reversed) ? 
0 : 1 } + def altIsls = topology.getRelatedIsls(switchPair.src) + topology.getRelatedIsls(switchPair.dst) + altIsls.removeAll(flowIsls) + List busyEndpoints = flow.occupiedEndpoints() altIsls.each { isl -> def linkProp = islUtils.toLinkProps(isl, [cost: "1"]) @@ -107,11 +106,12 @@ class AutoRerouteSpec extends HealthCheckSpecification { northbound.deleteLinkProps([linkProp]) } + altIsls.each { + assert northbound.getLink(it).availableBandwidth < flow.maximumBandwidth + } + when: "Fail a flow ISL (bring switch port down)" - Set altFlowIsls = [] - allFlowPaths.findAll { it != initialPath.getPathNodes() } - .each { altFlowIsls.addAll(pathHelper.getInvolvedIsls(it)) } - def islToFail = flowIsls.find { !(it in altFlowIsls) && !(it.reversed in altFlowIsls) } + def islToFail = flowIsls.first() islHelper.breakIsl(islToFail) then: "Flow history shows 3 retry attempts, eventually bringing flow to Down" diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/FlowCrudSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/FlowCrudSpec.groovy index c993dc74826..805baeee9ee 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/FlowCrudSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/FlowCrudSpec.groovy @@ -1,5 +1,7 @@ package org.openkilda.functionaltests.spec.flows +import org.openkilda.messaging.info.rule.FlowEntry + import groovy.util.logging.Slf4j import org.openkilda.functionaltests.HealthCheckSpecification import org.openkilda.functionaltests.error.flow.FlowNotCreatedExpectedError @@ -716,7 +718,11 @@ Failed to find path with requested bandwidth=${IMPOSSIBLY_HIGH_BANDWIDTH}/) flowInfo.statusDetails and: "Rules for main and protected paths are created" - wait(WAIT_OFFSET) { flowHelper.verifyRulesOnProtectedFlow(flow.flowId) } + wait(WAIT_OFFSET) { + HashMap> 
flowInvolvedSwitchesWithRules = flowPathInfo.getInvolvedSwitches() + .collectEntries{ [(it): switchRulesFactory.get(it).getRules()] } as HashMap> + flow.verifyRulesForProtectedFlowOnSwitches(flowInvolvedSwitchesWithRules) + } and: "Validation of flow must be successful" flow.validateAndCollectDiscrepancies().isEmpty() diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/MirrorEndpointsSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/MirrorEndpointsSpec.groovy index 6a6880b9189..1c996c536a4 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/MirrorEndpointsSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/MirrorEndpointsSpec.groovy @@ -8,8 +8,8 @@ import static org.openkilda.functionaltests.extension.tags.Tag.LOW_PRIORITY import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE_SWITCHES import static org.openkilda.functionaltests.extension.tags.Tag.TOPOLOGY_DEPENDENT -import static org.openkilda.functionaltests.helpers.FlowHelperV2.randomVlan import static org.openkilda.functionaltests.helpers.FlowNameGenerator.FLOW +import static org.openkilda.functionaltests.helpers.SwitchHelper.randomVlan import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.OTHER import static org.openkilda.functionaltests.model.stats.FlowStatsMetric.FLOW_RAW_BYTES import static org.openkilda.testing.Constants.WAIT_OFFSET @@ -23,7 +23,6 @@ import org.openkilda.functionaltests.error.flowmirror.FlowMirrorPointNotCreatedE import org.openkilda.functionaltests.error.flowmirror.FlowMirrorPointNotCreatedWithConflictExpectedError import org.openkilda.functionaltests.error.switchproperties.SwitchPropertiesNotUpdatedExpectedError import org.openkilda.functionaltests.extension.tags.Tags 
-import org.openkilda.functionaltests.helpers.FlowHistoryConstants import org.openkilda.functionaltests.helpers.Wrappers import org.openkilda.functionaltests.helpers.factory.FlowFactory import org.openkilda.functionaltests.helpers.model.FlowActionType diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/ThrottlingRerouteSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/ThrottlingRerouteSpec.groovy index 4a35058d482..e4d6d855bce 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/ThrottlingRerouteSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/ThrottlingRerouteSpec.groovy @@ -1,5 +1,6 @@ package org.openkilda.functionaltests.spec.flows +import static java.time.format.DateTimeFormatter.ISO_OFFSET_DATE_TIME import static org.junit.jupiter.api.Assumptions.assumeTrue import static org.openkilda.functionaltests.extension.tags.Tag.ISL_RECOVER_ON_FAIL import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE @@ -26,6 +27,7 @@ import spock.lang.Ignore import spock.lang.Narrative import spock.lang.Shared +import java.time.ZonedDateTime import java.util.concurrent.TimeUnit @Narrative(""" @@ -76,7 +78,8 @@ class ThrottlingRerouteSpec extends HealthCheckSpecification { } def rerouteTimestamp = flows.first().retrieveFlowHistory().entries.last().timestampIso // check time diff between the time when reroute was triggered and the first action of reroute in history - def differenceInMillis = flowHelper.convertStringTimestampIsoToLong(rerouteTimestamp) - rerouteTriggersEnd + def rerouteStarts = ZonedDateTime.parse(rerouteTimestamp, ISO_OFFSET_DATE_TIME).toInstant().toEpochMilli() + def differenceInMillis = rerouteStarts - rerouteTriggersEnd // reroute starts not earlier than the expected reroute delay assert differenceInMillis > (rerouteDelay) * 1000 // 
reroute starts not later than 2 seconds later than the expected delay diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/VxlanFlowSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/VxlanFlowSpec.groovy index 8e8ed566850..fba4b9ae2ef 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/VxlanFlowSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/VxlanFlowSpec.groovy @@ -23,7 +23,9 @@ import org.openkilda.functionaltests.helpers.model.SwitchPair import org.openkilda.functionaltests.helpers.model.SwitchRulesFactory import org.openkilda.functionaltests.model.stats.Direction import org.openkilda.messaging.info.event.PathNode +import org.openkilda.messaging.info.rule.FlowEntry import org.openkilda.messaging.payload.flow.FlowState +import org.openkilda.model.SwitchId import org.openkilda.model.cookie.Cookie import org.openkilda.northbound.dto.v1.switches.SwitchPropertiesDto import org.openkilda.testing.model.topology.TopologyDefinition.Switch @@ -249,12 +251,16 @@ class VxlanFlowSpec extends HealthCheckSpecification { .build().create() then: "Flow is created with protected path" - def flowPathInfo = flow.retrieveDetails() - flowPathInfo.allocateProtectedPath + def flowPathInfo = flow.retrieveAllEntityPaths() + !flowPathInfo.flowPath.protectedPath.isPathAbsent() flow.retrieveDetails().statusDetails and: "Rules for main and protected paths are created" - Wrappers.wait(WAIT_OFFSET) { flowHelper.verifyRulesOnProtectedFlow(flow.flowId) } + Wrappers.wait(WAIT_OFFSET) { + HashMap> flowInvolvedSwitchesWithRules = flowPathInfo.getInvolvedSwitches() + .collectEntries{ [(it): switchRulesFactory.get(it).getRules()] } as HashMap> + flow.verifyRulesForProtectedFlowOnSwitches(flowInvolvedSwitchesWithRules) + } def flowInfoFromDb = flow.retrieveDetailsFromDB() // ingressRule 
should contain "pushVxlan" diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/haflows/HaFlowUpdateSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/haflows/HaFlowUpdateSpec.groovy index 2ab02af0a44..8e38fef882d 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/haflows/HaFlowUpdateSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/haflows/HaFlowUpdateSpec.groovy @@ -1,7 +1,8 @@ package org.openkilda.functionaltests.spec.flows.haflows +import static org.openkilda.functionaltests.helpers.SwitchHelper.randomVlan + import org.openkilda.functionaltests.helpers.Wrappers -import org.openkilda.model.Switch import static org.openkilda.functionaltests.extension.tags.Tag.HA_FLOW @@ -300,8 +301,8 @@ At least one of subflow endpoint switch id must differ from shared endpoint swit } private void setRandomVlans(HaFlowExtended payload) { - payload.sharedEndpoint.vlanId = flowHelperV2.randomVlan([payload.sharedEndpoint.vlanId]) - payload.subFlows.forEach { it.endpointVlan = flowHelperV2.randomVlan([it.endpointVlan]) } + payload.sharedEndpoint.vlanId = randomVlan([payload.sharedEndpoint.vlanId]) + payload.subFlows.forEach { it.endpointVlan = randomVlan([it.endpointVlan]) } } def "User cannot partial update an HA-Flow with #data.descr"() { diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/yflows/SubFlowSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/yflows/SubFlowSpec.groovy index ca2f6ce9c9d..75cf2b453dc 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/yflows/SubFlowSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/yflows/SubFlowSpec.groovy @@ 
-1,5 +1,8 @@ package org.openkilda.functionaltests.spec.flows.yflows +import static org.openkilda.functionaltests.helpers.FlowNameGenerator.FLOW +import static org.openkilda.functionaltests.helpers.SwitchHelper.randomVlan + import org.openkilda.functionaltests.HealthCheckSpecification import org.openkilda.functionaltests.error.flow.FlowNotModifiedExpectedError import org.openkilda.functionaltests.helpers.model.YFlowActionType @@ -114,13 +117,13 @@ class SubFlowSpec extends HealthCheckSpecification { action: "create a mirrorPoint on", method: { SubFlow sFlow -> def mirrorEndpoint = FlowMirrorPointPayload.builder() - .mirrorPointId(flowHelperV2.generateFlowId()) + .mirrorPointId(FLOW.generateId()) .mirrorPointDirection(FlowPathDirection.FORWARD.toString().toLowerCase()) .mirrorPointSwitchId(sFlow.endpoint.switchId) .sinkEndpoint(FlowEndpointV2.builder().switchId(sFlow.endpoint.switchId) .portNumber(topology.getAllowedPortsForSwitch( topology.activeSwitches.find { it.dpId == sFlow.endpoint.switchId }).first()) - .vlanId(flowHelperV2.randomVlan()) + .vlanId(randomVlan()) .build()) .build() northboundV2.createMirrorPoint(sFlow.flowId, mirrorEndpoint) diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/yflows/YFlowDiversitySpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/yflows/YFlowDiversitySpec.groovy index 5acd668e8bc..821bf79e566 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/yflows/YFlowDiversitySpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/yflows/YFlowDiversitySpec.groovy @@ -69,8 +69,8 @@ class YFlowDiversitySpec extends HealthCheckSpecification { allInvolvedIsls.unique(false) == allInvolvedIsls and: "Y-Flows histories contains 'diverse' information" - [yFlow2.subFlows.first(), yFlow3.subFlows.first()].each {//yFlow1 had no diversity at the 
time of creation - assert flowHelper.getEarliestHistoryEntryByAction(it.flowId, FlowActionType.CREATE.value).dumps + [yFlow2, yFlow3].each {//yFlow1 had no diversity at the time of creation + assert it.retrieveSubFlowHistory(it.subFlows.first().flowId).getEntriesByType(FlowActionType.CREATE).first().dumps .find { it.type == "stateAfter" }?.diverseGroupId } @@ -85,7 +85,7 @@ class YFlowDiversitySpec extends HealthCheckSpecification { } then: "Y-Flows' histories contain 'diverseGroupId' information in 'delete' operation" - verifyAll(flowHelper.getEarliestHistoryEntryByAction(yFlow1.subFlows[0].flowId, FlowActionType.DELETE.value).dumps) { + verifyAll(yFlow1.retrieveSubFlowHistory(yFlow1.subFlows[0].flowId).getEntriesByType(FlowActionType.DELETE).first().dumps) { it.find { it.type == "stateBefore" }?.diverseGroupId !it.find { it.type == "stateAfter" }?.diverseGroupId } @@ -136,7 +136,7 @@ class YFlowDiversitySpec extends HealthCheckSpecification { assert involvedIslSubFlowAfterUpdate != involvedIslSimpleFlow and: "First sub flow history contains 'groupId' information" - verifyAll(flowHelper.getEarliestHistoryEntryByAction(subFlowId, FlowActionType.UPDATE.value).dumps) { + verifyAll(yFlow.retrieveSubFlowHistory(subFlowId).getEntriesByType(FlowActionType.UPDATE).first().dumps) { !it.find { it.type == "stateBefore" }?.diverseGroupId it.find { it.type == "stateAfter" }?.diverseGroupId } diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/LagPortSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/LagPortSpec.groovy index 8e0f180648e..a27489460bb 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/LagPortSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/LagPortSpec.groovy @@ -1,9 +1,9 @@ package org.openkilda.functionaltests.spec.switches import 
static groovyx.gpars.GParsPool.withPool -import static org.openkilda.functionaltests.helpers.FlowHelperV2.randomVlan import static org.openkilda.functionaltests.extension.tags.Tag.HARDWARE import static org.openkilda.functionaltests.extension.tags.Tag.SWITCH_RECOVER_ON_FAIL +import static org.openkilda.functionaltests.helpers.SwitchHelper.randomVlan import static org.openkilda.model.MeterId.LACP_REPLY_METER_ID import static org.openkilda.model.cookie.Cookie.DROP_SLOW_PROTOCOLS_LOOP_COOKIE import static org.openkilda.testing.Constants.NON_EXISTENT_SWITCH_ID diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchesFlowsV2Spec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchesFlowsV2Spec.groovy index 5d578ba88c8..6bd86db1f9a 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchesFlowsV2Spec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchesFlowsV2Spec.groovy @@ -3,7 +3,7 @@ package org.openkilda.functionaltests.spec.switches import static org.junit.jupiter.api.Assumptions.assumeTrue import static org.openkilda.functionaltests.extension.tags.Tag.LOW_PRIORITY import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE -import static org.openkilda.functionaltests.helpers.FlowHelperV2.randomVlan +import static org.openkilda.functionaltests.helpers.SwitchHelper.randomVlan import static org.openkilda.functionaltests.model.cleanup.CleanupAfter.CLASS import static org.openkilda.messaging.payload.flow.FlowState.UP @@ -84,8 +84,11 @@ class SwitchesFlowsV2Spec extends HealthCheckSpecification { def "System allows to get flows on particular ports on switch"() { given: "Y-Flow subflow which starts on switch" and: "List of the ports that subflow uses on switch, received from flow path" - def usedPortsList = flowHelper."get ports that 
flow uses on switch from path"(yFlowSubFlow2Id, - switchTriplet.getShared().getDpId()) + def usedPortsList = yFlow.retrieveAllEntityPaths().subFlowPaths.find { it.flowId == yFlowSubFlow2Id } + .collect { + (it.path.forward.getNodes().nodes + it?.protectedPath?.forward?.getNodes()?.nodes) + .findAll { it?.switchId == switchTriplet.getShared().getDpId() }.portNo + }.flatten() when: "Get all flows on the switch ports used by subflow under test" def response = switchHelper.getFlowsV2(switchTriplet.getShared(), usedPortsList) From cfbf09040885e4b9a891440c790c58e00cae3e2f Mon Sep 17 00:00:00 2001 From: Ivan Chupin Date: Tue, 23 Jul 2024 14:32:30 +0200 Subject: [PATCH 4/7] Add split kafka message support for flow validation process --- .../floodlight-api/build.gradle | 3 + .../response/ChunkedSpeakerDataResponse.java | 76 ++++++ .../ChunkedSpeakerDataResponseTest.java | 130 ++++++++++ .../floodlight/kafka/RecordHandler.java | 58 +++-- .../service/kafka/IKafkaProducerService.java | 4 + .../service/kafka/KafkaProducerService.java | 10 + .../wfm/topology/flowhs/FlowHsTopology.java | 2 +- .../topology/flowhs/FlowHsTopologyConfig.java | 4 + .../bolts/SpeakerWorkerForDumpsBolt.java | 48 ++-- .../SpeakerCommandForDumpsCarrier.java | 27 ++ .../service/SpeakerWorkerForDumpsService.java | 164 ++++++++++++ .../SpeakerWorkerForDumpsServiceTest.java | 236 ++++++++++++++++++ 12 files changed, 726 insertions(+), 36 deletions(-) create mode 100644 src-java/floodlight-service/floodlight-api/src/main/java/org/openkilda/floodlight/api/response/ChunkedSpeakerDataResponse.java create mode 100644 src-java/floodlight-service/floodlight-api/src/test/java/org/openkilda/floodlight/api/response/ChunkedSpeakerDataResponseTest.java create mode 100644 src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/service/SpeakerCommandForDumpsCarrier.java create mode 100644 
src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/service/SpeakerWorkerForDumpsService.java create mode 100644 src-java/flowhs-topology/flowhs-storm-topology/src/test/java/org/openkilda/wfm/topology/flowhs/service/SpeakerWorkerForDumpsServiceTest.java diff --git a/src-java/floodlight-service/floodlight-api/build.gradle b/src-java/floodlight-service/floodlight-api/build.gradle index abc77bebd9e..9701fde3792 100644 --- a/src-java/floodlight-service/floodlight-api/build.gradle +++ b/src-java/floodlight-service/floodlight-api/build.gradle @@ -12,9 +12,12 @@ dependencies { implementation 'com.fasterxml.jackson.core:jackson-databind' implementation 'com.google.guava:guava' implementation 'org.apache.commons:commons-lang3' + implementation 'org.apache.commons:commons-collections4' implementation 'org.slf4j:slf4j-api' + testImplementation 'org.junit.jupiter:junit-jupiter-api' testImplementation 'org.junit.jupiter:junit-jupiter-engine' + testImplementation 'org.mockito:mockito-junit-jupiter' compileOnly 'org.projectlombok:lombok' annotationProcessor 'org.projectlombok:lombok' diff --git a/src-java/floodlight-service/floodlight-api/src/main/java/org/openkilda/floodlight/api/response/ChunkedSpeakerDataResponse.java b/src-java/floodlight-service/floodlight-api/src/main/java/org/openkilda/floodlight/api/response/ChunkedSpeakerDataResponse.java new file mode 100644 index 00000000000..18848440017 --- /dev/null +++ b/src-java/floodlight-service/floodlight-api/src/main/java/org/openkilda/floodlight/api/response/ChunkedSpeakerDataResponse.java @@ -0,0 +1,76 @@ +/* Copyright 2024 Telstra Open Source + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.openkilda.floodlight.api.response; + +import org.openkilda.messaging.MessageContext; +import org.openkilda.messaging.MessageData; + +import com.fasterxml.jackson.annotation.JsonProperty; +import lombok.EqualsAndHashCode; +import lombok.Getter; +import lombok.NonNull; +import lombok.ToString; +import org.apache.commons.collections4.CollectionUtils; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +@Getter +@ToString(callSuper = true) +@EqualsAndHashCode(callSuper = true) +public class ChunkedSpeakerDataResponse extends SpeakerDataResponse { + + @JsonProperty("message_id") + private String messageId; + + @JsonProperty("total_messages") + private int totalMessages; + + public ChunkedSpeakerDataResponse(@JsonProperty("data") MessageData data, + @JsonProperty("message_context") @NonNull MessageContext messageContext, + @JsonProperty("total_messages") int totalMessages) { + super(messageContext, data); + this.messageId = messageContext.getCorrelationId(); + this.totalMessages = totalMessages; + } + + ChunkedSpeakerDataResponse(MessageData data, MessageContext messageContext, int totalMessages, int messageIndex) { + this(data, messageContext, totalMessages); + this.messageId = String.join(" : ", String.valueOf(messageIndex), + messageContext.getCorrelationId()); + } + + /** + * Creates a list of ChunkedSpeakerDataResponse objects from a collection of MessageData. 
+ */ + public static List createChunkedList( + Collection dataCollection, MessageContext messageContext) { + + if (CollectionUtils.isEmpty(dataCollection)) { + return Collections.singletonList(new ChunkedSpeakerDataResponse(null, messageContext, 0)); + } + + List result = new ArrayList<>(); + int i = 0; + for (MessageData messageData : dataCollection) { + result.add(new ChunkedSpeakerDataResponse( + messageData, messageContext, dataCollection.size(), i++)); + } + return result; + } +} diff --git a/src-java/floodlight-service/floodlight-api/src/test/java/org/openkilda/floodlight/api/response/ChunkedSpeakerDataResponseTest.java b/src-java/floodlight-service/floodlight-api/src/test/java/org/openkilda/floodlight/api/response/ChunkedSpeakerDataResponseTest.java new file mode 100644 index 00000000000..423a64d155c --- /dev/null +++ b/src-java/floodlight-service/floodlight-api/src/test/java/org/openkilda/floodlight/api/response/ChunkedSpeakerDataResponseTest.java @@ -0,0 +1,130 @@ +/* Copyright 2024 Telstra Open Source + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.openkilda.floodlight.api.response; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertSame; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.Mockito.mock; + +import org.openkilda.messaging.MessageContext; +import org.openkilda.messaging.MessageData; + +import org.junit.jupiter.api.Test; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +public class ChunkedSpeakerDataResponseTest { + + /** + * Tests the createChunkedList method to ensure it throws a NullPointerException + * when passed a null collection. This verifies that the method correctly handles + * null input by throwing the appropriate exception. + */ + @Test + void testCreateChunkedListWithNullCollection() { + + MessageContext messageContext = new MessageContext("Correlation Id"); + + // Act & Assert + NullPointerException thrown = assertThrows(NullPointerException.class, () -> + ChunkedSpeakerDataResponse.createChunkedList(null, messageContext), + "Expected createChunkedList() to throw, but it didn't" + ); + assertEquals("data is marked non-null but is null", thrown.getMessage(), + "Exception message should match"); + } + + /** + * Tests the createChunkedList method to ensure it throws a NullPointerException + * when passed an empty collection. This verifies that the method correctly handles + * empty collections by throwing the appropriate exception. 
+ */ + @Test + void testCreateChunkedListWithEmptyCollection() { + // Arrange + MessageContext messageContext = new MessageContext("Correlation Id"); + + NullPointerException thrown = assertThrows(NullPointerException.class, () -> + ChunkedSpeakerDataResponse.createChunkedList(Collections.emptyList(), messageContext), + "Expected createChunkedList() to throw, but it didn't" + ); + assertEquals("data is marked non-null but is null", thrown.getMessage(), + "Exception message should match"); + } + + /** + * Tests the createChunkedList method with a collection containing a single item. + * Verifies that the method correctly processes the single item and sets the + * messageId and totalMessages fields appropriately. + */ + @Test + void testCreateChunkedListWithOneItem() { + // Arrange + MessageContext messageContext = new MessageContext("Correlation Id"); + MessageData mockData = mock(MessageData.class); + + // Act + List result + = ChunkedSpeakerDataResponse.createChunkedList(Collections.singletonList(mockData), messageContext); + + // Assert + assertEquals(1, result.size(), "Result list should contain one element"); + ChunkedSpeakerDataResponse response = result.get(0); + assertSame(mockData, response.getData(), "Data should be the same as the input"); + assertEquals(1, response.getTotalMessages(), "Total messages should be 1"); + assertEquals("0 : Correlation Id", response.getMessageId(), + "Message ID should be '0 : Correlation Id'"); + } + + /** + * Tests the createChunkedList method with a collection containing multiple items. + * Verifies that the method correctly processes each item, setting the messageId + * and totalMessages fields appropriately for each element in the collection. 
+ */ + @Test + void testCreateChunkedListWithMultipleItems() { + // Arrange + MessageContext messageContext = new MessageContext("mockCorrelationId"); + MessageData mockData1 = mock(MessageData.class); + MessageData mockData2 = mock(MessageData.class); + + List dataCollection = new ArrayList<>(); + dataCollection.add(mockData1); + dataCollection.add(mockData2); + + // Act + List result + = ChunkedSpeakerDataResponse.createChunkedList(dataCollection, messageContext); + + // Assert + assertEquals(2, result.size(), "Result list should contain two elements"); + + ChunkedSpeakerDataResponse response1 = result.get(0); + assertSame(mockData1, response1.getData(), "First response data should be mockData1"); + assertEquals(2, response1.getTotalMessages(), "Total messages should be 2"); + assertEquals("0 : mockCorrelationId", response1.getMessageId(), + "First message ID should be '0 : mockCorrelationId'"); + + ChunkedSpeakerDataResponse response2 = result.get(1); + assertSame(mockData2, response2.getData(), "Second response data should be mockData2"); + assertEquals(2, response2.getTotalMessages(), "Total messages should be 2"); + assertEquals("1 : mockCorrelationId", response2.getMessageId(), + "Second message ID should be '1 : mockCorrelationId'"); + } +} diff --git a/src-java/floodlight-service/floodlight-modules/src/main/java/org/openkilda/floodlight/kafka/RecordHandler.java b/src-java/floodlight-service/floodlight-modules/src/main/java/org/openkilda/floodlight/kafka/RecordHandler.java index 8955de24ca5..2a50b9d5909 100644 --- a/src-java/floodlight-service/floodlight-modules/src/main/java/org/openkilda/floodlight/kafka/RecordHandler.java +++ b/src-java/floodlight-service/floodlight-modules/src/main/java/org/openkilda/floodlight/kafka/RecordHandler.java @@ -283,22 +283,22 @@ private void doDeleteSwitchRules(final CommandMessage message) { } private void doDumpRulesRequest(CommandMessage message) { - processDumpRulesRequest(((DumpRulesRequest) 
message.getData()).getSwitchId(), + dumpRulesRequest(((DumpRulesRequest) message.getData()).getSwitchId(), buildSenderToNorthbound(message), message); } private void doDumpRulesForSwitchManagerRequest(CommandMessage message) { - processDumpRulesRequest(((DumpRulesForSwitchManagerRequest) message.getData()).getSwitchId(), + dumpRulesRequest(((DumpRulesForSwitchManagerRequest) message.getData()).getSwitchId(), buildSenderToSwitchManager(message), message); } private void doDumpRulesForFlowHsRequest(CommandMessage message) { - processDumpRulesRequest(((DumpRulesForFlowHsRequest) message.getData()).getSwitchId(), + dumpRulesRequest(((DumpRulesForFlowHsRequest) message.getData()).getSwitchId(), buildSenderToFlowHs(message), message); } - private void processDumpRulesRequest(SwitchId switchId, java.util.function.Consumer sender, - CommandMessage commandMessage) { + private void dumpRulesRequest(SwitchId switchId, java.util.function.Consumer sender, + CommandMessage commandMessage) { try { logger.debug("Loading installed rules for switch {}", switchId); List flowEntries = @@ -508,22 +508,22 @@ private void doDumpPortDescriptionRequest(CommandMessage message) { private void doDumpMetersRequest(CommandMessage message) { DumpMetersRequest request = (DumpMetersRequest) message.getData(); - dumpMeters(request.getSwitchId(), buildSenderToNorthbound(message), message); + dumpMetersRequest(request.getSwitchId(), buildSenderToNorthbound(message), message); } private void doDumpMetersForSwitchManagerRequest(CommandMessage message) { DumpMetersForSwitchManagerRequest request = (DumpMetersForSwitchManagerRequest) message.getData(); - dumpMeters(request.getSwitchId(), buildSenderToSwitchManager(message), message); + dumpMetersRequest(request.getSwitchId(), buildSenderToSwitchManager(message), message); } private void doDumpMetersForFlowHsRequest(CommandMessage message) { DumpMetersForFlowHsRequest request = (DumpMetersForFlowHsRequest) message.getData(); - 
dumpMeters(request.getSwitchId(), buildSenderToFlowHs(message), message); + dumpMetersRequest(request.getSwitchId(), buildSenderToFlowHs(message), message); } - private void dumpMeters(SwitchId switchId, - java.util.function.Consumer sender, - CommandMessage message) { + private void dumpMetersRequest(SwitchId switchId, + java.util.function.Consumer sender, + CommandMessage message) { try { logger.debug("Get all meters for switch {}", switchId); ISwitchManager switchManager = context.getSwitchManager(); @@ -627,6 +627,32 @@ private java.util.function.Consumer buildSenderToSwitchManager(Mess }; } + private java.util.function.Consumer buildSenderToFlowHs(Message message) { + IKafkaProducerService producerService = getKafkaProducer(); + return data -> { + MessageContext messageContext = new MessageContext(message); + if (data instanceof Chunkable) { + List chunks = ((Chunkable) data).split( + context.getKafkaChannel().getConfig().getMessagesBatchSize()); + if (chunks.isEmpty()) { + sendSpeakerDataResponse(producerService, message, messageContext, data); + } else { + producerService.sendChunkedSpeakerDataAndTrack( + context.getKafkaSpeakerFlowHsTopic(), messageContext, chunks); + } + } else { + sendSpeakerDataResponse(producerService, message, messageContext, data); + } + }; + } + + private void sendSpeakerDataResponse(IKafkaProducerService producerService, Message message, + MessageContext messageContext, MessageData data) { + SpeakerDataResponse result = new SpeakerDataResponse(messageContext, data); + producerService.sendMessageAndTrack(context.getKafkaSpeakerFlowHsTopic(), + message.getCorrelationId(), result); + } + private java.util.function.Consumer buildSenderToNorthbound(Message message) { return buildSenderToTopic(context.getKafkaNorthboundTopic(), message.getCorrelationId(), message.getTimestamp()); @@ -648,16 +674,6 @@ private java.util.function.Consumer buildSenderToTopic(String kafka }; } - private java.util.function.Consumer buildSenderToFlowHs(Message 
message) { - IKafkaProducerService producerService = getKafkaProducer(); - return data -> { - MessageContext messageContext = new MessageContext(message); - SpeakerDataResponse result = new SpeakerDataResponse(messageContext, data); - producerService.sendMessageAndTrack(context.getKafkaSpeakerFlowHsTopic(), - message.getCorrelationId(), result); - }; - } - private void doModifyMeterRequest(CommandMessage message) { MeterModifyCommandRequest request = (MeterModifyCommandRequest) message.getData(); diff --git a/src-java/floodlight-service/floodlight-modules/src/main/java/org/openkilda/floodlight/service/kafka/IKafkaProducerService.java b/src-java/floodlight-service/floodlight-modules/src/main/java/org/openkilda/floodlight/service/kafka/IKafkaProducerService.java index 22f0726082a..981f053355d 100644 --- a/src-java/floodlight-service/floodlight-modules/src/main/java/org/openkilda/floodlight/service/kafka/IKafkaProducerService.java +++ b/src-java/floodlight-service/floodlight-modules/src/main/java/org/openkilda/floodlight/service/kafka/IKafkaProducerService.java @@ -18,6 +18,7 @@ import org.openkilda.floodlight.service.IService; import org.openkilda.messaging.AbstractMessage; import org.openkilda.messaging.Message; +import org.openkilda.messaging.MessageContext; import org.openkilda.messaging.info.InfoData; import java.util.Collection; @@ -31,6 +32,9 @@ public interface IKafkaProducerService extends IService { void sendChunkedMessageAndTrack(String topic, String key, Collection data); + void sendChunkedSpeakerDataAndTrack(String topic, MessageContext messageContext, + Collection data); + void sendMessageAndTrackWithZk(String topic, Message message); void sendMessageAndTrackWithZk(String topic, String key, Message message); diff --git a/src-java/floodlight-service/floodlight-modules/src/main/java/org/openkilda/floodlight/service/kafka/KafkaProducerService.java 
b/src-java/floodlight-service/floodlight-modules/src/main/java/org/openkilda/floodlight/service/kafka/KafkaProducerService.java index 6530554ebc5..c46521938ba 100644 --- a/src-java/floodlight-service/floodlight-modules/src/main/java/org/openkilda/floodlight/service/kafka/KafkaProducerService.java +++ b/src-java/floodlight-service/floodlight-modules/src/main/java/org/openkilda/floodlight/service/kafka/KafkaProducerService.java @@ -19,10 +19,12 @@ import org.openkilda.bluegreen.LifecycleEvent; import org.openkilda.bluegreen.Signal; +import org.openkilda.floodlight.api.response.ChunkedSpeakerDataResponse; import org.openkilda.floodlight.service.zookeeper.ZooKeeperEventObserver; import org.openkilda.floodlight.service.zookeeper.ZooKeeperService; import org.openkilda.messaging.AbstractMessage; import org.openkilda.messaging.Message; +import org.openkilda.messaging.MessageContext; import org.openkilda.messaging.info.ChunkedInfoMessage; import org.openkilda.messaging.info.InfoData; import org.openkilda.messaging.info.InfoMessage; @@ -87,6 +89,14 @@ public void sendChunkedMessageAndTrack(String topic, String key, Collection data) { + for (AbstractMessage message : ChunkedSpeakerDataResponse.createChunkedList(data, messageContext)) { + sendMessageAndTrack(topic, messageContext.getCorrelationId(), message); + } + } + @Override public void sendMessageAndTrackWithZk(String topic, Message message) { if (active.get()) { diff --git a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/FlowHsTopology.java b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/FlowHsTopology.java index daa39db50fe..911f19d323f 100644 --- a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/FlowHsTopology.java +++ b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/FlowHsTopology.java @@ -946,7 +946,7 @@ private void 
validationSpeakerWorkerForDumpsBolt(TopologyBuilder topologyBuilder .hubComponent(ComponentId.YFLOW_VALIDATION_HUB.name()) .hubComponent(ComponentId.HA_FLOW_VALIDATION_HUB.name()) .streamToHub(SPEAKER_WORKER_TO_HUB_VALIDATION.name()) - .build()); + .build(), topologyConfig.getChunkedMessagesExpirationMinutes()); declareBolt(topologyBuilder, speakerWorkerForDumpsBolt, ComponentId.FLOW_VALIDATION_SPEAKER_WORKER.name()) .fieldsGrouping(ComponentId.SPEAKER_WORKER_SPOUT.name(), FIELDS_KEY) diff --git a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/FlowHsTopologyConfig.java b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/FlowHsTopologyConfig.java index 7587765bc32..2dec6712e19 100644 --- a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/FlowHsTopologyConfig.java +++ b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/FlowHsTopologyConfig.java @@ -87,6 +87,10 @@ default String getFlowStatsNotifyTopic() { @Default("10") int getSpeakerTimeoutSeconds(); + @Key("kafka.chunked.messages.expiration.minutes") + @Default("15") + int getChunkedMessagesExpirationMinutes(); + @Key("flow.create.speaker.command.retries") @Default("3") int getCreateSpeakerCommandRetries(); diff --git a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/bolts/SpeakerWorkerForDumpsBolt.java b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/bolts/SpeakerWorkerForDumpsBolt.java index 40d05525302..ac458f0a30c 100644 --- a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/bolts/SpeakerWorkerForDumpsBolt.java +++ b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/bolts/SpeakerWorkerForDumpsBolt.java @@ -18,12 +18,14 @@ import static 
org.openkilda.wfm.topology.flowhs.FlowHsTopology.Stream.SPEAKER_WORKER_REQUEST_SENDER; import static org.openkilda.wfm.topology.utils.KafkaRecordTranslator.FIELD_ID_PAYLOAD; +import org.openkilda.floodlight.api.response.ChunkedSpeakerDataResponse; import org.openkilda.floodlight.api.response.SpeakerDataResponse; +import org.openkilda.messaging.MessageData; import org.openkilda.messaging.command.CommandMessage; -import org.openkilda.messaging.error.ErrorData; -import org.openkilda.messaging.error.ErrorType; import org.openkilda.wfm.error.PipelineException; import org.openkilda.wfm.share.hubandspoke.WorkerBolt; +import org.openkilda.wfm.topology.flowhs.service.SpeakerCommandForDumpsCarrier; +import org.openkilda.wfm.topology.flowhs.service.SpeakerWorkerForDumpsService; import org.openkilda.wfm.topology.utils.MessageKafkaTranslator; import com.google.common.base.Preconditions; @@ -31,9 +33,20 @@ import org.apache.storm.tuple.Tuple; import org.apache.storm.tuple.Values; -public class SpeakerWorkerForDumpsBolt extends WorkerBolt { - public SpeakerWorkerForDumpsBolt(Config config) { +public class SpeakerWorkerForDumpsBolt extends WorkerBolt implements SpeakerCommandForDumpsCarrier { + + private transient SpeakerWorkerForDumpsService service; + private final int chunkedMessagesExpirationMinutes; + + public SpeakerWorkerForDumpsBolt(Config config, int chunkedMessagesExpirationMinutes) { super(config); + this.chunkedMessagesExpirationMinutes = chunkedMessagesExpirationMinutes; + } + + @Override + protected void init() { + super.init(); + service = new SpeakerWorkerForDumpsService(this, chunkedMessagesExpirationMinutes); } @Override @@ -44,17 +57,19 @@ protected void onHubRequest(Tuple requestTuple) throws Exception { // Due to specific request handling in FL, we have to provide the key and correlationId which are equal. 
Preconditions.checkArgument(key.equals(request.getCorrelationId()), "Tuple %s has the key which doesn't correspond to correlationId", requestTuple); - - emitWithContext(SPEAKER_WORKER_REQUEST_SENDER.name(), getCurrentTuple(), new Values(key, request)); + service.sendCommand(key, request); } @Override protected void onAsyncResponse(Tuple requestTuple, Tuple responseTuple) throws Exception { String key = pullKey(); Object payload = responseTuple.getValueByField(FIELD_ID_PAYLOAD); - if (payload instanceof SpeakerDataResponse) { + if (payload instanceof ChunkedSpeakerDataResponse) { + ChunkedSpeakerDataResponse chunkedInfoMessage = (ChunkedSpeakerDataResponse) payload; + service.handleChunkedResponse(key, chunkedInfoMessage); + } else if (payload instanceof SpeakerDataResponse) { SpeakerDataResponse dataResponse = (SpeakerDataResponse) payload; - emitResponseToHub(getCurrentTuple(), new Values(key, dataResponse.getData(), getCommandContext())); + service.handleResponse(key, dataResponse); } else { log.debug("Unknown response received: {}", payload); } @@ -63,12 +78,7 @@ protected void onAsyncResponse(Tuple requestTuple, Tuple responseTuple) throws E @Override public void onRequestTimeout(Tuple requestTuple) throws PipelineException { String key = pullKey(); - CommandMessage request = pullValue(requestTuple, FIELD_ID_PAYLOAD, CommandMessage.class); - - ErrorData errorData = new ErrorData(ErrorType.OPERATION_TIMED_OUT, - String.format("Timeout for waiting response on command %s", request), - "Error in SpeakerWorker"); - emitResponseToHub(getCurrentTuple(), new Values(key, errorData, getCommandContext())); + service.handleTimeout(key); } @Override @@ -77,4 +87,14 @@ public void declareOutputFields(OutputFieldsDeclarer declarer) { declarer.declareStream(SPEAKER_WORKER_REQUEST_SENDER.name(), MessageKafkaTranslator.STREAM_FIELDS); } + + @Override + public void sendCommand(String key, CommandMessage command) { + emitWithContext(SPEAKER_WORKER_REQUEST_SENDER.name(), 
getCurrentTuple(), new Values(key, command)); + } + + @Override + public void sendResponse(String key, MessageData response) throws PipelineException { + emitResponseToHub(getCurrentTuple(), new Values(key, response, getCommandContext())); + } } diff --git a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/service/SpeakerCommandForDumpsCarrier.java b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/service/SpeakerCommandForDumpsCarrier.java new file mode 100644 index 00000000000..75bf3bb2a47 --- /dev/null +++ b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/service/SpeakerCommandForDumpsCarrier.java @@ -0,0 +1,27 @@ +/* Copyright 2024 Telstra Open Source + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.openkilda.wfm.topology.flowhs.service; + +import org.openkilda.messaging.MessageData; +import org.openkilda.messaging.command.CommandMessage; +import org.openkilda.wfm.error.PipelineException; + +public interface SpeakerCommandForDumpsCarrier { + + void sendCommand(String key, CommandMessage command) throws PipelineException; + + void sendResponse(String key, MessageData response) throws PipelineException; +} diff --git a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/service/SpeakerWorkerForDumpsService.java b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/service/SpeakerWorkerForDumpsService.java new file mode 100644 index 00000000000..5fe38b1845d --- /dev/null +++ b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/service/SpeakerWorkerForDumpsService.java @@ -0,0 +1,164 @@ +/* Copyright 2024 Telstra Open Source + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.openkilda.wfm.topology.flowhs.service; + +import org.openkilda.floodlight.api.response.ChunkedSpeakerDataResponse; +import org.openkilda.floodlight.api.response.SpeakerDataResponse; +import org.openkilda.messaging.command.CommandMessage; +import org.openkilda.messaging.command.switches.DumpGroupsForFlowHsRequest; +import org.openkilda.messaging.command.switches.DumpMetersForFlowHsRequest; +import org.openkilda.messaging.command.switches.DumpRulesForFlowHsRequest; +import org.openkilda.messaging.error.ErrorData; +import org.openkilda.messaging.error.ErrorType; +import org.openkilda.messaging.info.flow.FlowDumpResponse; +import org.openkilda.messaging.info.group.GroupDumpResponse; +import org.openkilda.messaging.info.meter.MeterDumpResponse; +import org.openkilda.wfm.error.PipelineException; + +import lombok.NonNull; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.collections4.map.PassiveExpiringMap; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +@Slf4j +public class SpeakerWorkerForDumpsService { + private final SpeakerCommandForDumpsCarrier carrier; + private final Map keyToRequest = new HashMap<>(); + + /** + * The storage for received chunked message ids. It is needed to identify whether we have already received a specific + * chunked message, in order to avoid duplicates, because the current version of Kafka does not guarantee + * exactly-once delivery. + */ + private final Map> chunkedMessageIdsPerRequest = new HashMap<>(); + /** + * Chains of chunked messages; each chain is filled with messages one by one as soon as the next linked message is received. 
+ */ + private final Map> messagesChains; + + public SpeakerWorkerForDumpsService(@NonNull SpeakerCommandForDumpsCarrier carrier, + int chunkedMessagesExpirationMinutes) { + this.carrier = carrier; + messagesChains = new PassiveExpiringMap<>(chunkedMessagesExpirationMinutes, TimeUnit.MINUTES, new HashMap<>()); + } + + /** + * Sends command to speaker. + * + * @param key unique operation's key. + * @param request command to be executed. + */ + public void sendCommand(@NonNull String key, @NonNull CommandMessage request) throws PipelineException { + log.debug("Got a request from hub bolt {}", request); + keyToRequest.put(key, request); + carrier.sendCommand(key, request); + } + + /** + * Handles a timeout event by sending an error response to the hub. + * + * @param key the unique operation key associated with the timed-out request. + * @throws PipelineException if there is an error while sending the error response. + */ + public void handleTimeout(String key) throws PipelineException { + log.debug("Send timeout error to hub {}", key); + CommandMessage request = keyToRequest.remove(key); + chunkedMessageIdsPerRequest.remove(key); + ErrorData errorData = new ErrorData(ErrorType.OPERATION_TIMED_OUT, + String.format("Timeout for waiting response on command %s", request), + "Error in SpeakerWorker"); + carrier.sendResponse(key, errorData); + } + + /** + * Processes received response and forwards it to the hub component. + * + * @param key operation's key. + * @param response response payload. + */ + public void handleResponse(@NonNull String key, @NonNull SpeakerDataResponse response) + throws PipelineException { + log.debug("Got a response from speaker {}", response); + CommandMessage pendingRequest = keyToRequest.remove(key); + if (pendingRequest != null) { + carrier.sendResponse(key, response.getData()); + } + } + + /** + * Processes received chunked responses, combines them and forwards it to the hub component. 
+ */ + public void handleChunkedResponse(String key, ChunkedSpeakerDataResponse response) throws PipelineException { + log.debug("Got chunked response from speaker {}", response); + chunkedMessageIdsPerRequest.computeIfAbsent(key, mappingFunction -> new HashSet<>()); + Set associatedMessages = chunkedMessageIdsPerRequest.get(key); + if (!associatedMessages.add(response.getMessageId())) { + log.debug("Skipping chunked message, it is already received: {}", response); + return; + } + + messagesChains.computeIfAbsent(key, mappingFunction -> new ArrayList<>()); + List chain = messagesChains.get(key); + if (response.getTotalMessages() != 0) { + chain.add(response); + } + + if (chain.size() == response.getTotalMessages()) { + completeChunkedResponse(key, chain); + } + } + + private void completeChunkedResponse(String key, List messages) + throws PipelineException { + + CommandMessage pending = keyToRequest.remove(key); + messagesChains.remove(key); + chunkedMessageIdsPerRequest.remove(key); + + if (pending == null || pending.getData() == null) { + return; + } + Object data = pending.getData(); + if (data instanceof DumpRulesForFlowHsRequest) { + List responses = process(messages, FlowDumpResponse.class); + carrier.sendResponse(key, FlowDumpResponse.unite(responses)); + } else if (data instanceof DumpMetersForFlowHsRequest) { + List responses = process(messages, MeterDumpResponse.class); + carrier.sendResponse(key, MeterDumpResponse.unite(responses)); + } else if (data instanceof DumpGroupsForFlowHsRequest) { + List responses = process(messages, GroupDumpResponse.class); + carrier.sendResponse(key, GroupDumpResponse.unite(responses)); + } else { + log.error("Unknown request payload for chunked response. 
Request context: {}, key: {}, " + + "chunked data: {}", pending, key, messages); + } + } + + private List process(List messages, Class responseType) { + return messages.stream() + .map(SpeakerDataResponse::getData) + .map(responseType::cast) + .collect(Collectors.toList()); + } +} diff --git a/src-java/flowhs-topology/flowhs-storm-topology/src/test/java/org/openkilda/wfm/topology/flowhs/service/SpeakerWorkerForDumpsServiceTest.java b/src-java/flowhs-topology/flowhs-storm-topology/src/test/java/org/openkilda/wfm/topology/flowhs/service/SpeakerWorkerForDumpsServiceTest.java new file mode 100644 index 00000000000..a787c72b302 --- /dev/null +++ b/src-java/flowhs-topology/flowhs-storm-topology/src/test/java/org/openkilda/wfm/topology/flowhs/service/SpeakerWorkerForDumpsServiceTest.java @@ -0,0 +1,236 @@ +/* Copyright 2024 Telstra Open Source + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.openkilda.wfm.topology.flowhs.service; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.when; + +import org.openkilda.floodlight.api.response.ChunkedSpeakerDataResponse; +import org.openkilda.floodlight.api.response.SpeakerDataResponse; +import org.openkilda.messaging.MessageContext; +import org.openkilda.messaging.command.CommandMessage; +import org.openkilda.messaging.command.switches.DumpRulesForFlowHsRequest; +import org.openkilda.messaging.info.flow.FlowDumpResponse; +import org.openkilda.model.SwitchId; +import org.openkilda.model.cookie.FlowSegmentCookie; +import org.openkilda.rulemanager.FlowSpeakerData; +import org.openkilda.wfm.error.PipelineException; + +import com.google.common.collect.Lists; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.lang.reflect.Field; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +class SpeakerWorkerForDumpsServiceTest { + private static final String CORRELATION_ID_1 = "COR_1"; + private static final SwitchId SWITCH_1 = new SwitchId(1); + + private SpeakerCommandForDumpsCarrier carrier; + private SpeakerWorkerForDumpsService service; + + @BeforeEach + public void setUp() { + carrier = mock(SpeakerCommandForDumpsCarrier.class); + int chunkedMessagesExpirationMinutes = 10; + service = new SpeakerWorkerForDumpsService(carrier, chunkedMessagesExpirationMinutes); + } + + /** + * Tests the sendCommand method by: + * - Verifying that a command message is sent with the correct key. 
+ */ + @Test + public void testSendCommand() throws PipelineException { + String key = "test-key"; + CommandMessage request = mock(CommandMessage.class); + service.sendCommand(key, request); + verify(carrier).sendCommand(eq(key), eq(request)); + } + + /** + * Tests the handling of a standard speaker response by: + * - Sending a command and verifying it is sent correctly. + * - Handling a response and ensuring it triggers the correct response handling. + * - Verifying that the response is sent only once and no further interactions occur. + */ + @Test + public void testHandleResponse() throws PipelineException { + String key = "test-key"; + CommandMessage request = mock(CommandMessage.class); + MessageContext messageContext = new MessageContext(CORRELATION_ID_1); + + + FlowDumpResponse flowDumpResponse = buildFlowDumpResponse(SWITCH_1, 1); + SpeakerDataResponse response = new SpeakerDataResponse(messageContext, flowDumpResponse); + + service.sendCommand(key, request); + verify(carrier).sendCommand(eq(key), eq(request)); + service.handleResponse(key, response); + verify(carrier).sendResponse(eq(key), eq(response.getData())); + + verifyNoMoreInteractions(carrier); + service.handleResponse(key, response); + verify(carrier, times(1)).sendResponse(anyString(), any()); + } + + /** + * Tests the complete handling of chunked responses by: + * - Sending a command and verifying it is stored and sent. + * - Handling multiple chunked responses, ensuring duplicates are ignored. + * - Verifying that the service correctly unites and processes the complete set of chunked responses. + * - Ensuring all internal caches are cleared after processing. 
+ */ + @Test + public void testHandleChunkedResponseComplete() throws PipelineException, + NoSuchFieldException, IllegalAccessException { + String key = "test-key"; + CommandMessage request = mock(CommandMessage.class); + DumpRulesForFlowHsRequest data = new DumpRulesForFlowHsRequest(SWITCH_1); + when(request.getData()).thenReturn(data); + + service.sendCommand(key, request); + verify(carrier).sendCommand(eq(key), eq(request)); + + List chinkedList = ChunkedSpeakerDataResponse.createChunkedList(Lists.newArrayList( + buildFlowDumpResponse(SWITCH_1, 1), + buildFlowDumpResponse(SWITCH_1, 2), + buildFlowDumpResponse(SWITCH_1, 2)), + new MessageContext(CORRELATION_ID_1)); + + // Access and track the state of messagesChains + Field messagesChainsField = SpeakerWorkerForDumpsService.class.getDeclaredField("messagesChains"); + messagesChainsField.setAccessible(true); + + @SuppressWarnings("unchecked") + Map> messagesChains = + (Map>) messagesChainsField.get(service); + + // handle first message response + service.handleChunkedResponse(key, chinkedList.get(0)); + Assertions.assertNotNull(messagesChains.get(key)); + Assertions.assertEquals(chinkedList.get(0), messagesChains.get(key).get(0)); + verifyNoMoreInteractions(carrier); + + // check handling for duplicate messages + service.handleChunkedResponse(key, chinkedList.get(0)); + Assertions.assertNotNull(messagesChains.get(key)); + Assertions.assertEquals(chinkedList.get(0), messagesChains.get(key).get(0)); + Assertions.assertEquals(1, messagesChains.size()); + verifyNoMoreInteractions(carrier); + + // handle second message response + service.handleChunkedResponse(key, chinkedList.get(1)); + Assertions.assertNotNull(messagesChains.get(key)); + Assertions.assertEquals(2, messagesChains.get(key).size()); + verifyNoMoreInteractions(carrier); + + // handle third message response + service.handleChunkedResponse(key, chinkedList.get(2)); + Assertions.assertNull(messagesChains.get(key)); + + FlowDumpResponse expectedResult = 
FlowDumpResponse.unite(chinkedList.stream().map(SpeakerDataResponse::getData) + .map(FlowDumpResponse.class::cast).collect(Collectors.toList())); + + verify(carrier, times(1)).sendResponse(key, expectedResult); + verifyNoMoreInteractions(carrier); + + //check that all caches in SpeakerWorkerForDumpsService are empty + + Assertions.assertTrue(messagesChains.isEmpty()); + Field chunkedMessageIdsPerRequestField = + SpeakerWorkerForDumpsService.class.getDeclaredField("chunkedMessageIdsPerRequest"); + chunkedMessageIdsPerRequestField.setAccessible(true); + @SuppressWarnings("unchecked") + Map> chunkedMessageIdsPerRequest = + (Map>) chunkedMessageIdsPerRequestField.get(service); + Assertions.assertTrue(chunkedMessageIdsPerRequest.isEmpty()); + + Field keyToRequestField = SpeakerWorkerForDumpsService.class.getDeclaredField("keyToRequest"); + keyToRequestField.setAccessible(true); + @SuppressWarnings("unchecked") + Map keyToRequest + = (Map) keyToRequestField.get(service); + Assertions.assertTrue(keyToRequest.isEmpty()); + } + + /** + * Tests the handleTimeout method to ensure it correctly handles timeout events by: + * - Removing the timed-out request from internal caches. + * - Sending an error response for each timed-out request. + * This test manually populates the internal maps with keys and simulates timeouts. 
+ */ + @Test + public void handleTimeout() throws PipelineException, NoSuchFieldException, IllegalAccessException { + String key1 = "Key1"; + String key2 = "Key2"; + String key3 = "Key3"; + String key4 = "Key4"; + //populate caches + Field chunkedMessageIdsPerRequestField + = SpeakerWorkerForDumpsService.class.getDeclaredField("chunkedMessageIdsPerRequest"); + chunkedMessageIdsPerRequestField.setAccessible(true); + @SuppressWarnings("unchecked") + Map> chunkedMessageIdsPerRequest = + (Map>) chunkedMessageIdsPerRequestField.get(service); + chunkedMessageIdsPerRequest.put(key1, Collections.emptySet()); + chunkedMessageIdsPerRequest.put(key2, Collections.emptySet()); + chunkedMessageIdsPerRequest.put(key3, Collections.emptySet()); + chunkedMessageIdsPerRequest.put(key4, Collections.emptySet()); + + Field keyToRequestField = SpeakerWorkerForDumpsService.class.getDeclaredField("keyToRequest"); + keyToRequestField.setAccessible(true); + @SuppressWarnings("unchecked") + Map keyToRequest = + (Map) keyToRequestField.get(service); + keyToRequest.put(key1, mock(CommandMessage.class)); + keyToRequest.put(key2, mock(CommandMessage.class)); + keyToRequest.put(key3, mock(CommandMessage.class)); + keyToRequest.put(key4, mock(CommandMessage.class)); + + service.handleTimeout(key1); + Assertions.assertNull(keyToRequest.get(key1)); + Assertions.assertNull(chunkedMessageIdsPerRequest.get(key1)); + + service.handleTimeout(key2); + service.handleTimeout(key3); + service.handleTimeout(key4); + Assertions.assertTrue(keyToRequest.isEmpty()); + Assertions.assertTrue(chunkedMessageIdsPerRequest.isEmpty()); + } + + private FlowDumpResponse buildFlowDumpResponse(SwitchId switchId, long cookie) { + return new FlowDumpResponse(Lists.newArrayList(buildFlowSpeakerData(cookie)), switchId); + } + + private FlowSpeakerData buildFlowSpeakerData(long cookie) { + return FlowSpeakerData.builder() + .cookie(new FlowSegmentCookie(cookie)) + .build(); + } +} From 32614db47aa3c97415e2b75ced5f846820aa31b7 Mon 
Sep 17 00:00:00 2001 From: Yuliia Miroshnychenko Date: Fri, 12 Jul 2024 13:16:26 +0200 Subject: [PATCH 5/7] [TEST]: Regular Flow: Performance tests: Eliminating flowHelper(v1/v2) usage --- .../functionaltests/helpers/FlowHelper.groovy | 186 ------------- .../helpers/FlowHelperV2.groovy | 259 ------------------ .../config/HelpersConfig.groovy | 6 +- .../helpers/TopologyHelper.groovy | 5 +- .../performancetests/BaseSpecification.groovy | 43 ++- .../performancetests/SpecThreadScope.groovy | 31 +++ .../benchmark/ConcurrentFlowCreateSpec.groovy | 33 +-- .../ConcurrentFlowRerouteSpec.groovy | 36 ++- .../ConcurrentFlowValidateSpec.groovy | 33 +-- .../spec/benchmark/FlowCreateSpec.groovy | 33 +-- .../spec/benchmark/FlowDumpSpec.groovy | 31 +-- .../spec/benchmark/LinkDumpSpec.groovy | 34 +-- .../spec/{ => endurance}/DiscoverySpec.groovy | 3 +- .../spec/{ => endurance}/EnduranceSpec.groovy | 60 ++-- .../{ => endurance}/EnduranceV2Spec.groovy | 81 +++--- .../spec/{ => endurance}/VolumeSpec.groovy | 33 +-- ...amework.runtime.extension.IGlobalExtension | 2 +- .../src/test/resources/spring-context.xml | 9 + 18 files changed, 252 insertions(+), 666 deletions(-) delete mode 100644 src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/FlowHelper.groovy delete mode 100644 src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/FlowHelperV2.groovy create mode 100644 src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/SpecThreadScope.groovy rename src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/{ => endurance}/DiscoverySpec.groovy (98%) rename src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/{ => endurance}/EnduranceSpec.groovy (81%) rename src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/{ => endurance}/EnduranceV2Spec.groovy (85%) rename 
src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/{ => endurance}/VolumeSpec.groovy (55%) diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/FlowHelper.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/FlowHelper.groovy deleted file mode 100644 index a33089dac90..00000000000 --- a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/FlowHelper.groovy +++ /dev/null @@ -1,186 +0,0 @@ -package org.openkilda.functionaltests.helpers - -import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.DELETE_FLOW -import static org.openkilda.testing.Constants.FLOW_CRUD_TIMEOUT -import static org.springframework.beans.factory.config.ConfigurableBeanFactory.SCOPE_PROTOTYPE - -import org.openkilda.functionaltests.helpers.model.SwitchPair -import org.openkilda.functionaltests.model.cleanup.CleanupAfter -import org.openkilda.functionaltests.model.cleanup.CleanupManager -import org.openkilda.messaging.payload.flow.DetectConnectedDevicesPayload -import org.openkilda.messaging.payload.flow.FlowCreatePayload -import org.openkilda.messaging.payload.flow.FlowEndpointPayload -import org.openkilda.messaging.payload.flow.FlowPayload -import org.openkilda.messaging.payload.flow.FlowState -import org.openkilda.testing.model.topology.TopologyDefinition -import org.openkilda.testing.model.topology.TopologyDefinition.Switch -import org.openkilda.testing.service.database.Database -import org.openkilda.testing.service.northbound.NorthboundService - -import com.github.javafaker.Faker -import groovy.util.logging.Slf4j -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.beans.factory.annotation.Qualifier -import org.springframework.context.annotation.Scope -import org.springframework.stereotype.Component - -import java.text.SimpleDateFormat - -/** - * Holds utility methods for 
manipulating flows. - */ -@Component -@Slf4j -@Scope(SCOPE_PROTOTYPE) -class FlowHelper { - @Autowired - TopologyDefinition topology - @Autowired @Qualifier("islandNb") - NorthboundService northbound - @Autowired - Database db - @Autowired - FlowHelperV2 flowHelperV2 - @Autowired - CleanupManager cleanupManager - - def random = new Random() - def faker = new Faker() - //Kilda allows user to pass reserved VLAN IDs 1 and 4095 if they want. - static final IntRange KILDA_ALLOWED_VLANS = 1..4095 - - /** - * Creates a FlowCreatePayload instance with random vlan and flow id. Will try to build over traffgen ports or use - * random port otherwise. - * Since multi-switch and single-switch flows have a bit different algorithms to create a correct flow, this method - * will delegate the job to the correct algo based on src and dst switches passed. - */ - FlowCreatePayload randomFlow(Switch srcSwitch, Switch dstSwitch, boolean useTraffgenPorts = true, - List existingFlows = []) { - if (srcSwitch.dpId == dstSwitch.dpId) { - return singleSwitchFlow(srcSwitch, useTraffgenPorts, existingFlows) - } else { - return randomMultiSwitchFlow(srcSwitch, dstSwitch, useTraffgenPorts, existingFlows) - } - } - - FlowCreatePayload randomFlow(SwitchPair switchPair, boolean useTraffgenPorts = true, - List existingFlows = []) { - randomFlow(switchPair.src, switchPair.dst, useTraffgenPorts, existingFlows) - } - - FlowCreatePayload randomMultiSwitchFlow(Switch srcSwitch, Switch dstSwitch, boolean useTraffgenPorts = true, - List existingFlows = []) { - Wrappers.retry(3, 0) { - def newFlow = new FlowCreatePayload(generateFlowId(), getFlowEndpoint(srcSwitch, useTraffgenPorts), - getFlowEndpoint(dstSwitch, useTraffgenPorts), 500, false, false, false, generateDescription(), - null, null, null, null, null, null, false, null, null) - if (flowConflicts(newFlow, existingFlows)) { - throw new Exception("Generated flow conflicts with existing flows. 
Flow: $newFlow") - } - return newFlow - } as FlowCreatePayload - } - - /** - * Creates a FlowCreatePayload instance with random vlan and flow id suitable for a single-switch flow. - * The flow will be on DIFFERENT PORTS. Will try to look for both ports to be traffgen ports. - * But if such port is not available, will pick a random one. So in order to run a correct traffic - * examination certain switch should have at least 2 traffgens connected to different ports. - */ - FlowCreatePayload singleSwitchFlow(Switch sw, boolean useTraffgenPorts = true, - List existingFlows = []) { - Wrappers.retry(3, 0) { - def srcEndpoint = getFlowEndpoint(sw, [], useTraffgenPorts) - def dstEndpoint = getFlowEndpoint(sw, [srcEndpoint.portNumber], useTraffgenPorts) - def newFlow = new FlowCreatePayload(generateFlowId(), srcEndpoint, dstEndpoint, 500, false, false, false, - generateDescription(), null, null, null, null, null, null, false, null, null) - if (flowConflicts(newFlow, existingFlows)) { - throw new Exception("Generated flow conflicts with existing flows. Flow: $newFlow") - } - return newFlow - } as FlowCreatePayload - } - - /** - * Adds flow with checking flow status and rules on source and destination switches. - * It is supposed if rules are installed on source and destination switches, the flow is completely created. - */ - FlowPayload addFlow(FlowPayload flow) { - log.debug("Adding flow '${flow.id}'") - def flowId = flow.getId() - cleanupManager.addAction(DELETE_FLOW, {flowHelperV2.safeDeleteFlow(flowId)}, CleanupAfter.TEST) - def response = northbound.addFlow(flow) - Wrappers.wait(FLOW_CRUD_TIMEOUT) { assert northbound.getFlowStatus(flow.id).status == FlowState.UP } - return response - } - - /** - * Check whether given potential flow is conflicting with any of flows in the given list. - * Usually used to ensure that some new flow is by accident is not conflicting with any of existing flows. 
- * Verifies conflicts by flow id and by port-vlan conflict on source or destination switch. - * - * @param newFlow this flow will be validated against the passed list - * @param existingFlows the passed flow will be validated against this list - * @return true if passed flow conflicts with any of the flows in the list - */ - static boolean flowConflicts(FlowPayload newFlow, List existingFlows) { - List existingEndpoints = existingFlows.collectMany { [it.source, it.destination] } - [newFlow.source, newFlow.destination].any { newEp -> - existingEndpoints.find { - newEp.datapath == it.datapath && newEp.portNumber == it.portNumber && - (newEp.vlanId == it.vlanId || it.vlanId == 0 || newEp.vlanId == 0) - } - } || existingFlows*.id.contains(newFlow.id) - } - - - /** - * Returns flow endpoint with randomly chosen vlan. - * - * @param useTraffgenPorts whether to try finding a traffgen port - */ - private FlowEndpointPayload getFlowEndpoint(Switch sw, boolean useTraffgenPorts = true) { - getFlowEndpoint(sw, [], useTraffgenPorts) - } - - /** - * Returns flow endpoint with randomly chosen vlan. - * - * @param excludePorts list of ports that should not be picked - * @param useTraffgenPorts if true, will try to use a port attached to a traffgen. The port must be present - * in 'allowedPorts' - */ - private FlowEndpointPayload getFlowEndpoint(Switch sw, List excludePorts, - boolean useTraffgenPorts = true) { - def ports = topology.getAllowedPortsForSwitch(sw) - excludePorts - int port = ports[random.nextInt(ports.size())] - if (useTraffgenPorts) { - List tgPorts = sw.traffGens*.switchPort - excludePorts - if (tgPorts) { - port = tgPorts[0] - } - } - return new FlowEndpointPayload(sw.dpId, port, - KILDA_ALLOWED_VLANS.shuffled().first(), - new DetectConnectedDevicesPayload(false, false)) - } - - /** - * Generates a unique name for all auto-tests flows. 
- */ - private String generateFlowId() { - return new SimpleDateFormat("ddMMMHHmmss_SSS", Locale.US).format(new Date()) + "_" + - faker.food().ingredient().toLowerCase().replaceAll(/\W/, "") + faker.number().digits(4) - } - - private String generateDescription() { - //The health of autotest flows is always questionable - def descpription = [faker.shakespeare().asYouLikeItQuote(), - faker.shakespeare().kingRichardIIIQuote(), - faker.shakespeare().romeoAndJulietQuote(), - faker.shakespeare().hamletQuote()] - def r = new Random() - "autotest flow: ${descpription[r.nextInt(descpription.size())]}" - } -} diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/FlowHelperV2.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/FlowHelperV2.groovy deleted file mode 100644 index 32e38617863..00000000000 --- a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/FlowHelperV2.groovy +++ /dev/null @@ -1,259 +0,0 @@ -package org.openkilda.functionaltests.helpers - -import static org.openkilda.functionaltests.helpers.FlowHistoryConstants.CREATE_SUCCESS -import static org.openkilda.functionaltests.helpers.FlowHistoryConstants.DELETE_SUCCESS -import static org.openkilda.functionaltests.helpers.SwitchHelper.randomVlan -import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.DELETE_FLOW -import static org.openkilda.functionaltests.model.cleanup.CleanupAfter.TEST -import static org.openkilda.messaging.payload.flow.FlowState.IN_PROGRESS -import static org.openkilda.messaging.payload.flow.FlowState.UP -import static org.openkilda.testing.Constants.FLOW_CRUD_TIMEOUT -import static org.openkilda.testing.Constants.WAIT_OFFSET -import static org.springframework.beans.factory.config.ConfigurableBeanFactory.SCOPE_PROTOTYPE - -import org.openkilda.functionaltests.helpers.model.SwitchPair -import 
org.openkilda.functionaltests.model.cleanup.CleanupManager -import org.openkilda.messaging.payload.flow.DetectConnectedDevicesPayload -import org.openkilda.messaging.payload.flow.FlowEndpointPayload -import org.openkilda.messaging.payload.flow.FlowPayload -import org.openkilda.messaging.payload.flow.FlowState -import org.openkilda.northbound.dto.v2.flows.DetectConnectedDevicesV2 -import org.openkilda.northbound.dto.v2.flows.FlowEndpointV2 -import org.openkilda.northbound.dto.v2.flows.FlowRequestV2 -import org.openkilda.northbound.dto.v2.flows.FlowResponseV2 -import org.openkilda.testing.model.topology.TopologyDefinition -import org.openkilda.testing.model.topology.TopologyDefinition.Switch -import org.openkilda.testing.service.northbound.NorthboundService -import org.openkilda.testing.service.northbound.NorthboundServiceV2 - -import com.github.javafaker.Faker -import groovy.util.logging.Slf4j -import org.springframework.beans.factory.annotation.Autowired -import org.springframework.beans.factory.annotation.Qualifier -import org.springframework.context.annotation.Scope -import org.springframework.stereotype.Component - -import java.text.SimpleDateFormat - -/** - * Holds utility methods for manipulating flows supporting version 2 of API. - */ -@Component -@Slf4j -@Scope(SCOPE_PROTOTYPE) -class FlowHelperV2 { - @Autowired - TopologyDefinition topology - @Autowired @Qualifier("islandNbV2") - NorthboundServiceV2 northboundV2 - @Autowired @Qualifier("islandNb") - NorthboundService northbound - @Autowired - CleanupManager cleanupManager - - def random = new Random() - def faker = new Faker() - - /** - * Creates a FlowRequestV2 instance with random vlan and flow id. - * Since multi-switch and single-switch flows have a bit different algorithms to create a correct flow, this method - * will delegate the job to the correct algo based on src and dst switches passed. 
- * - * @param srcSwitch source endpoint - * @param dstSwitch destination endpoint - * @param useTraffgenPorts try using traffgen ports if available - * @param existingFlows returned flow is guaranteed not to be in conflict with these flows - */ - FlowRequestV2 randomFlow(Switch srcSwitch, Switch dstSwitch, boolean useTraffgenPorts = true, - List existingFlows = []) { - if (srcSwitch.dpId == dstSwitch.dpId) { - return singleSwitchFlow(srcSwitch, useTraffgenPorts, existingFlows) - } else { - return randomMultiSwitchFlow(srcSwitch, dstSwitch, useTraffgenPorts, existingFlows) - } - } - - FlowRequestV2 randomFlow(SwitchPair switchPair, boolean useTraffgenPorts = true, - List existingFlows = []) { - randomFlow(switchPair.src, switchPair.dst, useTraffgenPorts, existingFlows) - } - - FlowRequestV2 randomMultiSwitchFlow(Switch srcSwitch, Switch dstSwitch, boolean useTraffgenPorts = true, - List existingFlows = []) { - Wrappers.retry(3, 0) { - def newFlow = FlowRequestV2.builder() - .flowId(generateFlowId()) - .source(getFlowEndpoint(srcSwitch, useTraffgenPorts)) - .destination(getFlowEndpoint(dstSwitch, useTraffgenPorts)) - .maximumBandwidth(500) - .ignoreBandwidth(false) - .periodicPings(false) - .description(generateDescription()) - .strictBandwidth(false) - .build() - - if (flowConflicts(newFlow, existingFlows)) { - throw new Exception("Generated flow conflicts with existing flows. Flow: $newFlow") - } - return newFlow - } as FlowRequestV2 - } - - /** - * Creates a FlowRequestV2 instance with random vlan and flow id suitable for a single-switch flow. - * The flow will be on DIFFERENT PORTS. Will try to look for both ports to be traffgen ports. - * But if such port is not available, will pick a random one. So in order to run a correct traffic - * examination certain switch should have at least 2 traffgens connected to different ports. 
- */ - FlowRequestV2 singleSwitchFlow(Switch sw, boolean useTraffgenPorts = true, - List existingFlows = []) { - Wrappers.retry(3, 0) { - def srcEndpoint = getFlowEndpoint(sw, [], useTraffgenPorts) - def dstEndpoint = getFlowEndpoint(sw, [srcEndpoint.portNumber], useTraffgenPorts) - def newFlow = FlowRequestV2.builder() - .flowId(generateFlowId()) - .source(srcEndpoint) - .destination(dstEndpoint) - .maximumBandwidth(500) - .ignoreBandwidth(false) - .periodicPings(false) - .description(generateDescription()) - .strictBandwidth(false) - .build() - if (flowConflicts(newFlow, existingFlows)) { - throw new Exception("Generated flow conflicts with existing flows. Flow: $newFlow") - } - return newFlow - } as FlowRequestV2 - } - - /** - * Adds flow and waits for it to become in expected state ('Up' by default) - */ - FlowResponseV2 addFlow(FlowRequestV2 flow, FlowState expectedFlowState = UP, cleanupAfter = TEST) { - log.debug("Adding flow '${flow.flowId}'") - def flowId = flow.getFlowId() - cleanupManager.addAction(DELETE_FLOW, {safeDeleteFlow(flowId)}, cleanupAfter) - def response = northboundV2.addFlow(flow) - Wrappers.wait(FLOW_CRUD_TIMEOUT) { - assert northboundV2.getFlowStatus(flowId).status == expectedFlowState - if (expectedFlowState != IN_PROGRESS) { - assert northbound.getFlowHistory(flowId).any {it.payload.last().action == CREATE_SUCCESS} - } - } - return response - } - - - /** - * Sends delete request for flow and waits for that flow to disappear from flows list - */ - FlowResponseV2 deleteFlow(String flowId) { - Wrappers.wait(WAIT_OFFSET * 2) { assert northboundV2.getFlowStatus(flowId).status != FlowState.IN_PROGRESS } - log.debug("Deleting flow '$flowId'") - def response = northboundV2.deleteFlow(flowId) - Wrappers.wait(FLOW_CRUD_TIMEOUT) { - assert !northboundV2.getFlowStatus(flowId) - assert northbound.getFlowHistory(flowId).find { it.payload.last().action == DELETE_SUCCESS } - } - return response - } - - def safeDeleteFlow(String flowId) { - if (flowId in 
northboundV2.getAllFlows()*.getFlowId()) { - deleteFlow(flowId) - } - } - - String generateFlowId() { - return new SimpleDateFormat("ddMMMHHmmss_SSS", Locale.US).format(new Date()) + "_" + - faker.food().ingredient().toLowerCase().replaceAll(/\W/, "") + faker.number().digits(4) - } - - /** - * Check whether given potential flow is conflicting with any of flows in the given list. - * Usually used to ensure that some new flow is by accident is not conflicting with any of existing flows. - * Verifies conflicts by flow id and by port-vlan conflict on source or destination switch. - * - * @param newFlow this flow will be validated against the passed list - * @param existingFlows the passed flow will be validated against this list - * @return true if passed flow conflicts with any of the flows in the list - */ - static boolean flowConflicts(FlowRequestV2 newFlow, List existingFlows) { - List existingEndpoints = existingFlows.collectMany { [it.source, it.destination] } - [newFlow.source, newFlow.destination].any { newEp -> - existingEndpoints.find { - newEp.switchId == it.switchId && newEp.portNumber == it.portNumber && - (newEp.vlanId == it.vlanId || it.vlanId == 0 || newEp.vlanId == 0) - } - } || existingFlows*.flowId.contains(newFlow.flowId) - } - - - - - static FlowRequestV2 toV2(FlowPayload flow) { - FlowRequestV2.builder() - .flowId(flow.id) - .description(flow.description) - .maximumBandwidth(flow.maximumBandwidth) - .ignoreBandwidth(flow.ignoreBandwidth) - .allocateProtectedPath(flow.allocateProtectedPath) - .periodicPings(flow.periodicPings) - .encapsulationType(flow.encapsulationType) - .maxLatency(flow.maxLatency) - .pinned(flow.pinned) - .priority(flow.priority) - .source(toV2(flow.source)) - .destination(toV2(flow.destination)) - .build() - } - - static FlowEndpointV2 toV2(FlowEndpointPayload ep) { - FlowEndpointV2.builder() - .switchId(ep.getSwitchDpId()) - .portNumber(ep.getPortId()) - .vlanId(ep.getVlanId()) - 
.detectConnectedDevices(toV2(ep.detectConnectedDevices)) - .build() - } - - static DetectConnectedDevicesV2 toV2(DetectConnectedDevicesPayload payload) { - new DetectConnectedDevicesV2(payload.lldp, payload.arp) - } - - /** - * Returns flow endpoint with randomly chosen vlan. - * - * @param useTraffgenPorts whether to try finding a traffgen port - */ - FlowEndpointV2 getFlowEndpoint(Switch sw, boolean useTraffgenPorts = true) { - getFlowEndpoint(sw, [], useTraffgenPorts) - } - - /** - * Returns flow endpoint with randomly chosen vlan. - * - * @param excludePorts list of ports that should not be picked - * @param useTraffgenPorts if true, will try to use a port attached to a traffgen - */ - FlowEndpointV2 getFlowEndpoint(Switch sw, List excludePorts, - boolean useTraffgenPorts = true) { - def ports = topology.getAllowedPortsForSwitch(sw) - excludePorts - int port = ports[random.nextInt(ports.size())] - if (useTraffgenPorts) { - List tgPorts = sw.traffGens*.switchPort - excludePorts - if (tgPorts) { - port = tgPorts[0] - } - } - return new FlowEndpointV2( - sw.dpId, port, randomVlan(), - new DetectConnectedDevicesV2(false, false)) - } - - private String generateDescription() { - def methods = ["asYouLikeItQuote", "kingRichardIIIQuote", "romeoAndJulietQuote", "hamletQuote"] - sprintf("autotest flow: %s", faker.shakespeare()."${methods[random.nextInt(methods.size())]}"()) - } -} diff --git a/src-java/testing/performance-tests/src/main/groovy/org/openkilda/performancetests/config/HelpersConfig.groovy b/src-java/testing/performance-tests/src/main/groovy/org/openkilda/performancetests/config/HelpersConfig.groovy index 657ccb89add..54f1370343c 100644 --- a/src-java/testing/performance-tests/src/main/groovy/org/openkilda/performancetests/config/HelpersConfig.groovy +++ b/src-java/testing/performance-tests/src/main/groovy/org/openkilda/performancetests/config/HelpersConfig.groovy @@ -4,6 +4,8 @@ import org.springframework.context.annotation.ComponentScan import 
org.springframework.context.annotation.Configuration @Configuration -@ComponentScan(basePackages = ["org.openkilda.functionaltests.helpers, org.openkilda.performancetests.helpers"]) -class HelpersConfig { +@ComponentScan(basePackages = ["org.openkilda.functionaltests.helpers", + "org.openkilda.performancetests.helpers", + "org.openkilda.functionaltests.model.cleanup" +])class HelpersConfig { } diff --git a/src-java/testing/performance-tests/src/main/groovy/org/openkilda/performancetests/helpers/TopologyHelper.groovy b/src-java/testing/performance-tests/src/main/groovy/org/openkilda/performancetests/helpers/TopologyHelper.groovy index 8167c645b57..5f8302a7d22 100644 --- a/src-java/testing/performance-tests/src/main/groovy/org/openkilda/performancetests/helpers/TopologyHelper.groovy +++ b/src-java/testing/performance-tests/src/main/groovy/org/openkilda/performancetests/helpers/TopologyHelper.groovy @@ -22,7 +22,7 @@ import org.springframework.stereotype.Component @Component("performance") class TopologyHelper extends org.openkilda.functionaltests.helpers.TopologyHelper { - @Autowired @Qualifier("islandNb") + @Autowired @Qualifier("northboundServiceImpl") NorthboundService northbound @Autowired LabService labService @@ -59,7 +59,7 @@ class TopologyHelper extends org.openkilda.functionaltests.helpers.TopologyHelpe def dst = topo.pickRandomSwitch([src]) topo.addIsl(src, dst) } - createTopology(topo); + createTopology(topo) return topo } @@ -68,6 +68,7 @@ class TopologyHelper extends org.openkilda.functionaltests.helpers.TopologyHelpe Wrappers.wait(30 + topo.activeSwitches.size() * 3, 5) { verifyTopology(topo) } + setTopology(topo) return topo } diff --git a/src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/BaseSpecification.groovy b/src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/BaseSpecification.groovy index 8a1f141c4ef..5d68def4739 100644 --- 
a/src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/BaseSpecification.groovy +++ b/src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/BaseSpecification.groovy @@ -1,15 +1,15 @@ package org.openkilda.performancetests import static org.openkilda.testing.Constants.WAIT_OFFSET - -import org.openkilda.functionaltests.helpers.FlowHelper -import org.openkilda.functionaltests.helpers.FlowHelperV2 -import org.openkilda.functionaltests.helpers.PathHelper import org.openkilda.functionaltests.helpers.PortAntiflapHelper import org.openkilda.functionaltests.helpers.Wrappers +import org.openkilda.functionaltests.helpers.factory.FlowFactory +import org.openkilda.functionaltests.helpers.model.FlowExtended +import org.openkilda.functionaltests.model.cleanup.CleanupManager import org.openkilda.messaging.model.system.FeatureTogglesDto import org.openkilda.messaging.model.system.KildaConfigurationDto import org.openkilda.performancetests.helpers.TopologyHelper +import org.openkilda.testing.model.topology.TopologyDefinition.Switch import org.openkilda.testing.service.database.Database import org.openkilda.testing.service.floodlight.FloodlightsHelper import org.openkilda.testing.service.labservice.LabService @@ -34,26 +34,23 @@ class BaseSpecification extends Specification { private static boolean healthCheckRan = false @Autowired - @Shared @Qualifier("islandNb") + @Shared @Qualifier("northboundServiceImpl") NorthboundService northbound @Autowired - @Shared @Qualifier("islandNbV2") + @Shared @Qualifier("northboundServiceV2Impl") NorthboundServiceV2 northboundV2 @Autowired @Shared FloodlightsHelper flHelper @Autowired @Shared - FlowHelperV2 flowHelperV2 - @Autowired - @Shared LabService labService @Autowired @Shared LockKeeperService lockKeeper @Autowired @Shared - FlowHelper flowHelper + FlowFactory flowFactory @Autowired @Shared @Qualifier("performance") @@ -67,9 +64,6 @@ class BaseSpecification extends Specification { 
@Autowired @Shared PortAntiflapHelper antiflap - @Autowired - @Shared - PathHelper pathHelper @Value('${discovery.generic.interval}') int discoveryInterval @@ -86,6 +80,10 @@ class BaseSpecification extends Specification { @Value('${perf.debug}') boolean debug + //CleanupManager is not called during perf-tests execution due to the implementation + //to use automatic cleanup the appropriate listeners should be added + static ThreadLocal threadLocalCleanupManager = new ThreadLocal<>() + def setupSpec() { healthCheck() northbound.getAllFlows().each { northbound.deleteFlow(it.id) } @@ -93,6 +91,10 @@ class BaseSpecification extends Specification { assert northbound.getAllFlows().empty } topoHelper.purgeTopology() + flowFactory.setNorthbound(northbound) + flowFactory.setNorthboundV2(northboundV2) + antiflap.setNorthbound(northbound) + antiflap.setNorthboundV2(northboundV2) } def healthCheck() { @@ -125,4 +127,19 @@ class BaseSpecification extends Specification { //setup with empty body in order to trigger a SETUP invocation, which is intercepted in several extensions //this can have implementation if required } + + Switch pickRandom(List switches) { + switches[new Random().nextInt(switches.size())] + } + + void deleteFlows( List flows) { + flows.each { it.sendDeleteRequest() } + def waitTime = northbound.getAllFlows().size() + if(waitTime < discoveryTimeout) { + waitTime = discoveryTimeout + } + Wrappers.wait(waitTime) { + northbound.getAllFlows().isEmpty() + } + } } diff --git a/src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/SpecThreadScope.groovy b/src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/SpecThreadScope.groovy new file mode 100644 index 00000000000..f6c3c3b56b7 --- /dev/null +++ b/src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/SpecThreadScope.groovy @@ -0,0 +1,31 @@ +package org.openkilda.performancetests; + +import 
org.openkilda.functionaltests.model.cleanup.CleanupManager + +import org.springframework.beans.factory.ObjectFactory; +import org.springframework.beans.factory.config.Scope; + +class SpecThreadScope implements Scope { + + @Override + Object get(String name, ObjectFactory objectFactory) { + + CleanupManager cleanupManager = BaseSpecification.threadLocalCleanupManager.get() + if (cleanupManager == null) { + cleanupManager = (CleanupManager) objectFactory.getObject() + BaseSpecification.threadLocalCleanupManager.set(cleanupManager) + } + return cleanupManager + } + @Override + Object remove(String name) { return null } + + @Override + void registerDestructionCallback(String name, Runnable callback) { } + + @Override + Object resolveContextualObject(String key) { return null } + + @Override + String getConversationId() { return null } +} diff --git a/src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/benchmark/ConcurrentFlowCreateSpec.groovy b/src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/benchmark/ConcurrentFlowCreateSpec.groovy index 794279df266..515269c2858 100644 --- a/src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/benchmark/ConcurrentFlowCreateSpec.groovy +++ b/src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/benchmark/ConcurrentFlowCreateSpec.groovy @@ -3,26 +3,20 @@ package org.openkilda.performancetests.spec.benchmark import static groovyx.gpars.GParsPool.withPool import org.openkilda.functionaltests.helpers.Wrappers +import org.openkilda.functionaltests.helpers.model.FlowExtended +import org.openkilda.functionaltests.helpers.model.SwitchPortVlan import org.openkilda.messaging.payload.flow.FlowState -import org.openkilda.model.cookie.Cookie -import org.openkilda.northbound.dto.v2.flows.FlowRequestV2 import org.openkilda.performancetests.BaseSpecification import 
org.openkilda.performancetests.helpers.TopologyBuilder -import org.openkilda.testing.model.topology.TopologyDefinition.Switch - -import spock.lang.Shared -import spock.lang.Unroll class ConcurrentFlowCreateSpec extends BaseSpecification { - @Shared - def r = new Random() def "Flow creation (concurrent) on mesh topology"() { given: "A mesh topology" def topo = new TopologyBuilder(flHelper.fls, preset.islandCount, preset.regionsPerIsland, preset.switchesPerRegion).buildMeshes() topoHelper.createTopology(topo) - flowHelperV2.setTopology(topo) + flowFactory.setTopology(topoHelper.topology) and: "A source switch" def srcSw = topo.switches.first() @@ -30,22 +24,27 @@ class ConcurrentFlowCreateSpec extends BaseSpecification { def allowedPorts = (1..(preset.flowCount + busyPorts.size())) - busyPorts when: "Create flows" - List flows = [] + List flows = [] + List busyEndpoints = [] withPool { allowedPorts.eachParallel { port -> - def flow = flowHelperV2.randomFlow(srcSw, pickRandom(topo.switches - srcSw), false, flows) - flow.allocateProtectedPath = false - flow.source.portNumber = port - northboundV2.addFlow(flow) + def flow = flowFactory.getBuilder(srcSw, pickRandom(topo.switches - srcSw), false, busyEndpoints) + .withProtectedPath(false) + .withSourcePort(port).build() + .sendCreateRequest() + busyEndpoints.addAll(flow.occupiedEndpoints()) flows << flow } } then: "Flows are created" Wrappers.wait(flows.size()) { - flows.forEach { assert northbound.getFlowStatus(it.flowId).status == FlowState.UP } + flows.forEach { assert it.retrieveFlowStatus().status == FlowState.UP } } + cleanup: "Remove all flows" + deleteFlows(flows) + where: preset << [ [ @@ -56,8 +55,4 @@ class ConcurrentFlowCreateSpec extends BaseSpecification { ] ] } - - Switch pickRandom(List switches) { - switches[r.nextInt(switches.size())] - } } diff --git a/src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/benchmark/ConcurrentFlowRerouteSpec.groovy 
b/src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/benchmark/ConcurrentFlowRerouteSpec.groovy index c232e6c0fcf..fdaad237769 100644 --- a/src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/benchmark/ConcurrentFlowRerouteSpec.groovy +++ b/src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/benchmark/ConcurrentFlowRerouteSpec.groovy @@ -3,27 +3,23 @@ package org.openkilda.performancetests.spec.benchmark import static groovyx.gpars.GParsPool.withPool import org.openkilda.functionaltests.helpers.Wrappers +import org.openkilda.functionaltests.helpers.model.FlowExtended +import org.openkilda.functionaltests.helpers.model.SwitchPortVlan import org.openkilda.messaging.payload.flow.FlowState -import org.openkilda.northbound.dto.v2.flows.FlowRequestV2 import org.openkilda.performancetests.BaseSpecification import org.openkilda.performancetests.helpers.TopologyBuilder -import org.openkilda.testing.model.topology.TopologyDefinition.Switch import groovy.util.logging.Slf4j -import spock.lang.Shared -import spock.lang.Unroll @Slf4j class ConcurrentFlowRerouteSpec extends BaseSpecification { - @Shared - def r = new Random() def "Flow reroute (concurrent) on mesh topology"() { given: "A mesh topology" def topo = new TopologyBuilder(flHelper.fls, preset.islandCount, preset.regionsPerIsland, preset.switchesPerRegion).buildMeshes() topoHelper.createTopology(topo) - flowHelperV2.setTopology(topo) + flowFactory.setTopology(topoHelper.topology) when: "A source switch" def srcSw = topo.switches.first() @@ -38,18 +34,21 @@ class ConcurrentFlowRerouteSpec extends BaseSpecification { islUtils.toLinkProps(isl, [cost: busyPortsFirstHalf.contains(isl.srcPort) ? 
"1" : "5000"]) }) - List flows = [] + List flows = [] + List busyEndpoints = [] allowedPorts.each { port -> - def flow = flowHelperV2.randomFlow(srcSw, pickRandom(topo.switches - srcSw), false, flows) - flow.allocateProtectedPath = false - flow.source.portNumber = port - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(srcSw, pickRandom(topo.switches - srcSw), false, busyEndpoints) + .withProtectedPath(false).withSourcePort(port).build().create() + busyEndpoints.addAll(flow.occupiedEndpoints()) flows << flow } Collections.shuffle(flows) and: "Flows are created" assert flows.size() == preset.flowCount + Wrappers.wait(flows.size()) { + flows.forEach { assert it.retrieveFlowStatus().status == FlowState.UP } + } then: "Reroute flows" (1..(int)(preset.maxConcurrentReroutes / 10)).each { iteration -> @@ -63,19 +62,22 @@ class ConcurrentFlowRerouteSpec extends BaseSpecification { }) withPool(concurrentReroutes) { - flows[0..Math.min(flows.size() - 1, concurrentReroutes)].eachParallel { flow -> + flows[0..Math.min(flows.size() - 1, concurrentReroutes)].eachParallel { FlowExtended flow -> Wrappers.wait(flows.size()) { - northboundV2.rerouteFlow(flow.flowId) + flow.reroute() } } } Wrappers.wait(flows.size()) { - flows.forEach { assert northbound.getFlowStatus(it.flowId).status == FlowState.UP } + flows.forEach { assert it.retrieveFlowStatus().status == FlowState.UP } } } } + cleanup: "Remove all flows" + deleteFlows(flows) + where: preset << [ [ @@ -88,8 +90,4 @@ class ConcurrentFlowRerouteSpec extends BaseSpecification { ] ] } - - Switch pickRandom(List switches) { - switches[r.nextInt(switches.size())] - } } diff --git a/src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/benchmark/ConcurrentFlowValidateSpec.groovy b/src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/benchmark/ConcurrentFlowValidateSpec.groovy index a7219713430..512b776ac23 100644 --- 
a/src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/benchmark/ConcurrentFlowValidateSpec.groovy +++ b/src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/benchmark/ConcurrentFlowValidateSpec.groovy @@ -3,25 +3,19 @@ package org.openkilda.performancetests.spec.benchmark import static groovyx.gpars.GParsPool.withPool import org.openkilda.functionaltests.helpers.Wrappers -import org.openkilda.model.cookie.Cookie -import org.openkilda.northbound.dto.v2.flows.FlowRequestV2 +import org.openkilda.functionaltests.helpers.model.FlowExtended +import org.openkilda.functionaltests.helpers.model.SwitchPortVlan import org.openkilda.performancetests.BaseSpecification import org.openkilda.performancetests.helpers.TopologyBuilder -import org.openkilda.testing.model.topology.TopologyDefinition.Switch - -import spock.lang.Shared -import spock.lang.Unroll class ConcurrentFlowValidateSpec extends BaseSpecification { - @Shared - def r = new Random() def "Flow validation (concurrent) on mesh topology"() { given: "A mesh topology" def topo = new TopologyBuilder(flHelper.fls, preset.islandCount, preset.regionsPerIsland, preset.switchesPerRegion).buildMeshes() topoHelper.createTopology(topo) - flowHelperV2.setTopology(topo) + flowFactory.setTopology(topoHelper.topology) when: "A source switch" def srcSw = topo.switches.first() @@ -29,12 +23,14 @@ class ConcurrentFlowValidateSpec extends BaseSpecification { def allowedPorts = (1..(preset.flowCount + busyPorts.size())) - busyPorts and: "Create flows" - List flows = [] + List flows = [] + List busyEndpoints = [] allowedPorts.each { port -> - def flow = flowHelperV2.randomFlow(srcSw, pickRandom(topo.switches - srcSw), false, flows) - flow.allocateProtectedPath = false - flow.source.portNumber = port - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(srcSw, pickRandom(topo.switches - srcSw), false, busyEndpoints) + .withProtectedPath(false) + 
.withSourcePort(port).build() + .create() + busyEndpoints.addAll(flow.occupiedEndpoints()) flows << flow } @@ -42,11 +38,14 @@ class ConcurrentFlowValidateSpec extends BaseSpecification { withPool { (1..preset.validateAttempts).each { Wrappers.wait(flows.size()) { - flows.eachParallel { northbound.validateFlow(it.flowId).each { assert it.asExpected } } + flows.eachParallel { FlowExtended flow -> assert flow.validateAndCollectDiscrepancies().isEmpty() } } } } + cleanup: "Remove all flows" + deleteFlows(flows) + where: preset << [ [ @@ -58,8 +57,4 @@ class ConcurrentFlowValidateSpec extends BaseSpecification { ] ] } - - Switch pickRandom(List switches) { - switches[r.nextInt(switches.size())] - } } diff --git a/src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/benchmark/FlowCreateSpec.groovy b/src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/benchmark/FlowCreateSpec.groovy index c026863fa91..cc1e3b208f8 100644 --- a/src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/benchmark/FlowCreateSpec.groovy +++ b/src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/benchmark/FlowCreateSpec.groovy @@ -1,25 +1,19 @@ package org.openkilda.performancetests.spec.benchmark -import org.openkilda.functionaltests.helpers.Wrappers -import org.openkilda.model.cookie.Cookie -import org.openkilda.northbound.dto.v2.flows.FlowRequestV2 + +import org.openkilda.functionaltests.helpers.model.FlowExtended +import org.openkilda.functionaltests.helpers.model.SwitchPortVlan import org.openkilda.performancetests.BaseSpecification import org.openkilda.performancetests.helpers.TopologyBuilder -import org.openkilda.testing.model.topology.TopologyDefinition.Switch - -import spock.lang.Shared -import spock.lang.Unroll class FlowCreateSpec extends BaseSpecification { - @Shared - def r = new Random() def "Flow creation on mesh topology"() { given: "A mesh 
topology" def topo = new TopologyBuilder(flHelper.fls, preset.islandCount, preset.regionsPerIsland, preset.switchesPerRegion).buildMeshes() topoHelper.createTopology(topo) - flowHelperV2.setTopology(topo) + flowFactory.setTopology(topoHelper.topology) when: "A source switch" def srcSw = topo.switches.first() @@ -27,17 +21,23 @@ class FlowCreateSpec extends BaseSpecification { def allowedPorts = (1..(preset.flowCount + busyPorts.size())) - busyPorts and: "Create flows" - List flows = [] + List flows = [] + List busyEndpoints = [] allowedPorts.each { port -> - def flow = flowHelperV2.randomFlow(srcSw, pickRandom(topo.switches - srcSw), false, flows) - flow.allocateProtectedPath = false - flow.source.portNumber = port - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(srcSw, pickRandom(topo.switches - srcSw), false, busyEndpoints) + .withProtectedPath(false) + .withSourcePort(port).build() + .create() + busyEndpoints.addAll(flow.occupiedEndpoints()) flows << flow } then: "Flows are created" assert flows.size() == preset.flowCount + assert northboundV2.getAllFlows().size() == flows.size() + + cleanup: "Remove all flows" + deleteFlows(flows) where: preset << [ @@ -50,7 +50,4 @@ class FlowCreateSpec extends BaseSpecification { ] } - Switch pickRandom(List switches) { - switches[r.nextInt(switches.size())] - } } diff --git a/src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/benchmark/FlowDumpSpec.groovy b/src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/benchmark/FlowDumpSpec.groovy index 98ecf467ecb..f9e97d3e136 100644 --- a/src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/benchmark/FlowDumpSpec.groovy +++ b/src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/benchmark/FlowDumpSpec.groovy @@ -1,24 +1,18 @@ package org.openkilda.performancetests.spec.benchmark - -import 
org.openkilda.northbound.dto.v2.flows.FlowRequestV2 +import org.openkilda.functionaltests.helpers.model.FlowExtended +import org.openkilda.functionaltests.helpers.model.SwitchPortVlan import org.openkilda.performancetests.BaseSpecification import org.openkilda.performancetests.helpers.TopologyBuilder -import org.openkilda.testing.model.topology.TopologyDefinition.Switch - -import spock.lang.Shared -import spock.lang.Unroll class FlowDumpSpec extends BaseSpecification { - @Shared - def r = new Random() def "Flow dump on mesh topology"() { given: "A mesh topology" def topo = new TopologyBuilder(flHelper.fls, preset.islandCount, preset.regionsPerIsland, preset.switchesPerRegion).buildMeshes() topoHelper.createTopology(topo) - flowHelperV2.setTopology(topo) + flowFactory.setTopology(topoHelper.topology) when: "A source switch" def srcSw = topo.switches.first() @@ -26,12 +20,14 @@ class FlowDumpSpec extends BaseSpecification { def allowedPorts = (1..(preset.flowCount + busyPorts.size())) - busyPorts and: "Create flows" - List flows = [] + List flows = [] + List busyEndpoints = [] allowedPorts.each { port -> - def flow = flowHelperV2.randomFlow(srcSw, pickRandom(topo.switches - srcSw), false, flows) - flow.allocateProtectedPath = false - flow.source.portNumber = port - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(srcSw, pickRandom(topo.switches - srcSw), false, busyEndpoints) + .withProtectedPath(false) + .withSourcePort(port).build() + .create() + busyEndpoints.addAll(flow.occupiedEndpoints()) flows << flow } @@ -43,6 +39,9 @@ class FlowDumpSpec extends BaseSpecification { assert northboundV2.getAllFlows().size() == preset.flowCount } + cleanup: "Remove all flows" + deleteFlows(flows) + where: preset << [ [ @@ -54,8 +53,4 @@ class FlowDumpSpec extends BaseSpecification { ] ] } - - Switch pickRandom(List switches) { - switches[r.nextInt(switches.size())] - } } diff --git 
a/src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/benchmark/LinkDumpSpec.groovy b/src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/benchmark/LinkDumpSpec.groovy index 00fd7a4257c..bc06ec294db 100644 --- a/src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/benchmark/LinkDumpSpec.groovy +++ b/src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/benchmark/LinkDumpSpec.groovy @@ -1,25 +1,19 @@ package org.openkilda.performancetests.spec.benchmark -import org.openkilda.functionaltests.helpers.Wrappers -import org.openkilda.model.cookie.Cookie -import org.openkilda.northbound.dto.v2.flows.FlowRequestV2 + +import org.openkilda.functionaltests.helpers.model.FlowExtended +import org.openkilda.functionaltests.helpers.model.SwitchPortVlan import org.openkilda.performancetests.BaseSpecification import org.openkilda.performancetests.helpers.TopologyBuilder -import org.openkilda.testing.model.topology.TopologyDefinition.Switch - -import spock.lang.Shared -import spock.lang.Unroll class LinkDumpSpec extends BaseSpecification { - @Shared - def r = new Random() def "Flow dump on mesh topology"() { given: "A mesh topology" def topo = new TopologyBuilder(flHelper.fls, preset.islandCount, preset.regionsPerIsland, preset.switchesPerRegion).buildMeshes() topoHelper.createTopology(topo) - flowHelperV2.setTopology(topo) + flowFactory.setTopology(topoHelper.topology) when: "A source switch" def srcSw = topo.switches.first() @@ -27,23 +21,29 @@ class LinkDumpSpec extends BaseSpecification { def allowedPorts = (1..(preset.flowCount + busyPorts.size())) - busyPorts and: "Create flows" - List flows = [] + List flows = [] + List busyEndpoints = [] allowedPorts.each { port -> - def flow = flowHelperV2.randomFlow(srcSw, pickRandom(topo.switches - srcSw), false, flows) - flow.allocateProtectedPath = false - flow.source.portNumber = port - 
flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(srcSw, pickRandom(topo.switches - srcSw), false, busyEndpoints) + .withProtectedPath(false) + .withSourcePort(port).build() + .create() + busyEndpoints.addAll(flow.occupiedEndpoints()) flows << flow } and: "Flows are created" assert flows.size() == preset.flowCount + assert northboundV2.getAllFlows().size() == flows.size() then: "Dump links" (1..preset.dumpAttempts).each { assert northbound.getActiveLinks().size() == topo.isls.size() * 2 } + cleanup: "Remove all flows" + deleteFlows(flows) + where: preset << [ [ @@ -55,8 +55,4 @@ class LinkDumpSpec extends BaseSpecification { ] ] } - - Switch pickRandom(List switches) { - switches[r.nextInt(switches.size())] - } } diff --git a/src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/DiscoverySpec.groovy b/src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/endurance/DiscoverySpec.groovy similarity index 98% rename from src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/DiscoverySpec.groovy rename to src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/endurance/DiscoverySpec.groovy index 197f09a4b2d..2846d014081 100644 --- a/src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/DiscoverySpec.groovy +++ b/src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/endurance/DiscoverySpec.groovy @@ -1,4 +1,4 @@ -package org.openkilda.performancetests.spec +package org.openkilda.performancetests.spec.endurance import static org.hamcrest.CoreMatchers.equalTo import static org.openkilda.testing.service.floodlight.model.FloodlightConnectMode.RW @@ -14,7 +14,6 @@ import org.openkilda.performancetests.model.CustomTopology import groovy.util.logging.Slf4j import org.junit.Assume import org.springframework.beans.factory.annotation.Value -import spock.lang.Unroll @Slf4j 
class DiscoverySpec extends BaseSpecification { diff --git a/src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/EnduranceSpec.groovy b/src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/endurance/EnduranceSpec.groovy similarity index 81% rename from src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/EnduranceSpec.groovy rename to src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/endurance/EnduranceSpec.groovy index 7508382da3f..f2ca28d0d55 100644 --- a/src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/EnduranceSpec.groovy +++ b/src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/endurance/EnduranceSpec.groovy @@ -1,4 +1,4 @@ -package org.openkilda.performancetests.spec +package org.openkilda.performancetests.spec.endurance import static groovyx.gpars.GParsPool.withPool import static groovyx.gpars.dataflow.Dataflow.task @@ -7,12 +7,14 @@ import static org.hamcrest.CoreMatchers.equalTo import org.openkilda.functionaltests.helpers.Dice import org.openkilda.functionaltests.helpers.Dice.Face import org.openkilda.functionaltests.helpers.Wrappers +import org.openkilda.functionaltests.helpers.model.FlowExtended +import org.openkilda.functionaltests.helpers.model.SwitchPortVlan import org.openkilda.messaging.info.event.IslChangeType -import org.openkilda.messaging.payload.flow.FlowPayload import org.openkilda.messaging.payload.flow.FlowState import org.openkilda.northbound.dto.v1.flows.PingInput import org.openkilda.performancetests.BaseSpecification import org.openkilda.performancetests.helpers.FlowPinger +import org.openkilda.performancetests.model.CustomTopology import org.openkilda.testing.model.topology.TopologyDefinition import org.openkilda.testing.model.topology.TopologyDefinition.Isl import org.openkilda.testing.tools.SoftAssertions @@ -22,7 +24,6 @@ 
import org.junit.Assume import org.springframework.beans.factory.annotation.Value import spock.lang.Ignore import spock.lang.Narrative -import spock.lang.Unroll import java.util.concurrent.TimeUnit @@ -34,7 +35,7 @@ class EnduranceSpec extends BaseSpecification { @Value('${reroute.delay}') int rerouteDelay def r = new Random() - List flows = Collections.synchronizedList(new ArrayList()) + List flows = Collections.synchronizedList(new ArrayList()) def setup() { topoHelper.purgeTopology() @@ -54,28 +55,27 @@ class EnduranceSpec extends BaseSpecification { setup: "Create a topology and a 'dice' with random events" def topo = topoHelper.createRandomTopology(preset.switchesAmount, preset.islsAmount) - topoHelper.setTopology(topo) - flowHelper.setTopology(topo) + flowFactory.setTopology(topo) def dice = new Dice([ new Face(chance: 25, event: this.&deleteFlow), - new Face(chance: 25, event: { createFlow(true) }), + new Face(chance: 25, event: { createFlow(topo,true) }), new Face(chance: 25, event: { blinkIsl(topo.isls) }), new Face(chance: 0, event: { TimeUnit.SECONDS.sleep(3) }), new Face(chance: 25, event: { massReroute(topo) }) ]) and: "As starting point, create some amount of random flows in it" - preset.flowsToStartWith.times { createFlow() } + preset.flowsToStartWith.times { createFlow(topo) } Wrappers.wait(flows.size() * 1.5) { flows.each { - assert northbound.getFlowStatus(it.id).status == FlowState.UP - northbound.validateFlow(it.id).each { direction -> assert direction.asExpected } + assert it.retrieveFlowStatus().status == FlowState.UP + assert it.validateAndCollectDiscrepancies().isEmpty() } } when: "With certain probability one of the following events occurs: flow creation, flow deletion, isl blink, \ idle, mass manual reroute. 
Step repeats pre-defined number of times" - def pinger = new FlowPinger(northbound, flows.collect { it.id }, rerouteDelay) + def pinger = new FlowPinger(northbound, flows.collect { it.flowId }, rerouteDelay) pinger.start() preset.eventsAmount.times { log.debug("running event #$it") @@ -91,9 +91,9 @@ idle, mass manual reroute. Step repeats pre-defined number of times" Wrappers.wait(60 + preset.switchesAmount) { def soft = new SoftAssertions() flows.each { flow -> - soft.checkSucceeds { assert northbound.getFlowStatus(flow.id).status == FlowState.UP } + soft.checkSucceeds { assert flow.retrieveFlowStatus().status == FlowState.UP } soft.checkSucceeds { - northbound.validateFlow(flow.id).each { direction -> assert direction.asExpected } + flow.validateAndCollectDiscrepancies().isEmpty() } } topo.switches.each { sw -> @@ -112,7 +112,7 @@ idle, mass manual reroute. Step repeats pre-defined number of times" cleanup: "delete flows and purge topology" pinger && !pinger.isStopped() && pinger.stop() - flows.each { northbound.deleteFlow(it.id) } + flows.each { it.sendDeleteRequestV1() } topo && topoHelper.purgeTopology(topo) where: @@ -146,12 +146,11 @@ idle, mass manual reroute. Step repeats pre-defined number of times" setup: "Create a topology" def topo = topoHelper.createRandomTopology(switchesAmount, islsAmount) - topoHelper.setTopology(topo) - flowHelper.setTopology(topo) + flowFactory.setTopology(topo) when: "Create 4094 flows" flowsAmount.times { - createFlow(false, false) + createFlow(topo, false, false) def numberOfCreatedFlow = it + 1 log.debug("Number of created flow: $numberOfCreatedFlow/$flowsAmount") @@ -164,7 +163,7 @@ idle, mass manual reroute. Step repeats pre-defined number of times" northbound.getAllFlows().size() == flowsAmount cleanup: "Delete flows and purge topology" - flows.each { northbound.deleteFlow(it.id) } + flows.each { it.sendDeleteRequestV1() } topoHelper.purgeTopology(topo) } @@ -176,12 +175,11 @@ idle, mass manual reroute. 
Step repeats pre-defined number of times" setup: "Create a topology" def topo = topoHelper.createRandomTopology(switchesAmount, islsAmount) - topoHelper.setTopology(topo) - flowHelper.setTopology(topo) + flowFactory.setTopology(topo) when: "Try to create 2047 flows" flowsAmount.times { - createFlow(false, true) + createFlow(topo, false, true) def numberOfCreatedFlow = it + 1 log.debug("Number of created flow: $numberOfCreatedFlow/$flowsAmount") @@ -194,18 +192,20 @@ idle, mass manual reroute. Step repeats pre-defined number of times" northbound.getAllFlows().size() == flowsAmount cleanup: "Delete flows and purge topology" - flows.each { northbound.deleteFlow(it.id) } + flows.each { it.sendDeleteRequestV1() } topoHelper.purgeTopology(topo) } //TODO(rtretiak): test that continuously add/remove different switches. Ensure no memory leak over time - def createFlow(waitForRules = false, boolean protectedPath = false) { + def createFlow(CustomTopology topo, waitForRules = false, boolean protectedPath = false) { + List busyEndpoints = flows.collect{ it.occupiedEndpoints() }.flatten() as List Wrappers.silent { - def flow = flowHelper.randomFlow(*topoHelper.getAllSwitchPairs().random(), false, flows) - flow.allocateProtectedPath = protectedPath - log.info "creating flow $flow.id" - waitForRules ? flowHelper.addFlow(flow) : northbound.addFlow(flow) + def flow = flowFactory.getBuilder(topo.switches.first(), pickRandom(topo.switches - topo.switches.first()), false, busyEndpoints) + .withProtectedPath(protectedPath) + .build() + log.info "creating flow $flow.flowId" + waitForRules ? flow.createV1() : flow.sendCreateRequestV1() flows << flow return flow } @@ -214,10 +214,10 @@ idle, mass manual reroute. 
Step repeats pre-defined number of times" def deleteFlow() { Wrappers.silent { def flowToDelete = flows.remove(r.nextInt(flows.size())) - log.info "deleting flow $flowToDelete.id" + log.info "deleting flow $flowToDelete.flowId" task { //delay the actual delete procedure to ensure no pings are in progress for the flow sleep(PingInput.DEFAULT_TIMEOUT) - northbound.deleteFlow(flowToDelete.id) + flowToDelete.sendDeleteRequestV1() } return flowToDelete } @@ -247,7 +247,7 @@ idle, mass manual reroute. Step repeats pre-defined number of times" Collections.shuffle(flows) task { withPool { - flows[0..flows.size() / 4].eachParallel { flow -> Wrappers.silent { northbound.rerouteFlow(flow.id) } + flows[0..flows.size() / 4].eachParallel { FlowExtended flow -> Wrappers.silent { flow.rerouteV1() } } } } diff --git a/src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/EnduranceV2Spec.groovy b/src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/endurance/EnduranceV2Spec.groovy similarity index 85% rename from src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/EnduranceV2Spec.groovy rename to src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/endurance/EnduranceV2Spec.groovy index 01a08e4d149..f8433d967c4 100644 --- a/src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/EnduranceV2Spec.groovy +++ b/src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/endurance/EnduranceV2Spec.groovy @@ -1,4 +1,4 @@ -package org.openkilda.performancetests.spec +package org.openkilda.performancetests.spec.endurance import static groovyx.gpars.GParsPool.withPool import static groovyx.gpars.dataflow.Dataflow.task @@ -9,11 +9,11 @@ import static org.openkilda.testing.service.floodlight.model.FloodlightConnectMo import org.openkilda.functionaltests.helpers.Dice import 
org.openkilda.functionaltests.helpers.Dice.Face import org.openkilda.functionaltests.helpers.Wrappers +import org.openkilda.functionaltests.helpers.model.FlowExtended +import org.openkilda.functionaltests.helpers.model.SwitchPortVlan import org.openkilda.messaging.info.event.IslChangeType import org.openkilda.messaging.payload.flow.FlowPayload import org.openkilda.messaging.payload.flow.FlowState -import org.openkilda.northbound.dto.v1.flows.PingInput -import org.openkilda.northbound.dto.v2.flows.FlowRequestV2 import org.openkilda.performancetests.BaseSpecification import org.openkilda.testing.model.topology.TopologyDefinition import org.openkilda.testing.model.topology.TopologyDefinition.Isl @@ -26,7 +26,6 @@ import org.springframework.web.client.HttpStatusCodeException import spock.lang.Ignore import spock.lang.Narrative import spock.lang.Shared -import spock.lang.Unroll import java.util.concurrent.TimeUnit @@ -41,11 +40,11 @@ class EnduranceV2Spec extends BaseSpecification { @Shared List brokenIsls @Shared - List flows + List flows def setup() { brokenIsls = Collections.synchronizedList(new ArrayList()) - flows = Collections.synchronizedList(new ArrayList()) + flows = Collections.synchronizedList(new ArrayList()) } /** @@ -63,7 +62,7 @@ class EnduranceV2Spec extends BaseSpecification { setup: "Create topology according to passed params" //cleanup any existing labs first - northbound.getAllFlows().each { northboundV2.deleteFlow(it.id) } + northbound.getAllFlows().each { FlowPayload flow -> northboundV2.deleteFlow(flow.id) } topoHelper.purgeTopology() setTopologies(topoHelper.createRandomTopology(preset.switchesAmount, preset.islsAmount)) @@ -71,8 +70,8 @@ class EnduranceV2Spec extends BaseSpecification { preset.flowsToStartWith.times { createFlow(makeFlowPayload()) } Wrappers.wait(flows.size() / 2) { flows.each { - assert northboundV2.getFlowStatus(it.flowId).status == FlowState.UP - northbound.validateFlow(it.flowId).each { direction -> assert 
direction.asExpected } + assert it.retrieveFlowStatus().status == FlowState.UP + assert it.validateAndCollectDiscrepancies().isEmpty() } } @@ -101,18 +100,21 @@ idle, mass manual reroute, isl break. Step repeats pre-defined number of times" def allFlows = northboundV2.getAllFlows() def assertions = new SoftAssertions() def pingVerifications = new SoftAssertions() - allFlows.findAll { it.status == FlowState.UP.toString() }.forEach { flow -> + + def upFlowIds = allFlows.findAll { it.status == FlowState.UP.toString() }.flowId + flows.findAll { it.flowId in upFlowIds }.forEach { flow -> pingVerifications.checkSucceeds { - def ping = northbound.pingFlow(flow.flowId, new PingInput()) + def ping = flow.ping() assert ping.forward.pingSuccess assert ping.reverse.pingSuccess } } and: "All Down flows are NOT pingable" - allFlows.findAll { it.status == FlowState.DOWN.toString() }.forEach { flow -> + def downFlowIds = allFlows.findAll { it.status == FlowState.DOWN.toString() }.flowId + flows.findAll { it.flowId in downFlowIds }.forEach { flow -> pingVerifications.checkSucceeds { - def ping = northbound.pingFlow(flow.flowId, new PingInput()) + def ping = flow.ping() assert !ping.forward.pingSuccess assert !ping.reverse.pingSuccess } @@ -142,7 +144,7 @@ idle, mass manual reroute, isl break. Step repeats pre-defined number of times" assertions.verify() cleanup: "delete flows and purge topology" - flows.each { northboundV2.deleteFlow(it.flowId) } + deleteFlows(flows) topology && topoHelper.purgeTopology(topology) where: @@ -165,11 +167,12 @@ idle, mass manual reroute, isl break. 
Step repeats pre-defined number of times" ] ] //define payload generating method that will be called each time flow creation is issued + makeFlowPayload = { - def flow = flowHelperV2.randomFlow(*topoHelper.getAllSwitchPairs().random(), - false, flows) - flow.maximumBandwidth = 200000 - flow.allocateProtectedPath = r.nextBoolean() + List busyEndpoints = flows.collect { it.occupiedEndpoints() }.flatten() as List + FlowExtended flow = flowFactory.getBuilder(topology.switches.first(), pickRandom(topology.switches - topology.switches.first()), false, busyEndpoints) + .withBandwidth(200000) + .withProtectedPath(r.nextBoolean()).build() return flow } //'dice' below defines events and their chances to appear @@ -193,7 +196,11 @@ idle, mass manual reroute, isl break. Step repeats pre-defined number of times" given: "A live env with certain topology deployed and existing flows" setTopologies(topoHelper.readCurrentTopology()) - flows.addAll(northbound.getAllFlows().collect { flowHelperV2.toV2(it) }) + topoHelper.setTopology(topology) + def existingFlows = northboundV2.getAllFlows().collect { + new FlowExtended(it, northbound, northboundV2, topology, flowFactory.cleanupManager, database) + } + flows.addAll(existingFlows) when: "With certain probability one of the following events occurs: flow creation, flow deletion, isl blink, \ idle, mass manual reroute, isl break. Step repeats for pre-defined amount of time" @@ -223,11 +230,12 @@ idle, mass manual reroute, isl break. 
Step repeats for pre-defined amount of tim def allFlows = northbound.getAllFlows() def assertions = new SoftAssertions() def pingVerifications = new SoftAssertions() + def upFlowIds = allFlows.findAll{ it.status == FlowState.UP.toString() }.id withPool(10) { - allFlows.findAll { it.status == FlowState.UP.toString() }.eachParallel { FlowPayload flow -> - if (isFlowPingable(flowHelperV2.toV2(flow))) { + flows.findAll { it.flowId in upFlowIds }.eachParallel { FlowExtended flow -> + if (isFlowPingable(flow)) { pingVerifications.checkSucceeds { - def ping = northbound.pingFlow(flow.id, new PingInput()) + def ping = flow.ping() assert ping.forward.pingSuccess assert ping.reverse.pingSuccess } @@ -236,11 +244,12 @@ idle, mass manual reroute, isl break. Step repeats for pre-defined amount of tim } ?: true and: "All Down flows are NOT pingable" + def downFlowIds = allFlows.findAll{ it.status == FlowState.DOWN.toString() }.id withPool(10) { - allFlows.findAll { it.status == FlowState.DOWN.toString() }.eachParallel { FlowPayload flow -> - if (isFlowPingable(flowHelperV2.toV2(flow))) { + flows.findAll{ it.flowId in downFlowIds }.eachParallel { FlowExtended flow -> + if (isFlowPingable(flow)) { pingVerifications.checkSucceeds { - def ping = northbound.pingFlow(flow.id, new PingInput()) + def ping = flow.ping() assert !ping.forward.pingSuccess assert !ping.reverse.pingSuccess } @@ -304,15 +313,14 @@ idle, mass manual reroute, isl break. Step repeats for pre-defined amount of tim def setTopologies(TopologyDefinition topology) { this.topology = topology - topoHelper.setTopology(topology) - flowHelper.setTopology(topology) - flowHelperV2.setTopology(topology) + flowFactory.topology = topology + } - def createFlow(FlowRequestV2 flow, waitForRules = false) { + def createFlow(FlowExtended flow, waitForRules = false) { Wrappers.silent { log.info "creating flow $flow.flowId" - waitForRules ? flowHelperV2.addFlow(flow) : northboundV2.addFlow(flow) + waitForRules ? 
flow.create() : flow.sendCreateRequest() flows << flow return flow } @@ -322,7 +330,7 @@ idle, mass manual reroute, isl break. Step repeats for pre-defined amount of tim def flowToDelete = flows[r.nextInt(flows.size())] log.info "deleting flow $flowToDelete.flowId" try { - northboundV2.deleteFlow(flowToDelete.flowId) + flowToDelete.sendDeleteRequest() } catch (HttpStatusCodeException e) { if (e.statusCode == HttpStatus.NOT_FOUND) { //flow already removed, do nothing @@ -339,9 +347,8 @@ idle, mass manual reroute, isl break. Step repeats for pre-defined amount of tim def flowToUpdate = flows[r.nextInt(flows.size())] log.info "updating flow $flowToUpdate.flowId" Wrappers.silent { - northboundV2.updateFlow(flowToUpdate.flowId, flowToUpdate.tap { - it.maximumBandwidth = it.maximumBandwidth + r.nextInt(10000) - }) + flowToUpdate.update(flowToUpdate.tap { it.maximumBandwidth = it.maximumBandwidth + r.nextInt(10000) }) + } return flowToUpdate } @@ -376,11 +383,11 @@ idle, mass manual reroute, isl break. Step repeats for pre-defined amount of tim Collections.shuffle(flows) task { withPool { - flows[0..flows.size() / 20].eachParallel { flow -> + flows[0..flows.size() / 20].eachParallel { FlowExtended flow -> Wrappers.silent { //may fail due to 'in progress' status, just retry Wrappers.retry(5, 1, {}) { - northboundV2.rerouteFlow(flow.flowId) + flow.reroute() } } } @@ -399,7 +406,7 @@ idle, mass manual reroute, isl break. 
Step repeats for pre-defined amount of tim }.then({ it }, { throw it }) } - boolean isFlowPingable(FlowRequestV2 flow) { + boolean isFlowPingable(FlowExtended flow) { if (flow.source.switchId == flow.destination.switchId) { return false } else { diff --git a/src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/VolumeSpec.groovy b/src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/endurance/VolumeSpec.groovy similarity index 55% rename from src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/VolumeSpec.groovy rename to src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/endurance/VolumeSpec.groovy index f78c78ed731..c96510058f6 100644 --- a/src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/VolumeSpec.groovy +++ b/src-java/testing/performance-tests/src/test/groovy/org/openkilda/performancetests/spec/endurance/VolumeSpec.groovy @@ -1,10 +1,9 @@ -package org.openkilda.performancetests.spec +package org.openkilda.performancetests.spec.endurance import org.openkilda.functionaltests.helpers.Wrappers -import org.openkilda.model.cookie.Cookie -import org.openkilda.northbound.dto.v2.flows.FlowRequestV2 +import org.openkilda.functionaltests.helpers.model.FlowExtended +import org.openkilda.functionaltests.helpers.model.SwitchPortVlan import org.openkilda.performancetests.BaseSpecification -import org.openkilda.testing.model.topology.TopologyDefinition.Switch import spock.lang.Shared @@ -20,8 +19,7 @@ class VolumeSpec extends BaseSpecification { def "Able to validate a switch with a lot of flows on different ports"() { given: "A small topology" def topo = topoHelper.createRandomTopology(5, 15) - topoHelper.setTopology(topo) - flowHelperV2.setTopology(topo) + flowFactory.setTopology(topo) and: "A switch under test" def sw = topo.switches.first() @@ -29,18 +27,18 @@ class VolumeSpec extends 
BaseSpecification { def allowedPorts = (1..200 + busyPorts.size()) - busyPorts //200 total when: "Create total 200 flows on a switch, each flow uses free non-isl port" - List flows = [] + List flows = [] + List busyEndpoints = [] allowedPorts.each { port -> - def flow = flowHelperV2.randomFlow(sw, pickRandom(topo.switches - sw), false, flows) - flow.allocateProtectedPath = r.nextBoolean() - flow.source.portNumber = port - northboundV2.addFlow(flow) + def flow = flowFactory.getBuilder(sw, pickRandom(topo.switches - sw), false, busyEndpoints) + .withProtectedPath(r.nextBoolean()).withSourcePort(port).build().sendCreateRequest() + busyEndpoints.addAll(flow.occupiedEndpoints()) flows << flow } then: "Each flow passes flow validation" Wrappers.wait(flows.size()) { - flows.forEach { northbound.validateFlow(it.flowId).each { assert it.asExpected } } + flows.forEach { assert it.validateAndCollectDiscrepancies().isEmpty() } } and: "Target switch passes switch validation" @@ -50,16 +48,7 @@ class VolumeSpec extends BaseSpecification { } cleanup: "Remove all flows, delete topology" - flows.each { northbound.deleteFlow(it.flowId) } - Wrappers.wait(flows.size()) { - topo.switches.each { - assert northbound.getSwitchRules(it.dpId).flowEntries.findAll { !Cookie.isDefaultRule(it.cookie) }.empty - } - } + deleteFlows(flows) topoHelper.purgeTopology(topo) } - - Switch pickRandom(List switches) { - switches[r.nextInt(switches.size())] - } } diff --git a/src-java/testing/performance-tests/src/test/resources/META-INF/services/org.spockframework.runtime.extension.IGlobalExtension b/src-java/testing/performance-tests/src/test/resources/META-INF/services/org.spockframework.runtime.extension.IGlobalExtension index 2745a9bd8db..04e73a8e1ee 100644 --- a/src-java/testing/performance-tests/src/test/resources/META-INF/services/org.spockframework.runtime.extension.IGlobalExtension +++ 
b/src-java/testing/performance-tests/src/test/resources/META-INF/services/org.spockframework.runtime.extension.IGlobalExtension @@ -1,3 +1,3 @@ org.openkilda.functionaltests.extension.SkippedTestsLogger org.openkilda.functionaltests.extension.tags.TagExtension -org.openkilda.functionaltests.extension.env.AssumeProfileExtension +org.openkilda.functionaltests.extension.env.AssumeProfileExtension \ No newline at end of file diff --git a/src-java/testing/performance-tests/src/test/resources/spring-context.xml b/src-java/testing/performance-tests/src/test/resources/spring-context.xml index 2d1d5cb4536..a229b3ddc84 100644 --- a/src-java/testing/performance-tests/src/test/resources/spring-context.xml +++ b/src-java/testing/performance-tests/src/test/resources/spring-context.xml @@ -7,4 +7,13 @@ + + + + + + + + + From d27b5bbd3e1df62122a44e4c4d3dd229bfdd89d4 Mon Sep 17 00:00:00 2001 From: Pablo Murillo Date: Wed, 17 Jul 2024 15:17:12 +0200 Subject: [PATCH 6/7] Add a delta to compare all the metric rates There is a physical switch that stores the metric rate number using a floating point variable. This means that sometimes the rate value is not properly compared with the expected value. This commit applies a max delta to all the metric rates comparations that allow to introduce an expected error during the comparison. 
Closes #5638 --- .../main/java/org/openkilda/model/Meter.java | 12 ++++++++--- .../java/org/openkilda/model/MeterTest.java | 10 ++++++++++ .../impl/ValidationServiceImplTest.java | 20 +++++++++++++++++++ 3 files changed, 39 insertions(+), 3 deletions(-) diff --git a/src-java/kilda-model/src/main/java/org/openkilda/model/Meter.java b/src-java/kilda-model/src/main/java/org/openkilda/model/Meter.java index 48ab50a68ee..d8fd3c3efb5 100644 --- a/src-java/kilda-model/src/main/java/org/openkilda/model/Meter.java +++ b/src-java/kilda-model/src/main/java/org/openkilda/model/Meter.java @@ -30,6 +30,7 @@ public final class Meter implements Serializable { public static final int MIN_RATE_IN_KBPS = 64; private static final int METER_BURST_SIZE_EQUALS_DELTA = 1; + private static final int E_SWITCH_METER_RATE_EQUALS_DELTA = 1; private static final double E_SWITCH_METER_RATE_EQUALS_DELTA_COEFFICIENT = 0.01; private static final double E_SWITCH_METER_BURST_SIZE_EQUALS_DELTA_COEFFICIENT = 0.01; @@ -117,10 +118,15 @@ public static boolean equalsRate(long actual, long expected, boolean isESwitch) // E-switches have a bug when installing the rate and burst size. // Such switch sets the rate different from the rate that was sent to it. // Therefore, we compare actual and expected values ​​using the delta coefficient. 
- if (isESwitch) { - return Math.abs(actual - expected) <= expected * E_SWITCH_METER_RATE_EQUALS_DELTA_COEFFICIENT; + if (!isESwitch) { + return actual == expected; + } + //this is a workaround for the bug in the E-switches when the rate is too small (less than 100 if delta + // coefficient is 0.01) + if (expected * E_SWITCH_METER_RATE_EQUALS_DELTA_COEFFICIENT < E_SWITCH_METER_RATE_EQUALS_DELTA) { + return Math.abs(actual - expected) <= E_SWITCH_METER_RATE_EQUALS_DELTA; } - return actual == expected; + return Math.abs(actual - expected) <= expected * E_SWITCH_METER_RATE_EQUALS_DELTA_COEFFICIENT; } /** diff --git a/src-java/kilda-model/src/test/java/org/openkilda/model/MeterTest.java b/src-java/kilda-model/src/test/java/org/openkilda/model/MeterTest.java index 5e3412dd888..668118980d2 100644 --- a/src-java/kilda-model/src/test/java/org/openkilda/model/MeterTest.java +++ b/src-java/kilda-model/src/test/java/org/openkilda/model/MeterTest.java @@ -17,6 +17,8 @@ import static com.google.common.collect.Sets.newHashSet; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.openkilda.model.SwitchFeature.MAX_BURST_COEFFICIENT_LIMITATION; import org.junit.jupiter.api.Test; @@ -55,4 +57,12 @@ public void convertCalculateBurstSizeConsideringHardwareLimitations() { assertEquals(100, Meter.calculateBurstSizeConsideringHardwareLimitations(100, 100, limitationFeature)); assertEquals(10, Meter.calculateBurstSizeConsideringHardwareLimitations(100, 10, limitationFeature)); } + + @Test + public void equalsRate() { + assertTrue(Meter.equalsRate(100, 100, false)); + assertFalse(Meter.equalsRate(65, 64, false)); + assertTrue(Meter.equalsRate(10003, 10000, true)); + assertTrue(Meter.equalsRate(65, 64, true)); + } } diff --git 
a/src-java/swmanager-topology/swmanager-storm-topology/src/test/java/org/openkilda/wfm/topology/switchmanager/service/impl/ValidationServiceImplTest.java b/src-java/swmanager-topology/swmanager-storm-topology/src/test/java/org/openkilda/wfm/topology/switchmanager/service/impl/ValidationServiceImplTest.java index e52c0a13864..47233713ae5 100644 --- a/src-java/swmanager-topology/swmanager-storm-topology/src/test/java/org/openkilda/wfm/topology/switchmanager/service/impl/ValidationServiceImplTest.java +++ b/src-java/swmanager-topology/swmanager-storm-topology/src/test/java/org/openkilda/wfm/topology/switchmanager/service/impl/ValidationServiceImplTest.java @@ -303,6 +303,26 @@ public void validateMetersProperMeters() { meter.getBurst(), meter.getFlags().stream().map(MeterFlag::name).collect(Collectors.toSet())); } + @Test + public void validateProperMetersSlightlyDifferentMeters() { + ValidationService validationService = new ValidationServiceImpl(persistenceManager().build(), ruleManager); + MeterSpeakerData meter1 = buildFullMeterSpeakerCommandData(32, 64, 10500, + Sets.newHashSet(MeterFlag.KBPS, MeterFlag.BURST, MeterFlag.STATS)); + MeterSpeakerData meter2 = buildFullMeterSpeakerCommandData(32, 65, 10500, + Sets.newHashSet(MeterFlag.KBPS, MeterFlag.BURST, MeterFlag.STATS)); + + ValidateMetersResultV2 response = validationService.validateMeters(SWITCH_ID_E, + singletonList(meter1), + singletonList(meter2), + true, false); + + assertTrue(response.getMissingMeters().isEmpty()); + assertTrue(response.getMisconfiguredMeters().isEmpty()); + assertFalse(response.getProperMeters().isEmpty()); + assertTrue(response.getExcessMeters().isEmpty()); + assertTrue(response.isAsExpected()); + } + @Test public void validateMetersMissingAndExcessMeters() { ValidationService validationService = new ValidationServiceImpl(persistenceManager().build(), ruleManager); From cfe6afa5c49ed6f426c8e89faefe081f734c0901 Mon Sep 17 00:00:00 2001 From: Pablo Murillo Date: Wed, 7 Aug 2024 16:28:44 
+0200 Subject: [PATCH 7/7] Update CHANGELOG.md --- CHANGELOG.md | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 584e0437ff1..9990d468393 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,20 @@ # Changelog +## v1.161.0 (08/08/2024) + +### Bug Fixes: +- [#5711](https://github.com/telstra/open-kilda/pull/5711) Add a delta to compare all the metric rates (Issue: [#5638](https://github.com/telstra/open-kilda/issues/5638)) +- [#5716](https://github.com/telstra/open-kilda/pull/5716) Add split kafka message support for flow validation process (Issue: [#5718](https://github.com/telstra/open-kilda/issues/5718)) + +### Improvements: +- [#5705](https://github.com/telstra/open-kilda/pull/5705) [TEST]: Regular Flow: New interaction approach: Func-tests: Eliminating flowHelper(v1/v2) usage [**tests**] +- [#5709](https://github.com/telstra/open-kilda/pull/5709) Update git hook +- [#5713](https://github.com/telstra/open-kilda/pull/5713) [TEST]: Regular Flow: Performance tests: Eliminating flowHelper(v1/v2) usage [**tests**] +- [#5722](https://github.com/telstra/open-kilda/pull/5722) [TEST]: Issue 5699: Flaky test: Flow with protected path(reroute) [**tests**] + + +For the complete list of changes, check out [the commit log](https://github.com/telstra/open-kilda/compare/v.160.0...v1.161.0). + +--- ## v1.160.0 (01/08/2024)