Node associated with orphaned member not getting cleaned up #1226

Merged · 1 commit · Jan 19, 2018
19 changes: 19 additions & 0 deletions f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py
@@ -803,6 +803,7 @@ def clean_orphaned_objects_and_save_device_config(self):
        pools = self.lbdriver.get_all_deployed_pools()
        if pools:
            self.purge_orphaned_pools(pools)
            self.purge_orphaned_nodes(pools)

        # Ask the BIG-IP for all deployed monitors not associated
        # to a pool
@@ -892,6 +893,24 @@ def purge_orphaned_l7_policys(self, policies):
                l7_policy_id=policy_key,
                hostname=policy['hostnames'])

    @log_helpers.log_method_call
    def purge_orphaned_nodes(self, pools):
        """Delete BIG-IP nodes that no longer back any pool member."""
        pools_members = self.plugin_rpc.get_pools_members(
            list(pools.keys()))

        tenant_members = dict()
        for pool_id, pool in pools.iteritems():
            tenant_id = pool['tenant_id']
            members = pools_members.get(pool_id, list())

            if tenant_id not in tenant_members:
                tenant_members[tenant_id] = members
            else:
                tenant_members[tenant_id].extend(members)

        self.lbdriver.purge_orphaned_nodes(tenant_members)

    @log_helpers.log_method_call
    def purge_orphaned_pools(self, pools):
        """Deletes hanging pools from the deleted listeners"""
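On the agent side, purge_orphaned_nodes only regroups the Neutron member records by tenant before handing them to the driver. Below is a minimal, self-contained sketch of that regrouping; the pool and member data are invented stand-ins for get_all_deployed_pools() and the plugin_rpc reply, and it uses setdefault where the diff uses an explicit if/else (the effect is the same).

# Invented sample data; the real values come from get_all_deployed_pools()
# and plugin_rpc.get_pools_members().
pools = {
    "pool-1": {"tenant_id": "tenant-a"},
    "pool-2": {"tenant_id": "tenant-a"},
    "pool-3": {"tenant_id": "tenant-b"},
}
pools_members = {
    "pool-1": [{"address": "10.0.0.5", "subnet_id": "subnet-1"}],
    "pool-2": [{"address": "10.0.0.6", "subnet_id": "subnet-1"}],
    # pool-3 has no members left in Neutron, so every node in its
    # tenant partition becomes a deletion candidate on the driver side.
}

tenant_members = {}
for pool_id, pool in pools.items():
    tenant_members.setdefault(pool["tenant_id"], []).extend(
        pools_members.get(pool_id, []))

print(tenant_members["tenant-a"])  # two member dicts
print(tenant_members["tenant-b"])  # []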
48 changes: 45 additions & 3 deletions f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py
@@ -1143,6 +1143,32 @@ def get_all_deployed_listeners(self):
        }
        return deployed_virtual_dict

    @serialized('purge_orphaned_nodes')
    @is_operational
    @log_helpers.log_method_call
    def purge_orphaned_nodes(self, tenant_members):
        node_helper = resource_helper.BigIPResourceHelper(
            resource_helper.ResourceType.node)
        for bigip in self.get_all_bigips():
            for tenant_id, members in tenant_members.iteritems():
                partition = self.service_adapter.prefix + tenant_id
                nodes = node_helper.get_resources(bigip, partition=partition)
                node_dict = {n.name: n for n in nodes}

                for member in members:
                    rd = self.network_builder.find_subnet_route_domain(
                        tenant_id, member.get('subnet_id', None))
                    node_name = "{}%{}".format(member['address'], rd)
                    node_dict.pop(node_name, None)

                for node_name, node in node_dict.iteritems():
                    try:
                        node_helper.delete(bigip, name=urllib.quote(node_name),
                                           partition=partition)
                    except HTTPError as error:
                        if error.response.status_code == 400:
                            LOG.error(error.response)

    @serialized('get_all_deployed_pools')
    @is_operational
    def get_all_deployed_pools(self):
@@ -1774,10 +1800,26 @@ def _service_exists(self, service):
                        name=bigip_pool['name'],
                        partition=folder_name):
                    LOG.error("Pool /%s/%s not found on bigip: %s" %
-                             (bigip_pool['name'], folder_name,
+                             (folder_name, bigip_pool['name'],
                               bigip.hostname))
                    return False
                else:
                    deployed_pool = self.pool_manager.load(
                        bigip,
                        name=bigip_pool['name'],
                        partition=folder_name)
                    deployed_members = \
                        deployed_pool.members_s.get_collection()

                    # First check that the number of members deployed
                    # is equal to the number in the service.
                    if len(deployed_members) != len(pool['members']):
                        LOG.error("Pool %s member count mismatch: "
                                  "deployed %d != service %d" %
                                  (bigip_pool['name'], len(deployed_members),
                                   len(pool['members'])))
                        return False

                # Ensure each pool member exists
                for member in service['members']:
                    if member['pool_id'] == pool['id']:
@@ -1788,8 +1830,8 @@ def _service_exists(self, service):
                                "member": member,
                                "pool": pool}
                        if not lb.pool_builder.member_exists(svc, bigip):
-                           LOG.warn("Pool member not found: %s",
-                                    svc['member'])
+                           LOG.error("Pool member not found: %s" %
+                                     svc['member'])
                            return False

                # Ensure that each health monitor exists.
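The new else branch in _service_exists treats a deployed-versus-service member count mismatch as a missing service, which makes the agent rebuild the pool. Below is a reduced sketch of just that comparison, with placeholder member lists standing in for the BIG-IP members_s collection and the Neutron service data.

# deployed_members stands in for deployed_pool.members_s.get_collection();
# service_members stands in for pool['members'] in the Neutron service.
def pool_member_count_matches(deployed_members, service_members):
    if len(deployed_members) != len(service_members):
        print("member count mismatch: deployed %d != service %d"
              % (len(deployed_members), len(service_members)))
        return False
    return True

deployed_members = ["10.0.0.5%2:80", "10.0.0.6%2:80"]
service_members = [{"address": "10.0.0.5"}]
print(pool_member_count_matches(deployed_members, service_members))  # False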
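The driver-side purge_orphaned_nodes above is essentially a per-partition set difference: any node whose "<address>%<route domain>" name is not claimed by a known member gets deleted. Here is a hedged, SDK-free sketch of that core step; the node names and member records below are invented for illustration.

# Placeholder for the node names returned by node_helper.get_resources()
# in one tenant partition.
deployed_node_names = {"10.0.0.5%2", "10.0.0.6%2", "10.0.0.9%2"}

# Members still known to Neutron for that tenant, with the route domain
# already resolved via find_subnet_route_domain().
members = [
    {"address": "10.0.0.5", "route_domain": 2},
    {"address": "10.0.0.6", "route_domain": 2},
]

claimed = {"{}%{}".format(m["address"], m["route_domain"]) for m in members}
orphans = deployed_node_names - claimed

print(sorted(orphans))  # ['10.0.0.9%2'] -> these nodes would be deleted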
15 changes: 15 additions & 0 deletions f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py
@@ -15,6 +15,7 @@

import itertools
import netaddr
from requests import HTTPError

from neutron.plugins.common import constants as plugin_const
from neutron_lib.exceptions import NeutronException
@@ -268,6 +269,20 @@ def _annotate_service_route_domains(self, service):
    def is_common_network(self, network):
        return self.l2_service.is_common_network(network)

    def find_subnet_route_domain(self, tenant_id, subnet_id):
        rd_id = 0
        bigip = self.driver.get_bigip()
        partition_id = self.service_adapter.get_folder_name(
            tenant_id)
        try:
            tenant_rd = self.network_helper.get_route_domain(
                bigip, partition=partition_id)
            rd_id = tenant_rd.id
        except HTTPError as error:
            LOG.error(error)

        return rd_id

    def assign_route_domain(self, tenant_id, network, subnet):
        # Assign route domain for a network
        if self.l2_service.is_common_network(network):
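find_subnet_route_domain falls back to route domain 0 (the BIG-IP default) whenever the partition's route domain cannot be read, so the caller can still build an "<address>%<rd>" node name. Below is a small sketch of that fallback under the same requests.HTTPError convention used in the diff; find_route_domain_id and its get_route_domain callbacks are stand-ins, not the real network_helper API.

from collections import namedtuple

import requests
from requests import HTTPError

RouteDomain = namedtuple("RouteDomain", "id")


def find_route_domain_id(get_route_domain, partition):
    rd_id = 0  # default route domain
    try:
        rd_id = get_route_domain(partition).id
    except HTTPError:
        pass  # keep the default when the lookup fails
    return rd_id


def existing(partition):
    return RouteDomain(id=2)


def missing(partition):
    response = requests.Response()
    response.status_code = 404
    raise HTTPError(response=response)


print(find_route_domain_id(existing, "Project_tenant-a"))  # 2
print(find_route_domain_id(missing, "Project_tenant-b"))   # 0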
18 changes: 18 additions & 0 deletions f5_openstack_agent/lbaasv2/drivers/bigip/plugin_rpc.py
@@ -680,3 +680,21 @@ def validate_pools_state(self, pools):
                      "validate_pool_state")

        return pool_status

    @log_helpers.log_method_call
    def get_pools_members(self, pools):
        """Get the members for a list of pool IDs from Neutron."""
        pools_members = {}
        try:
            pools_members = self._call(
                self.context,
                self._make_msg('get_pools_members',
                               pools=pools,
                               host=self.host),
                topic=self.topic
            )
        except messaging.MessageDeliveryFailure:
            LOG.error("agent->plugin RPC exception caught: "
                      "get_pools_members")

        return pools_members
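The agent-side caller treats the reply as a mapping of pool ID to member dicts and quietly falls back to an empty mapping when the RPC fails, so a pool with no entry simply claims no nodes. A hypothetical reply showing the shape the code above expects; only 'address' and 'subnet_id' are read by the purge path, and the UUID is made up.

# Hypothetical RPC reply: pool UUID -> list of member records.
pools_members = {
    "9f6aa47f-1131-4b94-9a5d-0b26ad9d071a": [
        {"address": "10.0.0.5", "subnet_id": "subnet-1"},
        {"address": "10.0.0.6", "subnet_id": "subnet-1"},
    ],
}

# Pools without an entry default to an empty member list, mirroring the
# pools_members.get(pool_id, list()) call in agent_manager.py.
print(pools_members.get("unknown-pool-id", []))  # []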
@@ -166,6 +166,7 @@ def mock_all_purges(self, target=None):
        self.mock_purge_orphaned_listeners(target)
        self.mock_purge_orphaned_l7_policys(target)
        self.mock_purge_orphaned_pools(target)
        self.mock_purge_orphaned_nodes(target)
        self.mock_purge_orphaned_health_monitors(target)

    def mock_purge_orphaned_loadbalancers(
@@ -232,6 +233,22 @@ def mock_purge_orphaned_pools(
                          call_cnt, expected_args, kwargs)
        return target

    def mock_purge_orphaned_nodes(
            self, target=None, call_cnt=1, static=None, expected_args=None,
            **kwargs):
        """Mocks the target's purge_orphaned_nodes method

        The given kwargs will be passed to the mock.Mock call

        This will also create a new fully_mocked_target if target is not
        specified.
        """
        if not target:
            target = self.new_fully_mocked_target()
        self._mockfactory(target, 'purge_orphaned_nodes', static,
                          call_cnt, expected_args, kwargs)
        return target

    def mock_purge_orphaned_health_monitors(
            self, target=None, call_cnt=1, static=None, expected_args=None,
            **kwargs):