fix userns + restart policy with slirp4netns
Currently we deadlock in the slirp4netns setup code as we try to
configure a non-existing netns. This happens because since commit
bbd6281 we correctly tear down the netns in the userns case, but that
change introduced this slirp4netns problem. The code does a proper new
network setup later, so we should only use the shortcut when not in a
userns.

Fixes containers#21477

Signed-off-by: Paul Holzinger <pholzing@redhat.com>
Luap99 committed Feb 6, 2024
1 parent a2f0a44 commit 7d15bc2
Showing 2 changed files with 26 additions and 11 deletions.
10 changes: 7 additions & 3 deletions libpod/container_internal.go

@@ -306,9 +306,13 @@ func (c *Container) handleRestartPolicy(ctx context.Context) (_ bool, retErr err
 		return false, err
 	}
 
-	// set up slirp4netns again because slirp4netns will die when conmon exits
-	if err := c.setupRootlessNetwork(); err != nil {
-		return false, err
+	// Only do this if the container is not in a userns; if we are in one, cleanupNetwork()
+	// was called above and a proper network setup is needed, which is part of the init() below.
+	if !c.config.PostConfigureNetNS {
+		// set up slirp4netns again because slirp4netns will die when conmon exits
+		if err := c.setupRootlessNetwork(); err != nil {
+			return false, err
+		}
 	}
 
 	if c.state.State == define.ContainerStateStopped {
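For readers outside the podman tree, below is a minimal, self-contained sketch of the control flow this hunk establishes. PostConfigureNetNS, setupRootlessNetwork() and cleanupNetwork()/init() mirror names from the real code, but the struct layout, the initContainer/restart method names, and the stub bodies are assumptions made purely for illustration, not podman's actual implementation:

package main

import "fmt"

// containerConfig and Container are illustrative stand-ins for libpod's
// types (assumptions for this sketch, not podman's real definitions).
type containerConfig struct {
	// PostConfigureNetNS is set when the netns can only be configured
	// after the user namespace exists, i.e. the container is in a userns.
	PostConfigureNetNS bool
}

type Container struct {
	config containerConfig
}

// setupRootlessNetwork stands in for re-attaching slirp4netns, which dies
// together with conmon. Before the fix this ran unconditionally and
// deadlocked when the netns had already been torn down.
func (c *Container) setupRootlessNetwork() error {
	fmt.Println("re-starting slirp4netns for the existing netns")
	return nil
}

// initContainer stands in for init(), which performs a full network
// setup, including creating a fresh netns.
func (c *Container) initContainer() error {
	fmt.Println("full re-init with proper network setup")
	return nil
}

// restart mirrors the shape of the fixed restart-policy path: the
// slirp4netns shortcut is taken only when NOT in a userns.
func (c *Container) restart() error {
	if !c.config.PostConfigureNetNS {
		if err := c.setupRootlessNetwork(); err != nil {
			return err
		}
	}
	// In the userns case cleanupNetwork() already removed the netns, so
	// the proper setup happens as part of the init below instead.
	return c.initContainer()
}

func main() {
	(&Container{config: containerConfig{PostConfigureNetNS: true}}).restart()
	(&Container{config: containerConfig{PostConfigureNetNS: false}}).restart()
}

The point of the guard is that the two paths are mutually exclusive: either slirp4netns can be re-attached to a still-existing netns, or the netns is already gone and only a full re-init can recreate it.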
27 changes: 19 additions & 8 deletions test/system/500-networking.bats

@@ -874,16 +874,14 @@ EOF
 # Test for https://github.com/containers/podman/issues/18615
 @test "podman network cleanup --userns + --restart" {
     skip_if_cgroupsv1 "run --uidmap fails on cgroups v1 (issue 15025, wontfix)"
-    userns="--userns=keep-id"
-    if ! is_rootless; then
-        userns="--uidmap=0:1111111:65536 --gidmap=0:1111111:65536"
-    fi
 
     local net1=a-$(random_string 10)
     # use /29 subnet to limit available ip space: a /29 has 6 usable host addresses, minus 1 for the gateway = 5 for containers
     local subnet="$(random_rfc1918_subnet).0/29"
     run_podman network create --subnet $subnet $net1
-    local cname=con-$(random_string 10)
+    local cname=con1-$(random_string 10)
+    local cname2=con2-$(random_string 10)
+    local cname3=
 
     local netns_count=
     if ! is_rootless; then
@@ -896,18 +894,31 @@
 
     # Previously this would fail as the container would run out of ips after 5 restarts.
     run_podman inspect --format "{{.RestartCount}}" $cname
-    assert "$output" == "6" "RestartCount for failing container"
+    assert "$output" == "6" "RestartCount for failing container with bridge network"
 
     # Now make sure we can still run a container with free ips.
     run_podman run --rm --network $net1 $IMAGE true
 
-    if ! is_rootless; then
+    # And now, because of all the fun, we have to check the same with slirp4netns and pasta,
+    # because those use slightly different code paths. Note this would deadlock before the fix.
+    # https://github.com/containers/podman/issues/21477
+    run_podman 1 run --name $cname2 --network slirp4netns --restart on-failure:2 --userns keep-id $IMAGE false
+    run_podman inspect --format "{{.RestartCount}}" $cname2
+    assert "$output" == "2" "RestartCount for failing container with slirp4netns"
+
+    if is_rootless; then
+        # pasta can only run rootless
+        cname3=con3-$(random_string 10)
+        run_podman 1 run --name $cname3 --network pasta --restart on-failure:2 --userns keep-id $IMAGE false
+        run_podman inspect --format "{{.RestartCount}}" $cname3
+        assert "$output" == "2" "RestartCount for failing container with pasta"
+    else
         # This is racy if other programs modify /run/netns while the test is running.
         # However I think the risk is minimal and checking for this is important.
         assert "$(ls /run/netns | wc -l)" == "$netns_count" "/run/netns has no leaked netns files"
     fi
 
-    run_podman rm -f -t0 $cname
+    run_podman rm -f -t0 $cname $cname2 $cname3
     run_podman network rm $net1
 }
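To exercise just this regression test from a podman checkout, an invocation along the following lines should work; it assumes a bats-core new enough to support --filter (podman also ships a hack/bats wrapper, whose exact usage may differ):

# The pasta branch only runs rootless and the /run/netns leak check only
# runs as root, so the test is worth running both ways.
bats --filter "network cleanup" test/system/500-networking.bats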
