Skip to content

Commit

Permalink
Merge PR ceph#16036 into HEAD
Browse files Browse the repository at this point in the history
* refs/remotes/upstream/pull/16036/head:
	mds: improve cap min/max ratio descriptions
	mds: fix whitespace
	mds: cap client recall to min caps per client
	mds: fix conf types
	mds: fix whitespace
	doc/cephfs: add client min cache and max cache ratio describe
	mds: adding tunable features for caps_per_client

Reviewed-by: Patrick Donnelly <pdonnell@redhat.com>
Reviewed-by: Zheng Yan <zyan@redhat.com>
  • Loading branch information
batrick committed Sep 29, 2017
2 parents a2d7279 + dcf97d1 commit 1da4a50
Show file tree
Hide file tree
Showing 4 changed files with 61 additions and 22 deletions.
14 changes: 14 additions & 0 deletions doc/cephfs/mds-config-ref.rst
Original file line number Diff line number Diff line change
Expand Up @@ -613,3 +613,17 @@

:Type: Boolean
:Default: ``false``


``mds min caps per client``

:Description: Set the minimum number of capabilities a client may hold.
:Type: Integer
:Default: ``100``


``mds max ratio caps per client``

:Description: Set the maximum ratio of current caps that may be recalled during MDS cache pressure.
:Type: Float
:Default: ``0.8``
37 changes: 25 additions & 12 deletions qa/tasks/cephfs/test_client_limits.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ class TestClientLimits(CephFSTestCase):
REQUIRE_KCLIENT_REMOTE = True
CLIENTS_REQUIRED = 2

def _test_client_pin(self, use_subdir):
def _test_client_pin(self, use_subdir, open_files):
"""
When a client pins an inode in its cache, for example because the file is held open,
it should reject requests from the MDS to trim these caps. The MDS should complain
Expand All @@ -39,13 +39,16 @@ def _test_client_pin(self, use_subdir):
:param use_subdir: whether to put test files in a subdir or use root
"""

cache_size = 100
open_files = 200
cache_size = open_files/2

self.set_conf('mds', 'mds cache size', cache_size)
self.fs.mds_fail_restart()
self.fs.wait_for_daemons()

mds_min_caps_per_client = int(self.fs.get_config("mds_min_caps_per_client"))
self.assertTrue(open_files >= mds_min_caps_per_client)
mds_max_ratio_caps_per_client = float(self.fs.get_config("mds_max_ratio_caps_per_client"))

mount_a_client_id = self.mount_a.get_global_id()
path = "subdir/mount_a" if use_subdir else "mount_a"
open_proc = self.mount_a.open_n_background(path, open_files)
Expand All @@ -62,8 +65,7 @@ def _test_client_pin(self, use_subdir):
# MDS should not be happy about that, as the client is failing to comply
# with the SESSION_RECALL messages it is being sent
mds_recall_state_timeout = float(self.fs.get_config("mds_recall_state_timeout"))
self.wait_for_health("MDS_CLIENT_RECALL",
mds_recall_state_timeout + 10)
self.wait_for_health("MDS_CLIENT_RECALL", mds_recall_state_timeout+10)

# We can also test that the MDS health warning for oversized
# cache is functioning as intended.
Expand All @@ -82,19 +84,30 @@ def _test_client_pin(self, use_subdir):

# The remaining caps should comply with the numbers sent from MDS in SESSION_RECALL message,
# which depend on the caps outstanding, cache size and overall ratio
self.wait_until_equal(
lambda: self.get_session(mount_a_client_id)['num_caps'],
int(open_files * 0.2),
timeout=30,
reject_fn=lambda x: x < int(open_files*0.2))
def expected_caps():
    """
    Predicate for wait_until_true: return True once the client's cap
    count has settled at an acceptable post-recall value.

    Acceptable values are either the configured floor
    (mds_min_caps_per_client) or the count implied by the recall
    ratio: int((1 - mds_max_ratio_caps_per_client) * (open_files + 2)).
    The "+ 2" presumably accounts for the root/subdir inode caps --
    TODO confirm against the MDS recall logic.
    """
    num_caps = self.get_session(mount_a_client_id)['num_caps']
    if num_caps < mds_min_caps_per_client:
        # Dropping below the configured minimum is a hard failure of the
        # recall logic.  A bare `raise` here (with no active exception)
        # would abort with an unrelated "No active exception to re-raise"
        # RuntimeError; raise an explicit, descriptive error instead.
        raise RuntimeError("client caps fell below min: {0} < {1}".format(
            num_caps, mds_min_caps_per_client))
    elif num_caps == mds_min_caps_per_client:
        return True
    elif num_caps == int((1.0 - mds_max_ratio_caps_per_client) * (open_files + 2)):
        return True
    else:
        return False

self.wait_until_true(expected_caps, timeout=60)

@needs_trimming
def test_client_pin_root(self):
self._test_client_pin(False)
self._test_client_pin(False, 400)

@needs_trimming
def test_client_pin(self):
self._test_client_pin(True)
self._test_client_pin(True, 800)

@needs_trimming
def test_client_pin_mincaps(self):
    # Exercise the mds_min_caps_per_client floor: open_files=200 is small
    # enough that recall should bottom out at the configured minimum
    # rather than the ratio-derived limit (use_subdir=True puts the test
    # files under a subdirectory).
    self._test_client_pin(True, 200)

def test_client_release_bug(self):
"""
Expand Down
8 changes: 8 additions & 0 deletions src/common/options.cc
Original file line number Diff line number Diff line change
Expand Up @@ -5835,6 +5835,14 @@ std::vector<Option> get_mds_options() {
Option("mds_client_writeable_range_max_inc_objs", Option::TYPE_UINT, Option::LEVEL_ADVANCED)
.set_default(1024)
.set_description(""),

Option("mds_min_caps_per_client", Option::TYPE_UINT, Option::LEVEL_ADVANCED)
.set_default(100)
.set_description("minimum number of capabilities a client may hold"),

Option("mds_max_ratio_caps_per_client", Option::TYPE_FLOAT, Option::LEVEL_ADVANCED)
.set_default(.8)
.set_description("maximum ratio of current caps that may be recalled during MDS cache pressure"),
});
}

Expand Down
24 changes: 14 additions & 10 deletions src/mds/Server.cc
Original file line number Diff line number Diff line change
Expand Up @@ -1083,8 +1083,14 @@ void Server::recover_filelocks(CInode *in, bufferlist locks, int64_t client)
void Server::recall_client_state(void)
{
/* try to recall at least 80% of all caps */
uint64_t max_caps_per_client = (Capability::count() * .8);
uint64_t min_caps_per_client = 100;
uint64_t max_caps_per_client = Capability::count() * g_conf->get_val<double>("mds_max_ratio_caps_per_client");
uint64_t min_caps_per_client = g_conf->get_val<uint64_t>("mds_min_caps_per_client");
if (max_caps_per_client < min_caps_per_client) {
dout(0) << "max_caps_per_client " << max_caps_per_client
<< " < min_caps_per_client " << min_caps_per_client << dendl;
max_caps_per_client = min_caps_per_client + 1;
}

/* unless this ratio is smaller: */
/* ratio: determine the amount of caps to recall from each client. Use
* percentage full over the cache reservation. Cap the ratio at 80% of client
Expand All @@ -1107,14 +1113,12 @@ void Server::recall_client_state(void)
<< ", leases " << session->leases.size()
<< dendl;

if (session->caps.size() > min_caps_per_client) {
uint64_t newlim = MIN((session->caps.size() * ratio), max_caps_per_client);
if (session->caps.size() > newlim) {
MClientSession *m = new MClientSession(CEPH_SESSION_RECALL_STATE);
m->head.max_caps = newlim;
mds->send_message_client(m, session);
session->notify_recall_sent(newlim);
}
uint64_t newlim = MAX(MIN((session->caps.size() * ratio), max_caps_per_client), min_caps_per_client);
if (session->caps.size() > newlim) {
MClientSession *m = new MClientSession(CEPH_SESSION_RECALL_STATE);
m->head.max_caps = newlim;
mds->send_message_client(m, session);
session->notify_recall_sent(newlim);
}
}
}
Expand Down

0 comments on commit 1da4a50

Please sign in to comment.