Adjust Idle culler settings and add internal culling #1133
files/04-idle-culler.py
@@ -0,0 +1,38 @@
# To help jupyterhub-idle-culler cull user servers, we configure the kernel manager to cull
# idle kernels that would otherwise make the user servers report themselves as active, which
# is part of what jupyterhub-idle-culler considers.

# Extra config available at:
# https://zero-to-jupyterhub.readthedocs.io/en/1.x/jupyterhub/customizing/user-management.html#culling-user-pods

# Timeout (in seconds) after which an inactive terminal is considered ready to
# be culled.
c.TerminalManager.cull_inactive_timeout = 15 * 60

# The interval (in seconds) on which to check for terminals exceeding the
# inactive timeout value.
c.TerminalManager.cull_interval = 5 * 60

# cull_idle_timeout: timeout (in seconds) after which an idle kernel is
# considered ready to be culled
c.MappingKernelManager.cull_idle_timeout = 15 * 60

# cull_interval: the interval (in seconds) on which to check for idle
# kernels exceeding the cull timeout value
c.MappingKernelManager.cull_interval = 5 * 60

# cull_connected: whether to consider culling kernels which have one
# or more connections
c.MappingKernelManager.cull_connected = True

# cull_busy: whether to consider culling kernels which are currently
# busy running some code
c.MappingKernelManager.cull_busy = False

# Shut down the server after N seconds with no kernels or terminals
# running and no activity.
c.NotebookApp.shutdown_no_activity_timeout = 15 * 60

###############################################################################
# JupyterHub idle culler total timeout corresponds (approximately) to:
# max(cull_idle_timeout, cull_inactive_timeout) + shutdown_no_activity_timeout
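With the values set here, that works out to max(15 * 60, 15 * 60) + 15 * 60 = 1800 seconds, i.e. roughly 30 minutes of inactivity before an otherwise untouched user server shuts itself down.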
main.tf
@@ -10,6 +10,17 @@ resource "random_password" "proxy_secret_token" {
   special = false
 }
 
+resource "kubernetes_config_map" "server-idle-culling" {
+  metadata {
+    name      = "server-idle-culling"
+    namespace = var.namespace
+  }
+
+  data = {
+    "jupyter_notebook_config.py" = file("${path.module}/files/04-idle-culler.py")
+  }
+}
+
 resource "helm_release" "jupyterhub" {
   name      = "jupyterhub"
   namespace = var.namespace
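Mounting this config map at /etc/jupyter (done below via extra-mounts) places the file at /etc/jupyter/jupyter_notebook_config.py, one of the standard system-wide locations the notebook server reads configuration from, so every user server picks up the culling settings above.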
@@ -30,7 +41,16 @@ resource "helm_release" "jupyterhub" {
   shared-pvc        = var.shared-pvc
   conda-store-pvc   = var.conda-store-pvc
   conda-store-mount = var.conda-store-mount
-  extra-mounts      = var.extra-mounts
+  extra-mounts = merge(
+    var.extra-mounts,
+    {
+      "/etc/jupyter" = {
+        name      = "server-idle-culling"
+        namespace = var.namespace
+        kind      = "configmap"
+      }
+    }
+  )
   environments = var.conda-store-environments
 }
@@ -41,9 +61,9 @@ resource "helm_release" "jupyterhub" {
   }
 
   extraConfig = {
-    "01-theme.py" = file("${path.module}/files/01-theme.py")
-    "02-spawner.py" = file("${path.module}/files/02-spawner.py")
-    "03-profiles.py" = file("${path.module}/files/03-profiles.py")
+    "01-theme.py"    = file("${path.module}/files/01-theme.py")
+    "02-spawner.py"  = file("${path.module}/files/02-spawner.py")
+    "03-profiles.py" = file("${path.module}/files/03-profiles.py")
   }
 
   services = {
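The merge call above treats extra-mounts as a map keyed by mount path, with each value describing what to mount there. As a rough sketch only, since the module's actual variable declaration is not part of this diff and its exact type is an assumption, the shape being merged into would look something like:

# Hypothetical sketch: the assumed shape of the module's extra-mounts input.
# The real declaration lives in the module's variables.tf, outside this diff.
variable "extra-mounts" {
  description = "Extra volumes to mount into user pods, keyed by mount path"
  type = map(object({
    name      = string
    namespace = string
    kind      = string
  }))
  default = {}
}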
Review comment:
I'd prefer if extra mounts was instead added within the module so that we can directly reference the config map name. There is a Terraform concat function that can be used.
Reply:
Hi @costrouc, just to be sure... you are saying something like this on main.tf? I changed it over to merge, as concat only works for lists.
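For reference, a minimal sketch of the distinction the reply leans on (the literal values below are illustrative only): merge joins maps, which matches the dict-shaped extra-mounts value, while concat only joins lists.

locals {
  # merge() combines maps; later arguments override earlier ones on key collisions.
  merged_example = merge(
    { "/home/shared" = "shared-pvc" },
    { "/etc/jupyter" = "server-idle-culling" }
  )
  # => { "/etc/jupyter" = "server-idle-culling", "/home/shared" = "shared-pvc" }

  # concat() only joins lists, so it cannot build the map that extra-mounts expects.
  concat_example = concat(["a"], ["b", "c"])
  # => ["a", "b", "c"]
}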