fleet.yaml
defaultNamespace: rook-ceph
dependsOn:
  - name: aldaas-fleet-rook
helm:
  repo: https://charts.rook.io/release
  chart: rook-ceph-cluster
  values:
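    # Everything under values is passed to the rook-ceph-cluster chart;
    # cephClusterSpec becomes the spec of the CephCluster resource the chart renders.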
    cephClusterSpec:
      dataDirHostPath: /var/lib/rook
      continueUpgradeAfterChecksEvenIfNotHealthy: true
      mgr:
        count: 1
        modules:
          - name: pg_autoscaler
            enabled: true
      dashboard:
        enabled: true
        ssl: false
      # placement:
      #   all:
      #     podAntiAffinity:
      #       preferredDuringSchedulingIgnoredDuringExecution:
      #         - weight: 100
      #           podAffinityTerm:
      #             labelSelector:
      #               matchExpressions:
      #                 - key: rook_cluster
      #                   operator: In
      #                   values:
      #                     - rook-ceph
      #             topologyKey: kubernetes.io/hostname
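      # Three mons on three distinct nodes (allowMultiplePerNode is false),
      # each persisting its store on a 10Gi PVC.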
      mon:
        count: 3
        allowMultiplePerNode: false
        volumeClaimTemplate:
          spec:
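            # Fleet replaces global.fleet.clusterLabels.<name> strings in helm values
            # with the value of that label on the target cluster, so the StorageClass
            # name here is taken from the cluster's ceph-storage label.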
            storageClassName: global.fleet.clusterLabels.ceph-storage
            resources:
              requests:
                storage: 10Gi
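      # Three portable OSDs, each backed by a 100Gi block-mode PVC from the same
      # cluster-label StorageClass and spread across nodes via topologySpreadConstraints.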
      storage:
        storageClassDeviceSets:
          - name: osd-pool
            count: 3
            portable: true
            encrypted: false
            volumeClaimTemplates:
              - metadata:
                  name: data
                spec:
                  resources:
                    requests:
                      storage: 100Gi
                  storageClassName: global.fleet.clusterLabels.ceph-storage
                  volumeMode: Block
                  accessModes:
                    - ReadWriteOnce
            placement:
              topologySpreadConstraints:
                - maxSkew: 1
                  topologyKey: kubernetes.io/hostname
                  whenUnsatisfiable: ScheduleAnyway
                  labelSelector:
                    matchExpressions:
                      - key: app
                        operator: In
                        values:
                          - rook-ceph-osd
            preparePlacement:
              podAntiAffinity:
                preferredDuringSchedulingIgnoredDuringExecution:
                  - weight: 100
                    podAffinityTerm:
                      labelSelector:
                        matchExpressions:
                          - key: app
                            operator: In
                            values:
                              - rook-ceph-osd
                              - rook-ceph-osd-prepare
                      topologyKey: kubernetes.io/hostname
        onlyApplyOSDPlacement: false
    cephBlockPools:
      - name: replicated-metadata-pool
        spec:
          failureDomain: osd
          replicated:
            size: 3
          deviceClass: ssd
        storageClass:
          enabled: false
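      # RBD images on an erasure-coded pool still need a replicated pool for their
      # metadata: the StorageClass below pairs ec-data-pool (dataPool) with
      # replicated-metadata-pool (pool).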
      - name: ec-data-pool
        spec:
          failureDomain: osd
          erasureCoded:
            dataChunks: 2
            codingChunks: 1
          deviceClass: ssd
        storageClass:
          enabled: true
          name: ceph-block
          isDefault: false
          reclaimPolicy: Delete
          allowVolumeExpansion: true
          parameters:
            dataPool: ec-data-pool
            pool: replicated-metadata-pool
            imageFeatures: layering
            csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
            csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
            csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
            csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
            csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
            csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
            csi.storage.k8s.io/fstype: ext4
    cephBlockPoolsVolumeSnapshotClass:
      enabled: true
    cephFileSystems: []
    cephFileSystemVolumeSnapshotClass:
      enabled: false
    cephObjectStores: []
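# Example (not part of this bundle): a PVC that consumes the ceph-block
# StorageClass created above. Name, namespace and size are illustrative.
#
# apiVersion: v1
# kind: PersistentVolumeClaim
# metadata:
#   name: example-ceph-block-pvc
#   namespace: default
# spec:
#   accessModes:
#     - ReadWriteOnce
#   volumeMode: Filesystem
#   resources:
#     requests:
#       storage: 20Gi
#   storageClassName: ceph-block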