<?xml version="1.0" encoding="UTF-8"?>
<part id="index"><?dbhtml dir="filesystem" ?>
<title>File System</title>
<chapter id="gpart">
<title>gpart -- control utility for the disk partitioning GEOM class</title>
<para>Display GPT partitions</para>
<screen>
<![CDATA[
root@netkiller:~ # gpart show
=> 34 312581741 ada0 GPT (149G)
34 8388608 1 freebsd-swap (4.0G)
8388642 128 2 freebsd-boot (64k)
8388770 94371712 3 freebsd-ufs (45G)
102760482 209821293 - free - (100G)
]]>
</screen>
<para>Create a GPT partition</para>
<screen>
<![CDATA[
root@netkiller:~ # gpart add -t freebsd-ufs ada0
ada0p4 added
root@netkiller:~ # gpart show
=> 34 312581741 ada0 GPT (149G)
34 8388608 1 freebsd-swap (4.0G)
8388642 128 2 freebsd-boot (64k)
8388770 94371712 3 freebsd-ufs (45G)
102760482 209821293 4 freebsd-ufs (100G)
# newfs /dev/ada0p4
/dev/ada0p4: 102451.8MB (209821288 sectors) block size 32768, fragment size 4096
using 164 cylinder groups of 626.09MB, 20035 blks, 80256 inodes.
super-block backups (for fsck_ffs -b #) at:
192, 1282432, 2564672, 3846912, 5129152, 6411392, 7693632, 8975872, 10258112,
11540352, 12822592, 14104832, 15387072, 16669312, 17951552, 19233792,
20516032, 21798272, 23080512, 24362752, 25644992, 26927232, 28209472,
29491712, 30773952, 32056192, 33338432, 34620672, 35902912, 37185152,
38467392, 39749632, 41031872, 42314112, 43596352, 44878592, 46160832,
47443072, 48725312, 50007552, 51289792, 52572032, 53854272, 55136512,
56418752, 57700992, 58983232, 60265472, 61547712, 62829952, 64112192,
65394432, 66676672, 67958912, 69241152, 70523392, 71805632, 73087872,
74370112, 75652352, 76934592, 78216832, 79499072, 80781312, 82063552,
83345792, 84628032, 85910272, 87192512, 88474752, 89756992, 91039232,
92321472, 93603712, 94885952, 96168192, 97450432, 98732672, 100014912,
101297152, 102579392, 103861632, 105143872, 106426112, 107708352, 108990592,
110272832, 111555072, 112837312, 114119552, 115401792, 116684032, 117966272,
119248512, 120530752, 121812992, 123095232, 124377472, 125659712, 126941952,
128224192, 129506432, 130788672, 132070912, 133353152, 134635392, 135917632,
137199872, 138482112, 139764352, 141046592, 142328832, 143611072, 144893312,
146175552, 147457792, 148740032, 150022272, 151304512, 152586752, 153868992,
155151232, 156433472, 157715712, 158997952, 160280192, 161562432, 162844672,
164126912, 165409152, 166691392, 167973632, 169255872, 170538112, 171820352,
173102592, 174384832, 175667072, 176949312, 178231552, 179513792, 180796032,
182078272, 183360512, 184642752, 185924992, 187207232, 188489472, 189771712,
191053952, 192336192, 193618432, 194900672, 196182912, 197465152, 198747392,
200029632, 201311872, 202594112, 203876352, 205158592, 206440832, 207723072,
209005312
]]>
</screen>
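<para>The new filesystem can then be mounted. A minimal sketch, assuming the hypothetical mountpoint /data; adjust the path and the /etc/fstab entry to your own layout:</para>
<screen>
# mkdir /data
# mount /dev/ada0p4 /data
# echo "/dev/ada0p4 /data ufs rw 2 2" >> /etc/fstab
</screen>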
</chapter>
<chapter id="filesystem.winfs">
<title>Windows File Systems</title>
<section id="msdos">
<title>msdos/fat16 File System</title>
<section>
<title>Partitioning and Formatting</title>
<para>Run sysinstall to partition the disk</para>
<screen>
sysinstall - Configure - Fdisk
</screen>
<para>Format the partition</para>
<screen>
newfs_msdos /dev/da0s1
</screen>
</section>
<section>
<title>Mounting an msdos Partition</title>
<screen>
mount -t msdos /dev/da0s1 /mnt
</screen>
<para>Alternatively, use mount_msdosfs directly:</para>
<screen>
mount_msdosfs /dev/da0s1 /mnt
</screen>
</section>
</section>
</chapter>
<chapter id="zfs">
<title>ZFS</title>
<section id="zfs.enable">
<title>Initialization</title>
<screen>
echo "zfs_enable=YES" >> /etc/rc.conf
echo 'daily_status_zfs_enable="YES"' >> /etc/periodic.conf
freebsd# vi /etc/rc.conf
zfs_enable="YES"
freebsd# /etc/rc.d/zfs faststart
</screen>
<para>Another way to start ZFS temporarily:</para>
<screen>
# /etc/rc.d/zfs onestart
</screen>
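<para>To confirm that ZFS is running, query the pools; a sketch of the expected output on a system where no pool has been created yet:</para>
<screen>
freebsd# zpool status
no pools available
</screen>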
</section>
<section id="zfs.basic">
<title>Creating a Basic Filesystem</title>
<screen>
# zpool create tank c0t0d0
# zfs create tank/fs
# mkfile 100m /tank/fs/foo
# df -h /tank/fs
Filesystem size used avail capacity Mounted on
tank/fs 80G 100M 80G 1% /tank/fs
</screen>
<screen>
# zpool create tank c0t0d0 c1t0d0
</screen>
</section>
<section id="zfs.zpool">
<title>Creating a Storage Pool</title>
<section>
<title>Mirrored Pool</title>
<command># zpool create tank mirror c0t0d0 c0t0d1</command>
<screen>
freebsd# zpool create tank mirror ad1 ad3
freebsd# zpool status tank
pool: tank
state: ONLINE
scrub: none requested
config:
NAME STATE READ WRITE CKSUM
tank ONLINE 0 0 0
mirror ONLINE 0 0 0
ad1 ONLINE 0 0 0
ad3 ONLINE 0 0 0
errors: No known data errors
</screen>
</section>
<section>
<title>RAID-Z Pool</title>
<screen>
freebsd# zpool create zfs raidz ad1 ad3
freebsd# zpool status zfs
pool: zfs
state: ONLINE
scrub: none requested
config:
NAME STATE READ WRITE CKSUM
zfs ONLINE 0 0 0
raidz1 ONLINE 0 0 0
ad1 ONLINE 0 0 0
ad3 ONLINE 0 0 0
errors: No known data errors
</screen>
</section>
<section>
<title>Querying Pool Status</title>
<para>You can see that your pool was successfully created by using the zpool list command:</para>
<screen>
freebsd# zpool list
NAME SIZE USED AVAIL CAP HEALTH ALTROOT
zfs 3.97G 234K 3.97G 0% ONLINE -
</screen>
</section>
<section>
<title>Destroying Pools</title>
<para>Pools are destroyed by using the zpool destroy command. This command destroys the pool even if it contains mounted datasets.</para>
<screen>
# zpool destroy tank
</screen>
<para>Destroying a Pool With Faulted Devices</para>
<screen>
# zpool destroy tank
cannot destroy 'tank': pool is faulted
use '-f' to force destruction anyway
# zpool destroy -f tank
</screen>
</section>
</section>
<section id="zfs.fs">
<title>Creating a Filesystem Hierarchy</title>
<section>
<title>Creating a Filesystem</title>
<screen>
freebsd# zfs create zfs/www
freebsd# mount
/dev/ad0s1a on / (ufs, local)
devfs on /dev (devfs, local, multilabel)
/dev/ad0s1e on /tmp (ufs, local, soft-updates)
/dev/ad0s1f on /usr (ufs, local, soft-updates)
/dev/ad0s1d on /var (ufs, local, soft-updates)
zfs on /zfs (zfs, local)
zfs/www on /zfs/www (zfs, local)
</screen>
<para>Enable gzip compression on the new filesystem:</para>
<screen>
freebsd# zfs set compression=gzip zfs/www
</screen>
</section>
<section>
<title>Setting Quotas</title>
<para>To give the bonwick home filesystem a quota of 10 GB:</para>
<screen>
# zfs set quota=10G tank/home/bonwick
</screen>
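<para>The setting can be verified with zfs get, mirroring the reservation example below; a sketch of the expected output:</para>
<screen>
# zfs get quota tank/home/bonwick
NAME               PROPERTY  VALUE  SOURCE
tank/home/bonwick  quota     10G    local
</screen>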
</section>
<section>
<title>Setting Reservations</title>
<screen>
# zfs set reservation=5G tank/home/moore
# zfs get reservation tank/home/moore
NAME PROPERTY VALUE SOURCE
tank/home/moore reservation 5.00G local
</screen>
</section>
<section>
<title>Querying Filesystem Information</title>
<screen>
freebsd# zfs list
NAME USED AVAIL REFER MOUNTPOINT
tank 97.5K 1.95G 18K /tank
tank/neo 18K 1.95G 18K /tank/neo
</screen>
</section>
<section>
<title>Renaming a Filesystem</title>
<screen>
# zfs rename tank/home/maybee tank/ws/maybee
</screen>
</section>
<section>
<title>Destroying a Filesystem</title>
<screen>
# zfs destroy tank/home/tabriz
</screen>
<para>If the filesystem cannot be unmounted, use the -f option to force destruction:</para>
<screen>
# zfs destroy tank/home/ahrens
cannot unmount 'tank/home/ahrens': Device busy
# zfs destroy -f tank/home/ahrens
</screen>
<screen>
# zfs destroy -r tank/home/schrock
cannot destroy 'tank/home/schrock': filesystem has dependant clones
use '-R' to destroy the following datasets:
tank/clones/schrock-clone
# zfs destroy -R tank/home/schrock
</screen>
</section>
</section>
<section id="zfs.mount">
<title>zfs mount/umount</title>
<para>Legacy mount points must be managed through legacy tools. Attempting to use the ZFS tools results in an error.</para>
<screen>
# zfs mount pool/home/billm
cannot mount 'pool/home/billm': legacy mountpoint
use mount(1M) to mount this filesystem
# mount -F zfs tank/home/billm
</screen>
<section>
<title>Temporary Mount Properties</title>
<screen>
# zfs mount -o ro tank/home/perrin
# zfs mount -o remount,noatime tank/home/perrin
# zfs get atime tank/home/perrin
NAME PROPERTY VALUE SOURCE
tank/home/perrin atime off temporary
</screen>
</section>
<section>
<title>Mounting File Systems</title>
<para>mount</para>
<screen>
freebsd# zfs mount zfs/www
freebsd# mount
/dev/ad0s1a on / (ufs, local)
devfs on /dev (devfs, local, multilabel)
/dev/ad0s1e on /tmp (ufs, local, soft-updates)
/dev/ad0s1f on /usr (ufs, local, soft-updates)
/dev/ad0s1d on /var (ufs, local, soft-updates)
zfs on /zfs (zfs, local)
zfs/www on /zfs/www (zfs, local)
</screen>
<para>The -a option mounts all ZFS-managed filesystems. Filesystems with legacy mountpoints are not mounted.</para>
<screen>
# zfs mount -a
</screen>
</section>
<section>
<title>Unmounting File Systems</title>
<para>umount</para>
<screen>
freebsd# zfs umount /zfs/www
freebsd# mount
/dev/ad0s1a on / (ufs, local)
devfs on /dev (devfs, local, multilabel)
/dev/ad0s1e on /tmp (ufs, local, soft-updates)
/dev/ad0s1f on /usr (ufs, local, soft-updates)
/dev/ad0s1d on /var (ufs, local, soft-updates)
zfs on /zfs (zfs, local)
</screen>
</section>
<section>
<title>Legacy Mount Points</title>
<screen>
freebsd# zfs set mountpoint=/tank tank
freebsd# zfs mount -a
freebsd# mount
/dev/ad0s1a on / (ufs, local)
devfs on /dev (devfs, local, multilabel)
/dev/ad0s1e on /tmp (ufs, local, soft-updates)
/dev/ad0s1f on /usr (ufs, local, soft-updates)
/dev/ad0s1d on /var (ufs, local, soft-updates)
tank on /tank (zfs, local)
</screen>
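<para>A filesystem can also be given a true legacy mountpoint, so that it is handled by mount(8) and /etc/fstab rather than by ZFS. A minimal sketch; the /etc/fstab entry is an assumption about your own layout:</para>
<screen>
freebsd# zfs set mountpoint=legacy tank
freebsd# mount -t zfs tank /tank
freebsd# echo "tank /tank zfs rw 0 0" >> /etc/fstab
</screen>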
</section>
</section>
<section id="zfs.sharing">
<title>Sharing ZFS File Systems</title>
<section>
<title>Controlling Share Semantics</title>
<screen>
freebsd# zfs set sharenfs=on zfs/www
</screen>
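<para>The current setting can be confirmed with zfs get; a sketch of the expected output:</para>
<screen>
freebsd# zfs get sharenfs zfs/www
NAME     PROPERTY  VALUE  SOURCE
zfs/www  sharenfs  on     local
</screen>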
</section>
<section>
<title>Unsharing Filesystems</title>
<screen>
freebsd# zfs unshare zfs/www
</screen>
<para>This command unshares the zfs/www filesystem. To unshare all ZFS filesystems on the system, run:</para>
<screen>
freebsd# zfs unshare -a
</screen>
</section>
<section>
<title>Sharing Filesystems</title>
<screen>
freebsd# zfs share zfs/www
</screen>
<para>You can also share all ZFS filesystems on the system:</para>
<screen>
# zfs share -a
</screen>
</section>
</section>
<section id="zfs.device">
<title>Device Management</title>
<section>
<title>Adding Devices to a Pool</title>
<screen>
# zpool add scoop mirror c0t1d0 c1t1d0
</screen>
</section>
<section>
<title>Onlining and Offlining Devices</title>
<section>
<title>Taking a Device Offline</title>
<screen>
# zpool offline tank c0t0d0
bringing device 'c0t0d0' offline
</screen>
</section>
<section>
<title>Bringing a Device Online</title>
<screen>
# zpool online tank c0t0d0
bringing device 'c0t0d0' online
</screen>
</section>
</section>
<section>
<title>Replacing Devices</title>
<para>You can replace a device in a storage pool by using the zpool replace command.</para>
<screen>
# zpool replace tank c0t0d0 c0t0d1
</screen>
<para>In the above example, the previous device, c0t0d0, is replaced by c0t0d1.</para>
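<para>If the replacement disk takes the old one's place at the same device name, only a single device argument is needed; a sketch:</para>
<screen>
# zpool replace tank c0t0d0
</screen>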
</section>
</section>
<section id="zfs.io">
<title>I/O Statistics</title>
<screen>
freebsd# zpool iostat
capacity operations bandwidth
pool used avail read write read write
---------- ----- ----- ----- ----- ----- -----
tank 73.5K 1.98G 0 2 119 2.10K
</screen>
<para>Give a pool name and an interval in seconds to display statistics repeatedly:</para>
<screen>
freebsd# zpool iostat
capacity operations bandwidth
pool used avail read write read write
---------- ----- ----- ----- ----- ----- -----
tank 73.5K 1.98G 0 2 119 2.10K
freebsd# zpool iostat tank 2
capacity operations bandwidth
pool used avail read write read write
---------- ----- ----- ----- ----- ----- -----
tank 73.5K 1.98G 0 1 84 1.48K
tank 73.5K 1.98G 0 0 0 0
tank 73.5K 1.98G 0 0 0 0
tank 73.5K 1.98G 0 0 0 0
tank 73.5K 1.98G 0 0 0 0
tank 73.5K 1.98G 0 0 0 0
tank 73.5K 1.98G 0 0 0 0
tank 73.5K 1.98G 0 0 0 0
tank 73.5K 1.98G 0 0 0 0
tank 73.5K 1.98G 0 0 0 0
tank 73.5K 1.98G 0 0 0 0
tank 73.5K 1.98G 0 0 0 0
tank 73.5K 1.98G 0 0 0 0
</screen>
<section>
<title>Virtual Device Statistics</title>
<screen>
freebsd# zpool iostat -v
capacity operations bandwidth
pool used avail read write read write
---------- ----- ----- ----- ----- ----- -----
tank 73.5K 1.98G 0 0 45 824
mirror 73.5K 1.98G 0 0 45 824
ad1 - - 0 0 1.12K 9.04K
ad3 - - 0 0 778 9.04K
---------- ----- ----- ----- ----- ----- -----
</screen>
<screen>
freebsd# zpool iostat -v tank 5
capacity operations bandwidth
pool used avail read write read write
---------- ----- ----- ----- ----- ----- -----
tank 124K 1.98G 0 0 12 367
mirror 124K 1.98G 0 0 12 367
ad1 - - 0 0 297 773
ad3 - - 0 0 270 773
---------- ----- ----- ----- ----- ----- -----
capacity operations bandwidth
pool used avail read write read write
---------- ----- ----- ----- ----- ----- -----
tank 124K 1.98G 0 0 0 0
mirror 124K 1.98G 0 0 0 0
ad1 - - 0 0 0 0
ad3 - - 0 0 0 0
---------- ----- ----- ----- ----- ----- -----
</screen>
</section>
</section>
<section id="zfs.status">
<title>Health Status</title>
<section>
<title>Basic Health Status</title>
<screen>
freebsd# zpool status -x
all pools are healthy
freebsd#
</screen>
</section>
<section>
<title>Detailed Health Status</title>
<screen>
freebsd# zpool status -x
all pools are healthy
freebsd# zpool status -v tank
pool: tank
state: ONLINE
scrub: none requested
config:
NAME STATE READ WRITE CKSUM
tank ONLINE 0 0 0
mirror ONLINE 0 0 0
ad1 ONLINE 0 0 0
ad3 ONLINE 0 0 0
errors: No known data errors
</screen>
</section>
</section>
<section id="zfs.export">
<title>Storage Pool Migration</title>
<section>
<title>Exporting a Pool</title>
<screen>
# zpool export tank
</screen>
<screen>
# zpool export tank
cannot unmount '/export/home/eschrock': Device busy
# zpool export -f tank
</screen>
</section>
<section>
<title>Importing Pools</title>
<screen>
# zpool import tank
</screen>
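<para>Running zpool import with no pool name lists exported pools that are available for import, showing each pool's name, numeric id, state and configuration; a sketch:</para>
<screen>
# zpool import
</screen>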
</section>
</section>
<section id="zfs.get">
<title>Querying Properties</title>
<screen>
freebsd# zfs get all tank
NAME PROPERTY VALUE SOURCE
tank type filesystem -
tank creation Sat Jun 19 17:49 2010 -
tank used 98.5K -
tank available 1.95G -
tank referenced 19K -
tank compressratio 1.00x -
tank mounted yes -
tank quota none default
tank reservation none default
tank recordsize 128K default
tank mountpoint /tank default
tank sharenfs off default
tank checksum on default
tank compression off default
tank atime on default
tank devices on default
tank exec on default
tank setuid on default
tank readonly off default
tank jailed off default
tank snapdir hidden default
tank aclmode groupmask default
tank aclinherit restricted default
tank canmount on default
tank shareiscsi off default
tank xattr off temporary
tank copies 1 default
tank version 3 -
tank utf8only off -
tank normalization none -
tank casesensitivity sensitive -
tank vscan off default
tank nbmand off default
tank sharesmb off default
tank refquota none default
tank refreservation none default
tank primarycache all default
tank secondarycache all default
tank usedbysnapshots 0 -
tank usedbydataset 19K -
tank usedbychildren 79.5K -
tank usedbyrefreservation 0 -
</screen>
</section>
<section id="zfs.backup">
<title>Backing Up and Restoring ZFS Data</title>
<section>
<title>Backing Up a ZFS Snapshot</title>
<screen>
<![CDATA[
# zfs backup tank/dana@111505 > /dev/rmt/0
# zfs backup pool/fs@snap | gzip > backupfile.gz
]]>
</screen>
</section>
<section>
<title>Restoring a ZFS Snapshot</title>
<screen>
<![CDATA[
# zfs backup tank/gozer@111105 > /dev/rmt/0
.
.
.
# zfs restore tank/gozer2@today < /dev/rmt/0
# zfs rename tank/gozer tank/gozer.old
# zfs rename tank/gozer2 tank/gozer
]]>
</screen>
<screen>
<![CDATA[
# zfs rollback tank/dana@111505
cannot rollback to 'tank/dana@111505': more recent snapshots exist
use '-r' to force deletion of the following snapshots:
tank/dana@now
# zfs rollback -r tank/dana@111505
# zfs restore tank/dana < /dev/rmt/0
]]>
</screen>
</section>
<section>
<title>Remote Replication of a ZFS File System</title>
<screen>
# zfs backup tank/neo@today | ssh newsys zfs restore sandbox/restfs@today
</screen>
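<para>On current releases the backup and restore subcommands shown in this chapter have been renamed; the same replication is performed with zfs send and zfs receive. A sketch of the equivalent command:</para>
<screen>
# zfs send tank/neo@today | ssh newsys zfs receive sandbox/restfs@today
</screen>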
</section>
</section>
<section id="zfs.snapshot">
<title>ZFS Snapshots and Clones</title>
<section>
<title>ZFS Snapshots</title>
<para>Snapshots are named filesystem@snapname or volume@snapname.</para>
<section>
<title>Creating ZFS Snapshots</title>
<para>The following example creates a snapshot of tank/neo that is named friday.</para>
<screen>
freebsd# zfs snapshot tank/neo@friday
</screen>
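<para>The -r option snapshots a filesystem together with all of its descendant filesystems in one step; a sketch:</para>
<screen>
freebsd# zfs snapshot -r tank@friday
</screen>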
</section>
<section>
<title>Destroying ZFS Snapshots</title>
<para>Snapshots are destroyed by using the zfs destroy command.</para>
<screen>
# zfs destroy tank/home/ahrens@friday
</screen>
</section>
<section>
<title>Renaming ZFS Snapshots</title>
<screen>
# zfs rename tank/home/cindys@111205 pool/home/cindys@today
</screen>
</section>
<section>
<title>Displaying and Accessing ZFS Snapshots</title>
<screen>
freebsd# zfs list -t snapshot
NAME USED AVAIL REFER MOUNTPOINT
tank/neo@friday 0 - 18K -
</screen>
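<para>Snapshot contents can also be reached through the .zfs/snapshot directory at the root of the filesystem; the snapdir property defaults to hidden, so make it visible first. A sketch:</para>
<screen>
freebsd# zfs set snapdir=visible tank/neo
freebsd# ls /tank/neo/.zfs/snapshot
friday
</screen>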
</section>
<section>
<title>Rolling Back to a Snapshot</title>
<screen>
# zfs rollback pool/home/ahrens@tuesday
cannot rollback to 'pool/home/ahrens@tuesday': more recent snapshots exist
use '-r' to force deletion of the following snapshots:
pool/home/ahrens@wednesday
pool/home/ahrens@thursday
# zfs rollback -r pool/home/ahrens@tuesday
</screen>
</section>
</section>
<section>
<title>ZFS Clones</title>
<section>
<title>Creating a Clone</title>
<screen>
# zfs clone pool/ws/gate@yesterday pool/home/ahrens/bug123
# zfs snapshot projects/newproject@today
# zfs clone projects/newproject@today projects/teamA/tempuser
# zfs set sharenfs=on projects/teamA/tempuser
# zfs set quota=5G projects/teamA/tempuser
</screen>
</section>
<section>
<title>Destroying a Clone</title>
<para>ZFS clones are destroyed with the zfs destroy command.</para>
<screen>
# zfs destroy pool/home/ahrens/bug123
</screen>
<para>Clones must be destroyed before the parent snapshot can be destroyed.</para>
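<para>The destroy order is therefore clone first, then snapshot; alternatively, zfs promote reverses the dependency so that the original filesystem can be destroyed instead. A sketch using the names from the example above:</para>
<screen>
# zfs destroy pool/home/ahrens/bug123
# zfs destroy pool/ws/gate@yesterday
</screen>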
</section>
</section>
</section>
<section id="zfs.vol">
<title>Emulated Volumes</title>
<screen>
# zfs create -V 5gb tank/vol
</screen>
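<para>The volume appears as a block device under /dev/zvol and can be used like any other disk, for example to hold a UFS filesystem; a minimal sketch, assuming the tank/vol volume created above:</para>
<screen>
# newfs /dev/zvol/tank/vol
# mount /dev/zvol/tank/vol /mnt
</screen>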
</section>
</chapter>
</part>