/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* Copyright by The HDF Group. *
* All rights reserved. *
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
* the LICENSE file, which can be found at the root of the source code *
* distribution tree, or in https://www.hdfgroup.org/licenses. *
* If you do not have access to either file, you may request a copy from *
* help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Purpose: Abstract indexed (chunked) I/O functions. The logical
* multi-dimensional dataspace is regularly partitioned into
* same-sized "chunks", the first of which is aligned with the
 * logical origin. The chunks are indexed by one of several methods
 * that map a chunk index to a disk address. Each chunk can be
* compressed independently and the chunks may move around in the
* file as their storage requirements change.
*
* Cache: Disk I/O is performed in units of chunks and H5MF_alloc()
* contains code to optionally align chunks on disk block
* boundaries for performance.
*
* The chunk cache is an extendible hash indexed by a function
* of storage B-tree address and chunk N-dimensional offset
* within the dataset. Collisions are not resolved -- one of
* the two chunks competing for the hash slot must be preempted
* from the cache. All entries in the hash also participate in
* a doubly-linked list and entries are penalized by moving them
 * toward the front of the list. When a new chunk is about to
 * be added to the cache, the cache is pruned by preempting
* entries near the front of the list to make room for the new
* entry which is added to the end of the list.
*/
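/* For orientation: chunked storage is requested through the public API before
 * any routine in this file runs. A minimal, illustrative sketch (the file
 * handle "file_id", the dataset name, and the sizes below are examples only):
 *
 *     hsize_t dims[2]       = {1000, 1000};
 *     hsize_t chunk_dims[2] = {100, 100};
 *     hid_t   space = H5Screate_simple(2, dims, NULL);
 *     hid_t   dcpl  = H5Pcreate(H5P_DATASET_CREATE);
 *     H5Pset_chunk(dcpl, 2, chunk_dims);
 *     hid_t   dset  = H5Dcreate2(file_id, "chunked", H5T_NATIVE_INT, space,
 *                                H5P_DEFAULT, dcpl, H5P_DEFAULT);
 *
 * Subsequent H5Dread()/H5Dwrite() calls on such a dataset are dispatched to
 * the chunked layout callbacks defined in this file.
 */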
/****************/
/* Module Setup */
/****************/
#include "H5Dmodule.h" /* This source code file is part of the H5D module */
/***********/
/* Headers */
/***********/
#include "H5private.h" /* Generic Functions */
#ifdef H5_HAVE_PARALLEL
#include "H5ACprivate.h" /* Metadata cache */
#endif /* H5_HAVE_PARALLEL */
#include "H5CXprivate.h" /* API Contexts */
#include "H5Dpkg.h" /* Dataset functions */
#include "H5Eprivate.h" /* Error handling */
#include "H5Fprivate.h" /* File functions */
#include "H5FLprivate.h" /* Free Lists */
#include "H5Iprivate.h" /* IDs */
#include "H5MMprivate.h" /* Memory management */
#include "H5MFprivate.h" /* File memory management */
#include "H5PBprivate.h" /* Page Buffer */
#include "H5SLprivate.h" /* Skip Lists */
#include "H5VMprivate.h" /* Vector and array functions */
/****************/
/* Local Macros */
/****************/
/* Macros for iterating over chunks to operate on */
#define H5D_CHUNK_GET_FIRST_NODE(dinfo) \
(dinfo->layout_io_info.chunk_map->use_single \
? (H5SL_node_t *)(1) \
: H5SL_first(dinfo->layout_io_info.chunk_map->dset_sel_pieces))
#define H5D_CHUNK_GET_NODE_INFO(dinfo, node) \
(dinfo->layout_io_info.chunk_map->use_single ? dinfo->layout_io_info.chunk_map->single_piece_info \
: (H5D_piece_info_t *)H5SL_item(node))
#define H5D_CHUNK_GET_NEXT_NODE(dinfo, node) \
(dinfo->layout_io_info.chunk_map->use_single ? (H5SL_node_t *)NULL : H5SL_next(node))
#define H5D_CHUNK_GET_NODE_COUNT(dinfo) \
(dinfo->layout_io_info.chunk_map->use_single \
? (size_t)1 \
: H5SL_count(dinfo->layout_io_info.chunk_map->dset_sel_pieces))
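/* The iteration macros above are normally used together, roughly in this
 * shape (a sketch of the pattern used by the chunked read/write routines
 * later in this file; "dset_info" stands for the current H5D_dset_io_info_t):
 *
 *     H5SL_node_t *chunk_node = H5D_CHUNK_GET_FIRST_NODE(dset_info);
 *     while (chunk_node) {
 *         H5D_piece_info_t *chunk_info = H5D_CHUNK_GET_NODE_INFO(dset_info, chunk_node);
 *
 *         ... perform I/O on the piece described by chunk_info ...
 *
 *         chunk_node = H5D_CHUNK_GET_NEXT_NODE(dset_info, chunk_node);
 *     }
 */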
/* Sanity check on chunk index types: commonly used by a lot of routines in this file */
#define H5D_CHUNK_STORAGE_INDEX_CHK(storage) \
do { \
assert((H5D_CHUNK_IDX_EARRAY == (storage)->idx_type && H5D_COPS_EARRAY == (storage)->ops) || \
(H5D_CHUNK_IDX_FARRAY == (storage)->idx_type && H5D_COPS_FARRAY == (storage)->ops) || \
(H5D_CHUNK_IDX_BT2 == (storage)->idx_type && H5D_COPS_BT2 == (storage)->ops) || \
(H5D_CHUNK_IDX_BTREE == (storage)->idx_type && H5D_COPS_BTREE == (storage)->ops) || \
(H5D_CHUNK_IDX_SINGLE == (storage)->idx_type && H5D_COPS_SINGLE == (storage)->ops) || \
(H5D_CHUNK_IDX_NONE == (storage)->idx_type && H5D_COPS_NONE == (storage)->ops)); \
} while (0)
/*
* Feature: If this constant is defined then every cache preemption and load
* causes a character to be printed on the standard error stream:
*
* `.': Entry was preempted because it has been completely read or
* completely written but not partially read and not partially
* written. This is often a good reason for preemption because such
* a chunk will be unlikely to be referenced in the near future.
*
* `:': Entry was preempted because it hasn't been used recently.
*
* `#': Entry was preempted because another chunk collided with it. This
* is usually a relatively bad thing. If there are too many of
* these then the number of entries in the cache can be increased.
*
* c: Entry was preempted because the file is closing.
*
* w: A chunk read operation was eliminated because the library is
* about to write new values to the entire chunk. This is a good
* thing, especially on files where the chunk size is the same as
* the disk block size, chunks are aligned on disk block boundaries,
* and the operating system can also eliminate a read operation.
*/
/*#define H5D_CHUNK_DEBUG */
/* Flags for the "edge_chunk_state" field below */
#define H5D_RDCC_DISABLE_FILTERS 0x01U /* Disable filters on this chunk */
#define H5D_RDCC_NEWLY_DISABLED_FILTERS \
0x02U /* Filters have been disabled since \
* the last flush */
/******************/
/* Local Typedefs */
/******************/
/* Raw data chunks are cached. Each entry in the cache is: */
typedef struct H5D_rdcc_ent_t {
bool locked; /*entry is locked in cache */
bool dirty; /*needs to be written to disk? */
bool deleted; /*chunk about to be deleted */
unsigned edge_chunk_state; /*states related to edge chunks (see above) */
hsize_t scaled[H5O_LAYOUT_NDIMS]; /*scaled chunk 'name' (coordinates) */
uint32_t rd_count; /*bytes remaining to be read */
uint32_t wr_count; /*bytes remaining to be written */
H5F_block_t chunk_block; /*offset/length of chunk in file */
hsize_t chunk_idx; /*index of chunk in dataset */
uint8_t *chunk; /*the unfiltered chunk data */
unsigned idx; /*index in hash table */
struct H5D_rdcc_ent_t *next; /*next item in doubly-linked list */
struct H5D_rdcc_ent_t *prev; /*previous item in doubly-linked list */
struct H5D_rdcc_ent_t *tmp_next; /*next item in temporary doubly-linked list */
struct H5D_rdcc_ent_t *tmp_prev; /*previous item in temporary doubly-linked list */
} H5D_rdcc_ent_t;
typedef H5D_rdcc_ent_t *H5D_rdcc_ent_ptr_t; /* For free lists */
/* Callback info for iteration to prune chunks */
typedef struct H5D_chunk_it_ud1_t {
H5D_chunk_common_ud_t common; /* Common info for B-tree user data (must be first) */
const H5D_chk_idx_info_t *idx_info; /* Chunked index info */
const H5D_io_info_t *io_info; /* I/O info for dataset operation */
const H5D_dset_io_info_t *dset_info; /* Dataset specific I/O info */
const hsize_t *space_dim; /* New dataset dimensions */
const bool *shrunk_dim; /* Dimensions which have been shrunk */
H5S_t *chunk_space; /* Dataspace for a chunk */
uint32_t elmts_per_chunk; /* Elements in chunk */
hsize_t *hyper_start; /* Starting location of hyperslab */
H5D_fill_buf_info_t fb_info; /* Dataset's fill buffer info */
bool fb_info_init; /* Whether the fill value buffer has been initialized */
} H5D_chunk_it_ud1_t;
/* Callback info for iteration to obtain chunk address and the index of the chunk for all chunks in the
* B-tree. */
typedef struct H5D_chunk_it_ud2_t {
/* down */
H5D_chunk_common_ud_t common; /* Common info for B-tree user data (must be first) */
/* up */
haddr_t *chunk_addr; /* Array of chunk addresses to fill in */
} H5D_chunk_it_ud2_t;
/* Callback info for iteration to copy data */
typedef struct H5D_chunk_it_ud3_t {
H5D_chunk_common_ud_t common; /* Common info for B-tree user data (must be first) */
H5F_t *file_src; /* Source file for copy */
H5D_chk_idx_info_t *idx_info_dst; /* Dest. chunk index info object */
void *buf; /* Buffer to hold chunk data for read/write */
void *bkg; /* Buffer for background information during type conversion */
size_t buf_size; /* Buffer size */
bool do_convert; /* Whether to perform type conversions */
/* needed for converting variable-length data */
const H5T_t *dt_src; /* Source datatype */
const H5T_t *dt_dst; /* Destination datatype */
const H5T_t *dt_mem; /* Memory datatype */
H5T_path_t *tpath_src_mem; /* Datatype conversion path from source file to memory */
H5T_path_t *tpath_mem_dst; /* Datatype conversion path from memory to dest. file */
void *reclaim_buf; /* Buffer for reclaiming data */
size_t reclaim_buf_size; /* Reclaim buffer size */
uint32_t nelmts; /* Number of elements in buffer */
H5S_t *buf_space; /* Dataspace describing buffer */
/* needed for compressed variable-length data */
const H5O_pline_t *pline; /* Filter pipeline */
unsigned dset_ndims; /* Number of dimensions in dataset */
const hsize_t *dset_dims; /* Dataset dimensions */
/* needed for copy object pointed by refs */
H5O_copy_t *cpy_info; /* Copy options */
/* needed for getting raw data from chunk cache */
bool chunk_in_cache;
uint8_t *chunk; /* the unfiltered chunk data */
} H5D_chunk_it_ud3_t;
/* Callback info for iteration to dump index */
typedef struct H5D_chunk_it_ud4_t {
FILE *stream; /* Output stream */
bool header_displayed; /* Node's header is displayed? */
unsigned ndims; /* Number of dimensions for chunk/dataset */
uint32_t *chunk_dim; /* Chunk dimensions */
} H5D_chunk_it_ud4_t;
/* Callback info for iteration to format convert chunks */
typedef struct H5D_chunk_it_ud5_t {
H5D_chk_idx_info_t *new_idx_info; /* Dest. chunk index info object */
unsigned dset_ndims; /* Number of dimensions in dataset */
hsize_t *dset_dims; /* Dataset dimensions */
} H5D_chunk_it_ud5_t;
/* Callback info for nonexistent readvv operation */
typedef struct H5D_chunk_readvv_ud_t {
unsigned char *rbuf; /* Read buffer to initialize */
const H5D_t *dset; /* Dataset to operate on */
} H5D_chunk_readvv_ud_t;
/* Typedef for chunk info iterator callback */
typedef struct H5D_chunk_info_iter_ud_t {
hsize_t scaled[H5O_LAYOUT_NDIMS]; /* Logical offset of the chunk */
hsize_t ndims; /* Number of dimensions in the dataset */
uint32_t nbytes; /* Size of stored data in the chunk */
unsigned filter_mask; /* Excluded filters */
haddr_t chunk_addr; /* Address of the chunk in file */
hsize_t chunk_idx; /* Chunk index, where the iteration needs to stop */
hsize_t curr_idx; /* Current index, where the iteration is */
unsigned idx_hint; /* Index of chunk in cache, if present */
bool found; /* Whether the chunk was found */
} H5D_chunk_info_iter_ud_t;
#ifdef H5_HAVE_PARALLEL
/* information to construct a collective I/O operation for filling chunks */
typedef struct H5D_chunk_coll_fill_info_t {
size_t num_chunks; /* Number of chunks in the write operation */
struct chunk_coll_fill_info {
haddr_t addr; /* File address of the chunk */
size_t chunk_size; /* Size of the chunk in the file */
bool unfiltered_partial_chunk;
} *chunk_info;
} H5D_chunk_coll_fill_info_t;
#endif /* H5_HAVE_PARALLEL */
typedef struct H5D_chunk_iter_ud_t {
H5D_chunk_iter_op_t op; /* User defined callback */
void *op_data; /* User data for user defined callback */
H5O_layout_chunk_t *chunk; /* Chunk layout */
haddr_t base_addr; /* Base address of the file, taking user block into account */
} H5D_chunk_iter_ud_t;
/********************/
/* Local Prototypes */
/********************/
/* Chunked layout operation callbacks */
static herr_t H5D__chunk_construct(H5F_t *f, H5D_t *dset);
static herr_t H5D__chunk_init(H5F_t *f, const H5D_t *dset, hid_t dapl_id);
static herr_t H5D__chunk_io_init(H5D_io_info_t *io_info, H5D_dset_io_info_t *dinfo);
static herr_t H5D__chunk_io_init_selections(H5D_io_info_t *io_info, H5D_dset_io_info_t *dinfo);
static herr_t H5D__chunk_mdio_init(H5D_io_info_t *io_info, H5D_dset_io_info_t *dinfo);
static herr_t H5D__chunk_read(H5D_io_info_t *io_info, H5D_dset_io_info_t *dinfo);
static herr_t H5D__chunk_write(H5D_io_info_t *io_info, H5D_dset_io_info_t *dinfo);
static herr_t H5D__chunk_flush(H5D_t *dset);
static herr_t H5D__chunk_io_term(H5D_io_info_t *io_info, H5D_dset_io_info_t *di);
static herr_t H5D__chunk_dest(H5D_t *dset);
/* Chunk query operation callbacks */
static int H5D__get_num_chunks_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata);
static int H5D__get_chunk_info_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata);
static int H5D__get_chunk_info_by_coord_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata);
static int H5D__chunk_iter_cb(const H5D_chunk_rec_t *chunk_rec, void *udata);
/* "Nonexistent" layout operation callback */
static ssize_t H5D__nonexistent_readvv(const H5D_io_info_t *io_info, const H5D_dset_io_info_t *dset_info,
size_t chunk_max_nseq, size_t *chunk_curr_seq, size_t chunk_len_arr[],
hsize_t chunk_offset_arr[], size_t mem_max_nseq, size_t *mem_curr_seq,
size_t mem_len_arr[], hsize_t mem_offset_arr[]);
/* Format convert cb */
static int H5D__chunk_format_convert_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata);
/* Helper routines */
static herr_t H5D__chunk_set_info_real(H5O_layout_chunk_t *layout, unsigned ndims, const hsize_t *curr_dims,
const hsize_t *max_dims);
static herr_t H5D__chunk_cinfo_cache_reset(H5D_chunk_cached_t *last);
static herr_t H5D__chunk_cinfo_cache_update(H5D_chunk_cached_t *last, const H5D_chunk_ud_t *udata);
static bool H5D__chunk_cinfo_cache_found(const H5D_chunk_cached_t *last, H5D_chunk_ud_t *udata);
static herr_t H5D__create_piece_map_single(H5D_dset_io_info_t *di, H5D_io_info_t *io_info);
static herr_t H5D__create_piece_file_map_all(H5D_dset_io_info_t *di, H5D_io_info_t *io_info);
static herr_t H5D__create_piece_file_map_hyper(H5D_dset_io_info_t *di, H5D_io_info_t *io_info);
static herr_t H5D__create_piece_mem_map_1d(const H5D_dset_io_info_t *di);
static herr_t H5D__create_piece_mem_map_hyper(const H5D_dset_io_info_t *di);
static herr_t H5D__piece_file_cb(void *elem, const H5T_t *type, unsigned ndims, const hsize_t *coords,
void *_opdata);
static herr_t H5D__piece_mem_cb(void *elem, const H5T_t *type, unsigned ndims, const hsize_t *coords,
void *_opdata);
static herr_t H5D__chunk_may_use_select_io(H5D_io_info_t *io_info, const H5D_dset_io_info_t *dset_info);
static unsigned H5D__chunk_hash_val(const H5D_shared_t *shared, const hsize_t *scaled);
static herr_t H5D__chunk_flush_entry(const H5D_t *dset, H5D_rdcc_ent_t *ent, bool reset);
static herr_t H5D__chunk_cache_evict(const H5D_t *dset, H5D_rdcc_ent_t *ent, bool flush);
static void *H5D__chunk_lock(const H5D_io_info_t *io_info, const H5D_dset_io_info_t *dset_info,
H5D_chunk_ud_t *udata, bool relax, bool prev_unfilt_chunk);
static herr_t H5D__chunk_unlock(const H5D_io_info_t *io_info, const H5D_dset_io_info_t *dset_info,
const H5D_chunk_ud_t *udata, bool dirty, void *chunk, uint32_t naccessed);
static herr_t H5D__chunk_cache_prune(const H5D_t *dset, size_t size);
static herr_t H5D__chunk_prune_fill(H5D_chunk_it_ud1_t *udata, bool new_unfilt_chunk);
#ifdef H5_HAVE_PARALLEL
static herr_t H5D__chunk_collective_fill(const H5D_t *dset, H5D_chunk_coll_fill_info_t *chunk_fill_info,
const void *fill_buf, const void *partial_chunk_fill_buf);
static int H5D__chunk_cmp_coll_fill_info(const void *_entry1, const void *_entry2);
#endif /* H5_HAVE_PARALLEL */
/* Debugging helper routine callback */
static int H5D__chunk_dump_index_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata);
/*********************/
/* Package Variables */
/*********************/
/* Chunked storage layout I/O ops */
const H5D_layout_ops_t H5D_LOPS_CHUNK[1] = {{
H5D__chunk_construct, /* construct */
H5D__chunk_init, /* init */
H5D__chunk_is_space_alloc, /* is_space_alloc */
H5D__chunk_is_data_cached, /* is_data_cached */
H5D__chunk_io_init, /* io_init */
H5D__chunk_mdio_init, /* mdio_init */
H5D__chunk_read, /* ser_read */
H5D__chunk_write, /* ser_write */
NULL, /* readvv */
NULL, /* writevv */
H5D__chunk_flush, /* flush */
H5D__chunk_io_term, /* io_term */
H5D__chunk_dest /* dest */
}};
/*******************/
/* Local Variables */
/*******************/
/* "nonexistent" storage layout I/O ops */
static const H5D_layout_ops_t H5D_LOPS_NONEXISTENT[1] = {
{NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, H5D__nonexistent_readvv, NULL, NULL, NULL, NULL}};
/* Declare a free list to manage the H5F_rdcc_ent_ptr_t sequence information */
H5FL_SEQ_DEFINE_STATIC(H5D_rdcc_ent_ptr_t);
/* Declare a free list to manage H5D_rdcc_ent_t objects */
H5FL_DEFINE_STATIC(H5D_rdcc_ent_t);
/* Declare a free list to manage the H5D_chunk_info_t struct */
H5FL_DEFINE_STATIC(H5D_chunk_map_t);
/* Declare a free list to manage the H5D_piece_info_t struct */
H5FL_DEFINE(H5D_piece_info_t);
/* Declare a free list to manage the chunk sequence information */
H5FL_BLK_DEFINE_STATIC(chunk);
/* Declare extern free list to manage the H5S_sel_iter_t struct */
H5FL_EXTERN(H5S_sel_iter_t);
/*-------------------------------------------------------------------------
* Function: H5D__chunk_direct_write
*
* Purpose: Internal routine to write a chunk directly into the file.
*
* Return: Non-negative on success/Negative on failure
*
*-------------------------------------------------------------------------
*/
herr_t
H5D__chunk_direct_write(H5D_t *dset, uint32_t filters, hsize_t *offset, uint32_t data_size, const void *buf)
{
const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset layout */
H5D_chunk_ud_t udata; /* User data for querying chunk info */
H5F_block_t old_chunk; /* Offset/length of old chunk */
H5D_chk_idx_info_t idx_info; /* Chunked index info */
hsize_t scaled[H5S_MAX_RANK]; /* Scaled coordinates for this chunk */
bool need_insert = false; /* Whether the chunk needs to be inserted into the index */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE_TAG(dset->oloc.addr)
/* Sanity checks */
assert(layout->type == H5D_CHUNKED);
/* Allocate dataspace and initialize it if it hasn't been. */
if (!H5D__chunk_is_space_alloc(&layout->storage))
if (H5D__alloc_storage(dset, H5D_ALLOC_WRITE, false, NULL) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize storage");
/* Calculate the index of this chunk */
H5VM_chunk_scaled(dset->shared->ndims, offset, layout->u.chunk.dim, scaled);
scaled[dset->shared->ndims] = 0;
/* Find out the file address of the chunk (if any) */
if (H5D__chunk_lookup(dset, scaled, &udata) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address");
/* Sanity check */
assert((H5_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length > 0) ||
(!H5_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length == 0));
/* Set the file block information for the old chunk */
/* (Which is only defined when overwriting an existing chunk) */
old_chunk.offset = udata.chunk_block.offset;
old_chunk.length = udata.chunk_block.length;
/* Check if the chunk needs to be inserted (it also could exist already
* and the chunk allocate operation could resize it)
*/
/* Compose chunked index info struct */
idx_info.f = dset->oloc.file;
idx_info.pline = &(dset->shared->dcpl_cache.pline);
idx_info.layout = &(dset->shared->layout.u.chunk);
idx_info.storage = &(dset->shared->layout.storage.u.chunk);
/* Set up the size of chunk for user data */
udata.chunk_block.length = data_size;
if (0 == idx_info.pline->nused && H5_addr_defined(old_chunk.offset))
/* If there are no filters and we are overwriting the chunk we can just set values */
need_insert = false;
else {
        /* Otherwise, create the chunk if it doesn't exist, or reallocate the chunk
         * if its size has changed.
         */
if (H5D__chunk_file_alloc(&idx_info, &old_chunk, &udata.chunk_block, &need_insert, scaled) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "unable to allocate chunk");
/* Cache the new chunk information */
H5D__chunk_cinfo_cache_update(&dset->shared->cache.chunk.last, &udata);
} /* end else */
/* Make sure the address of the chunk is returned. */
if (!H5_addr_defined(udata.chunk_block.offset))
HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "chunk address isn't defined");
/* Evict the (old) entry from the cache if present, but do not flush
* it to disk */
if (UINT_MAX != udata.idx_hint) {
const H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache */
if (H5D__chunk_cache_evict(dset, rdcc->slot[udata.idx_hint], false) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTREMOVE, FAIL, "unable to evict chunk");
} /* end if */
/* Write the data to the file */
if (H5F_shared_block_write(H5F_SHARED(dset->oloc.file), H5FD_MEM_DRAW, udata.chunk_block.offset,
data_size, buf) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to write raw data to file");
/* Insert the chunk record into the index */
if (need_insert && layout->storage.u.chunk.ops->insert) {
/* Set the chunk's filter mask to the new settings */
udata.filter_mask = filters;
if ((layout->storage.u.chunk.ops->insert)(&idx_info, &udata, dset) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert chunk addr into index");
} /* end if */
done:
FUNC_LEAVE_NOAPI_TAG(ret_value)
} /* end H5D__chunk_direct_write() */
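/* H5D__chunk_direct_write() is reached from the public H5Dwrite_chunk() call.
 * A hedged usage sketch (dset_id is an open dataset; chunk_nbytes/chunk_buf
 * are illustrative, and the buffer must already hold the chunk in its stored
 * form, i.e. with any filters applied by the caller):
 *
 *     hsize_t  offset[2] = {0, 0};
 *     uint32_t filters   = 0;
 *     H5Dwrite_chunk(dset_id, H5P_DEFAULT, filters, offset,
 *                    chunk_nbytes, chunk_buf);
 */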
/*-------------------------------------------------------------------------
* Function: H5D__chunk_direct_read
*
* Purpose: Internal routine to read a chunk directly from the file.
*
* Return: Non-negative on success/Negative on failure
*
*-------------------------------------------------------------------------
*/
herr_t
H5D__chunk_direct_read(const H5D_t *dset, hsize_t *offset, uint32_t *filters, void *buf)
{
const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset layout */
const H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /* raw data chunk cache */
H5D_chunk_ud_t udata; /* User data for querying chunk info */
hsize_t scaled[H5S_MAX_RANK]; /* Scaled coordinates for this chunk */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE_TAG(dset->oloc.addr)
/* Check args */
assert(dset && H5D_CHUNKED == layout->type);
assert(offset);
assert(filters);
assert(buf);
*filters = 0;
/* Allocate dataspace and initialize it if it hasn't been. */
if (!H5D__chunk_is_space_alloc(&layout->storage) && !H5D__chunk_is_data_cached(dset->shared))
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "storage is not initialized");
/* Calculate the index of this chunk */
H5VM_chunk_scaled(dset->shared->ndims, offset, layout->u.chunk.dim, scaled);
scaled[dset->shared->ndims] = 0;
/* Reset fields about the chunk we are looking for */
udata.filter_mask = 0;
udata.chunk_block.offset = HADDR_UNDEF;
udata.chunk_block.length = 0;
udata.idx_hint = UINT_MAX;
/* Find out the file address of the chunk */
if (H5D__chunk_lookup(dset, scaled, &udata) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address");
/* Sanity check */
assert((H5_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length > 0) ||
(!H5_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length == 0));
/* Check if the requested chunk exists in the chunk cache */
if (UINT_MAX != udata.idx_hint) {
H5D_rdcc_ent_t *ent = rdcc->slot[udata.idx_hint];
bool flush;
/* Sanity checks */
assert(udata.idx_hint < rdcc->nslots);
assert(rdcc->slot[udata.idx_hint]);
        flush = ent->dirty;
/* Flush the chunk to disk and clear the cache entry */
if (H5D__chunk_cache_evict(dset, rdcc->slot[udata.idx_hint], flush) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTREMOVE, FAIL, "unable to evict chunk");
/* Reset fields about the chunk we are looking for */
udata.filter_mask = 0;
udata.chunk_block.offset = HADDR_UNDEF;
udata.chunk_block.length = 0;
udata.idx_hint = UINT_MAX;
/* Get the new file address / chunk size after flushing */
if (H5D__chunk_lookup(dset, scaled, &udata) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address");
}
/* Make sure the address of the chunk is returned. */
if (!H5_addr_defined(udata.chunk_block.offset))
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "chunk address isn't defined");
/* Read the chunk data into the supplied buffer */
if (H5F_shared_block_read(H5F_SHARED(dset->oloc.file), H5FD_MEM_DRAW, udata.chunk_block.offset,
udata.chunk_block.length, buf) < 0)
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk");
/* Return the filter mask */
*filters = udata.filter_mask;
done:
FUNC_LEAVE_NOAPI_TAG(ret_value)
} /* end H5D__chunk_direct_read() */
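/* H5D__chunk_direct_read() is the internal side of the public H5Dread_chunk()
 * call. A hedged usage sketch (dset_id is an open dataset; chunk_buf must be
 * large enough to hold the chunk as stored on disk, i.e. after any filters):
 *
 *     hsize_t  offset[2] = {0, 0};
 *     uint32_t filters   = 0;
 *     H5Dread_chunk(dset_id, H5P_DEFAULT, offset, &filters, chunk_buf);
 */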
/*-------------------------------------------------------------------------
* Function: H5D__get_chunk_storage_size
*
* Purpose: Internal routine to read the storage size of a chunk on disk.
*
* Return: Non-negative on success/Negative on failure
*
*-------------------------------------------------------------------------
*/
herr_t
H5D__get_chunk_storage_size(H5D_t *dset, const hsize_t *offset, hsize_t *storage_size)
{
const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset layout */
const H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /* raw data chunk cache */
hsize_t scaled[H5S_MAX_RANK]; /* Scaled coordinates for this chunk */
H5D_chunk_ud_t udata; /* User data for querying chunk info */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE_TAG(dset->oloc.addr)
/* Check args */
assert(dset && H5D_CHUNKED == layout->type);
assert(offset);
assert(storage_size);
/* Allocate dataspace and initialize it if it hasn't been. */
if (!(*layout->ops->is_space_alloc)(&layout->storage))
HGOTO_DONE(SUCCEED);
/* Calculate the index of this chunk */
H5VM_chunk_scaled(dset->shared->ndims, offset, layout->u.chunk.dim, scaled);
scaled[dset->shared->ndims] = 0;
/* Reset fields about the chunk we are looking for */
udata.chunk_block.offset = HADDR_UNDEF;
udata.chunk_block.length = 0;
udata.idx_hint = UINT_MAX;
/* Find out the file address of the chunk */
if (H5D__chunk_lookup(dset, scaled, &udata) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address");
/* Sanity check */
assert((H5_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length > 0) ||
(!H5_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length == 0));
/* The requested chunk is not in cache or on disk */
if (!H5_addr_defined(udata.chunk_block.offset) && UINT_MAX == udata.idx_hint)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "chunk storage is not allocated");
/* Check if there are filters registered to the dataset */
if (dset->shared->dcpl_cache.pline.nused > 0) {
/* Check if the requested chunk exists in the chunk cache */
if (UINT_MAX != udata.idx_hint) {
H5D_rdcc_ent_t *ent = rdcc->slot[udata.idx_hint];
/* Sanity checks */
assert(udata.idx_hint < rdcc->nslots);
assert(rdcc->slot[udata.idx_hint]);
/* If the cached chunk is dirty, it must be flushed to get accurate size */
if (ent->dirty == true) {
/* Flush the chunk to disk and clear the cache entry */
if (H5D__chunk_cache_evict(dset, rdcc->slot[udata.idx_hint], true) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTREMOVE, FAIL, "unable to evict chunk");
/* Reset fields about the chunk we are looking for */
udata.chunk_block.offset = HADDR_UNDEF;
udata.chunk_block.length = 0;
udata.idx_hint = UINT_MAX;
/* Get the new file address / chunk size after flushing */
if (H5D__chunk_lookup(dset, scaled, &udata) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address");
}
}
/* Make sure the address of the chunk is returned. */
if (!H5_addr_defined(udata.chunk_block.offset))
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "chunk address isn't defined");
/* Return the chunk size on disk */
*storage_size = udata.chunk_block.length;
}
/* There are no filters registered, return the chunk size from the storage layout */
else
*storage_size = dset->shared->layout.u.chunk.size;
done:
FUNC_LEAVE_NOAPI_TAG(ret_value)
} /* H5D__get_chunk_storage_size */
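/* H5D__get_chunk_storage_size() is the internal side of the public
 * H5Dget_chunk_storage_size() call. A hedged usage sketch (dset_id is an open
 * dataset handle):
 *
 *     hsize_t offset[2] = {0, 0};
 *     hsize_t nbytes    = 0;
 *     H5Dget_chunk_storage_size(dset_id, offset, &nbytes);
 *
 * For filtered datasets the value reflects the (possibly compressed) size on
 * disk; for unfiltered datasets it is simply the chunk size from the layout.
 */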
/*-------------------------------------------------------------------------
* Function: H5D__chunk_set_info_real
*
* Purpose: Internal routine to set the information about chunks for a dataset
*
* Return: SUCCEED/FAIL
*-------------------------------------------------------------------------
*/
static herr_t
H5D__chunk_set_info_real(H5O_layout_chunk_t *layout, unsigned ndims, const hsize_t *curr_dims,
const hsize_t *max_dims)
{
herr_t ret_value = SUCCEED;
FUNC_ENTER_PACKAGE
assert(layout);
assert(curr_dims);
/* Can happen when corrupt files are parsed */
if (ndims == 0)
HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "number of dimensions cannot be zero");
/* Compute the # of chunks in dataset dimensions */
layout->nchunks = 1;
layout->max_nchunks = 1;
for (unsigned u = 0; u < ndims; u++) {
/* Round up to the next integer # of chunks, to accommodate partial chunks */
layout->chunks[u] = ((curr_dims[u] + layout->dim[u]) - 1) / layout->dim[u];
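        /* Worked example of the round-up (illustrative numbers): a dimension of
         * 10 elements with a chunk extent of 4 gives (10 + 4 - 1) / 4 = 3 chunks,
         * the last of which is a partial chunk */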
if (H5S_UNLIMITED == max_dims[u])
layout->max_chunks[u] = H5S_UNLIMITED;
else {
/* Sanity check */
if (layout->dim[u] == 0)
HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "dimension size must be > 0, dim = %u ", u);
layout->max_chunks[u] = ((max_dims[u] + layout->dim[u]) - 1) / layout->dim[u];
}
/* Accumulate the # of chunks */
layout->nchunks *= layout->chunks[u];
layout->max_nchunks *= layout->max_chunks[u];
}
/* Get the "down" sizes for each dimension */
H5VM_array_down(ndims, layout->chunks, layout->down_chunks);
H5VM_array_down(ndims, layout->max_chunks, layout->max_down_chunks);
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__chunk_set_info_real() */
/*-------------------------------------------------------------------------
* Function: H5D__chunk_set_info
*
* Purpose: Sets the information about chunks for a dataset
*
* Return: Non-negative on success/Negative on failure
*
*-------------------------------------------------------------------------
*/
herr_t
H5D__chunk_set_info(const H5D_t *dset)
{
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
/* Sanity checks */
assert(dset);
/* Set the base layout information */
if (H5D__chunk_set_info_real(&dset->shared->layout.u.chunk, dset->shared->ndims, dset->shared->curr_dims,
dset->shared->max_dims) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set layout's chunk info");
/* Call the index's "resize" callback */
if (dset->shared->layout.storage.u.chunk.ops->resize &&
(dset->shared->layout.storage.u.chunk.ops->resize)(&dset->shared->layout.u.chunk) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to resize chunk index information");
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__chunk_set_info() */
/*-------------------------------------------------------------------------
* Function: H5D__chunk_set_sizes
*
* Purpose: Sets chunk and type sizes.
*
* Return: SUCCEED/FAIL
*
*-------------------------------------------------------------------------
*/
herr_t
H5D__chunk_set_sizes(H5D_t *dset)
{
uint64_t chunk_size; /* Size of chunk in bytes */
unsigned max_enc_bytes_per_dim; /* Max. number of bytes required to encode this dimension */
unsigned u; /* Iterator */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
/* Sanity checks */
assert(dset);
/* Increment # of chunk dimensions, to account for datatype size as last element */
dset->shared->layout.u.chunk.ndims++;
/* Set the last dimension of the chunk size to the size of the datatype */
dset->shared->layout.u.chunk.dim[dset->shared->layout.u.chunk.ndims - 1] =
(uint32_t)H5T_GET_SIZE(dset->shared->type);
/* Compute number of bytes to use for encoding chunk dimensions */
max_enc_bytes_per_dim = 0;
for (u = 0; u < (unsigned)dset->shared->layout.u.chunk.ndims; u++) {
unsigned enc_bytes_per_dim; /* Number of bytes required to encode this dimension */
/* Get encoded size of dim, in bytes */
enc_bytes_per_dim = (H5VM_log2_gen(dset->shared->layout.u.chunk.dim[u]) + 8) / 8;
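        /* Worked example (illustrative): a chunk dimension of 300 has
         * H5VM_log2_gen(300) == 8, so it encodes in (8 + 8) / 8 = 2 bytes,
         * enough for the 9 bits the value actually needs */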
/* Check if this is the largest value so far */
if (enc_bytes_per_dim > max_enc_bytes_per_dim)
max_enc_bytes_per_dim = enc_bytes_per_dim;
} /* end for */
assert(max_enc_bytes_per_dim > 0 && max_enc_bytes_per_dim <= 8);
dset->shared->layout.u.chunk.enc_bytes_per_dim = max_enc_bytes_per_dim;
/* Compute and store the total size of a chunk */
/* (Use 64-bit value to ensure that we can detect >4GB chunks) */
for (u = 1, chunk_size = (uint64_t)dset->shared->layout.u.chunk.dim[0];
u < dset->shared->layout.u.chunk.ndims; u++)
chunk_size *= (uint64_t)dset->shared->layout.u.chunk.dim[u];
/* Check for chunk larger than can be represented in 32-bits */
/* (Chunk size is encoded in 32-bit value in v1 B-tree records) */
if (chunk_size > (uint64_t)0xffffffff)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "chunk size must be < 4GB");
H5_CHECKED_ASSIGN(dset->shared->layout.u.chunk.size, uint32_t, chunk_size, uint64_t);
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__chunk_set_sizes */
/*-------------------------------------------------------------------------
* Function: H5D__chunk_construct
*
* Purpose: Constructs new chunked layout information for dataset
*
* Return: Non-negative on success/Negative on failure
*
*-------------------------------------------------------------------------
*/
static herr_t
H5D__chunk_construct(H5F_t H5_ATTR_UNUSED *f, H5D_t *dset)
{
unsigned u; /* Local index variable */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
/* Sanity checks */
assert(f);
assert(dset);
/* Check for invalid chunk dimension rank */
if (0 == dset->shared->layout.u.chunk.ndims)
HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "no chunk information set?");
if (dset->shared->layout.u.chunk.ndims != dset->shared->ndims)
HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "dimensionality of chunks doesn't match the dataspace");
/* Set chunk sizes */
if (H5D__chunk_set_sizes(dset) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "unable to set chunk sizes");
assert((unsigned)(dset->shared->layout.u.chunk.ndims) <= NELMTS(dset->shared->layout.u.chunk.dim));
/* Chunked storage is not compatible with external storage (currently) */
if (dset->shared->dcpl_cache.efl.nused > 0)
HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "external storage not supported with chunked layout");
/* Sanity check dimensions */
for (u = 0; u < dset->shared->layout.u.chunk.ndims - 1; u++) {
/* Don't allow zero-sized chunk dimensions */
if (0 == dset->shared->layout.u.chunk.dim[u])
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "chunk size must be > 0, dim = %u ", u);
/*
* The chunk size of a dimension with a fixed size cannot exceed
* the maximum dimension size. If any dimension size is zero, there
* will be no such restriction.
*/
if (dset->shared->curr_dims[u] && dset->shared->max_dims[u] != H5S_UNLIMITED &&
dset->shared->max_dims[u] < dset->shared->layout.u.chunk.dim[u])
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL,
"chunk size must be <= maximum dimension size for fixed-sized dimensions");
} /* end for */
/* Reset address and pointer of the array struct for the chunked storage index */
if (H5D_chunk_idx_reset(&dset->shared->layout.storage.u.chunk, true) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to reset chunked storage index");
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__chunk_construct() */
/*-------------------------------------------------------------------------
* Function: H5D__chunk_init
*
* Purpose: Initialize the raw data chunk cache for a dataset. This is
* called when the dataset is initialized.
*
* Return: Non-negative on success/Negative on failure
*
*-------------------------------------------------------------------------
*/
static herr_t
H5D__chunk_init(H5F_t *f, const H5D_t *const dset, hid_t dapl_id)
{
H5D_chk_idx_info_t idx_info; /* Chunked index info */
H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /* Convenience pointer to dataset's chunk cache */
H5P_genplist_t *dapl; /* Data access property list object pointer */
H5O_storage_chunk_t *sc = &(dset->shared->layout.storage.u.chunk);
bool idx_init = false;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
/* Sanity check */
assert(f);
assert(dset);
H5D_CHUNK_STORAGE_INDEX_CHK(sc);
if (NULL == (dapl = (H5P_genplist_t *)H5I_object(dapl_id)))
        HGOTO_ERROR(H5E_ID, H5E_BADID, FAIL, "can't find object for dapl ID");
/* Use the properties in dapl_id if they have been set, otherwise use the properties from the file */
if (H5P_get(dapl, H5D_ACS_DATA_CACHE_NUM_SLOTS_NAME, &rdcc->nslots) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get data cache number of slots");
if (rdcc->nslots == H5D_CHUNK_CACHE_NSLOTS_DEFAULT)
rdcc->nslots = H5F_RDCC_NSLOTS(f);
if (H5P_get(dapl, H5D_ACS_DATA_CACHE_BYTE_SIZE_NAME, &rdcc->nbytes_max) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get data cache byte size");
if (rdcc->nbytes_max == H5D_CHUNK_CACHE_NBYTES_DEFAULT)
rdcc->nbytes_max = H5F_RDCC_NBYTES(f);
if (H5P_get(dapl, H5D_ACS_PREEMPT_READ_CHUNKS_NAME, &rdcc->w0) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get preempt read chunks");
if (rdcc->w0 < 0)
rdcc->w0 = H5F_RDCC_W0(f);
/* If nbytes_max or nslots is 0, set them both to 0 and avoid allocating space */
if (!rdcc->nbytes_max || !rdcc->nslots)
rdcc->nbytes_max = rdcc->nslots = 0;
else {
rdcc->slot = H5FL_SEQ_CALLOC(H5D_rdcc_ent_ptr_t, rdcc->nslots);
if (NULL == rdcc->slot)
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed");
/* Reset any cached chunk info for this dataset */
H5D__chunk_cinfo_cache_reset(&(rdcc->last));
} /* end else */
/* Compute scaled dimension info, if dataset dims > 1 */
if (dset->shared->ndims > 1) {
unsigned u; /* Local index value */
for (u = 0; u < dset->shared->ndims; u++) {
hsize_t scaled_power2up; /* Scaled value, rounded to next power of 2 */
/* Initial scaled dimension sizes */
if (dset->shared->layout.u.chunk.dim[u] == 0)
HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "chunk size must be > 0, dim = %u ", u);
/* Round up to the next integer # of chunks, to accommodate partial chunks */
rdcc->scaled_dims[u] = (dset->shared->curr_dims[u] + dset->shared->layout.u.chunk.dim[u] - 1) /
dset->shared->layout.u.chunk.dim[u];
if (!(scaled_power2up = H5VM_power2up(rdcc->scaled_dims[u])))
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to get the next power of 2");
/* Initial 'power2up' values for scaled dimensions */
rdcc->scaled_power2up[u] = scaled_power2up;
/* Number of bits required to encode scaled dimension size */
rdcc->scaled_encode_bits[u] = H5VM_log2_gen(rdcc->scaled_power2up[u]);
} /* end for */
} /* end if */
/* Compose chunked index info struct */
idx_info.f = f;
idx_info.pline = &dset->shared->dcpl_cache.pline;
idx_info.layout = &dset->shared->layout.u.chunk;
idx_info.storage = sc;
/* Allocate any indexing structures */
if (sc->ops->init && (sc->ops->init)(&idx_info, dset->shared->space, dset->oloc.addr) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't initialize indexing information");
idx_init = true;
/* Set the number of chunks in dataset, etc. */
if (H5D__chunk_set_info(dset) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to set # of chunks for dataset");
done:
if (FAIL == ret_value) {
if (rdcc->slot)
rdcc->slot = H5FL_SEQ_FREE(H5D_rdcc_ent_ptr_t, rdcc->slot);
if (idx_init && sc->ops->dest && (sc->ops->dest)(&idx_info) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to release chunk index info");
}
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__chunk_init() */
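/* The per-dataset cache parameters consumed by H5D__chunk_init() above
 * (number of slots, byte size, preemption policy w0) come from the dataset
 * access property list. A hedged sketch of how an application would tune
 * them before opening the dataset (the values are examples only):
 *
 *     hid_t dapl = H5Pcreate(H5P_DATASET_ACCESS);
 *     H5Pset_chunk_cache(dapl, 12421, 16 * 1024 * 1024, 0.75);
 *     hid_t dset = H5Dopen2(file_id, "chunked", dapl);
 */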
/*-------------------------------------------------------------------------
* Function: H5D__chunk_is_space_alloc
*
* Purpose: Query if space is allocated for layout
*
 * Return: true if space is allocated, false otherwise
*
*-------------------------------------------------------------------------
*/
bool
H5D__chunk_is_space_alloc(const H5O_storage_t *storage)
{
const H5O_storage_chunk_t *sc = &(storage->u.chunk);
bool ret_value = false; /* Return value */
FUNC_ENTER_PACKAGE_NOERR
/* Sanity checks */
assert(storage);
H5D_CHUNK_STORAGE_INDEX_CHK(sc);
/* Query index layer */
ret_value = (sc->ops->is_space_alloc)(sc);