#define DISABLE_SIGN_COMPARE_WARNINGS

#include "git-compat-util.h"
#include "object-file.h"
#include "hash-lookup.h"
#include "run-command.h"
#include "chunk-format.h"
#include "pack-bitmap.h"
#include "list-objects.h"
#include "pack-revindex.h"

#define PACK_EXPIRED UINT_MAX
#define BITMAP_POS_UNKNOWN (~((uint32_t)0))
#define MIDX_CHUNK_FANOUT_SIZE (sizeof(uint32_t) * 256)
#define MIDX_CHUNK_LARGE_OFFSET_WIDTH (sizeof(uint64_t))

extern int midx_checksum_valid(struct multi_pack_index *m);
extern void clear_midx_files_ext(const char *object_dir, const char *ext,
				 const char *keep_hash);
extern void clear_incremental_midx_files_ext(const char *object_dir,
					     const char *ext,
					     const char **keep_hashes,
					     uint32_t hashes_nr);
extern int cmp_idx_or_pack_name(const char *idx_or_pack_name,
				const char *idx_name);
static size_t write_midx_header(const struct git_hash_algo *hash_algo,
				struct hashfile *f, unsigned char num_chunks,
				uint32_t num_packs)
{
	hashwrite_be32(f, MIDX_SIGNATURE);
	hashwrite_u8(f, MIDX_VERSION);
	hashwrite_u8(f, oid_version(hash_algo));
	hashwrite_u8(f, num_chunks);
	hashwrite_u8(f, 0); /* unused */
	hashwrite_be32(f, num_packs);

	return MIDX_HEADER_SIZE;
}
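/*
 * For reference, the header written above is: a 4-byte signature, a 1-byte
 * version, a 1-byte OID version, a 1-byte chunk count, one unused byte and
 * a 4-byte pack count -- 12 bytes in total, which is what MIDX_HEADER_SIZE
 * is expected to cover.
 */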
struct pack_info {
	uint32_t orig_pack_int_id;
	char *pack_name;
	struct packed_git *p;

	uint32_t bitmap_pos;
	uint32_t bitmap_nr;

	unsigned expired : 1;
};

static void fill_pack_info(struct pack_info *info,
			   struct packed_git *p, const char *pack_name,
			   uint32_t orig_pack_int_id)
{
	memset(info, 0, sizeof(struct pack_info));

	info->orig_pack_int_id = orig_pack_int_id;
	info->pack_name = xstrdup(pack_name);
	info->p = p;
	info->bitmap_pos = BITMAP_POS_UNKNOWN;
}
static int pack_info_compare(const void *_a, const void *_b)
{
	struct pack_info *a = (struct pack_info *)_a;
	struct pack_info *b = (struct pack_info *)_b;
	return strcmp(a->pack_name, b->pack_name);
}

static int idx_or_pack_name_cmp(const void *_va, const void *_vb)
{
	const char *pack_name = _va;
	const struct pack_info *compar = _vb;

	return cmp_idx_or_pack_name(pack_name, compar->pack_name);
}
struct write_midx_context {
	struct pack_info *info;
	size_t nr;
	size_t alloc;
	struct multi_pack_index *m;
	struct multi_pack_index *base_midx;
	struct progress *progress;
	unsigned pack_paths_checked;

	struct pack_midx_entry *entries;
	size_t entries_nr;

	uint32_t *pack_perm;
	uint32_t *pack_order;
	unsigned large_offsets_needed:1;
	uint32_t num_large_offsets;

	int preferred_pack_idx;

	int incremental;
	uint32_t num_multi_pack_indexes_before;

	struct string_list *to_include;

	struct repository *repo;
};
static int should_include_pack(const struct write_midx_context *ctx,
			       const char *file_name)
{
	/*
	 * Note that at most one of ctx->m and ctx->to_include are set,
	 * so we are testing midx_contains_pack() and
	 * string_list_has_string() independently (guarded by the
	 * appropriate NULL checks).
	 *
	 * We could support passing to_include while reusing an existing
	 * MIDX, but don't currently since the reuse process drags
	 * forward all packs from an existing MIDX (without checking
	 * whether or not they appear in the to_include list).
	 *
	 * If we added support for that, these next two conditionals
	 * should be performed independently (likely checking
	 * to_include before the existing MIDX).
	 */
	if (ctx->m && midx_contains_pack(ctx->m, file_name))
		return 0;
	else if (ctx->base_midx && midx_contains_pack(ctx->base_midx,
						      file_name))
		return 0;
	else if (ctx->to_include &&
		 !string_list_has_string(ctx->to_include, file_name))
		return 0;

	return 1;
}
static void add_pack_to_midx(const char *full_path, size_t full_path_len,
			     const char *file_name, void *data)
{
	struct write_midx_context *ctx = data;
	struct packed_git *p;

	if (ends_with(file_name, ".idx")) {
		display_progress(ctx->progress, ++ctx->pack_paths_checked);

		if (!should_include_pack(ctx, file_name))
			return;

		ALLOC_GROW(ctx->info, ctx->nr + 1, ctx->alloc);
		p = add_packed_git(ctx->repo, full_path, full_path_len, 0);
		if (!p) {
			warning(_("failed to add packfile '%s'"),
				full_path);
			return;
		}

		if (open_pack_index(p)) {
			warning(_("failed to open pack-index '%s'"),
				full_path);
			close_pack(p);
			free(p);
			return;
		}

		fill_pack_info(&ctx->info[ctx->nr], p, file_name, ctx->nr);
		ctx->nr++;
	}
}
struct pack_midx_entry {
	struct object_id oid;
	uint32_t pack_int_id;
	time_t pack_mtime;
	uint64_t offset;
	unsigned preferred : 1;
};
static int midx_oid_compare(const void *_a, const void *_b)
{
	const struct pack_midx_entry *a = (const struct pack_midx_entry *)_a;
	const struct pack_midx_entry *b = (const struct pack_midx_entry *)_b;
	int cmp = oidcmp(&a->oid, &b->oid);

	if (cmp)
		return cmp;

	/* Sort objects in a preferred pack first when multiple copies exist. */
	if (a->preferred > b->preferred)
		return -1;
	if (a->preferred < b->preferred)
		return 1;

	if (a->pack_mtime > b->pack_mtime)
		return -1;
	else if (a->pack_mtime < b->pack_mtime)
		return 1;

	return a->pack_int_id - b->pack_int_id;
}
static int nth_midxed_pack_midx_entry(struct multi_pack_index *m,
				      struct pack_midx_entry *e,
				      uint32_t pos)
{
	if (pos >= m->num_objects + m->num_objects_in_base)
		return 1;

	nth_midxed_object_oid(&e->oid, m, pos);
	e->pack_int_id = nth_midxed_pack_int_id(m, pos);
	e->offset = nth_midxed_offset(m, pos);

	/* consider objects in midx to be from "old" packs */
	e->pack_mtime = 0;
	return 0;
}
static void fill_pack_entry(uint32_t pack_int_id,
			    struct packed_git *p,
			    uint32_t cur_object,
			    struct pack_midx_entry *entry,
			    int preferred)
{
	if (nth_packed_object_id(&entry->oid, p, cur_object) < 0)
		die(_("failed to locate object %d in packfile"), cur_object);

	entry->pack_int_id = pack_int_id;
	entry->pack_mtime = p->mtime;

	entry->offset = nth_packed_object_offset(p, cur_object);
	entry->preferred = !!preferred;
}
struct midx_fanout {
	struct pack_midx_entry *entries;
	size_t nr, alloc;
};

static void midx_fanout_grow(struct midx_fanout *fanout, size_t nr)
{
	if (nr < fanout->nr)
		BUG("negative growth in midx_fanout_grow() (%"PRIuMAX" < %"PRIuMAX")",
		    (uintmax_t)nr, (uintmax_t)fanout->nr);
	ALLOC_GROW(fanout->entries, nr, fanout->alloc);
}

static void midx_fanout_sort(struct midx_fanout *fanout)
{
	QSORT(fanout->entries, fanout->nr, midx_oid_compare);
}
static void midx_fanout_add_midx_fanout(struct midx_fanout *fanout,
					struct multi_pack_index *m,
					uint32_t cur_fanout,
					int preferred_pack)
{
	uint32_t start = m->num_objects_in_base, end;
	uint32_t cur_object;

	if (m->base_midx)
		midx_fanout_add_midx_fanout(fanout, m->base_midx, cur_fanout,
					    preferred_pack);

	if (cur_fanout)
		start += ntohl(m->chunk_oid_fanout[cur_fanout - 1]);
	end = m->num_objects_in_base + ntohl(m->chunk_oid_fanout[cur_fanout]);

	for (cur_object = start; cur_object < end; cur_object++) {
		if ((preferred_pack > -1) &&
		    (preferred_pack == nth_midxed_pack_int_id(m, cur_object))) {
			/*
			 * Objects from preferred packs are added
			 * separately.
			 */
			continue;
		}

		midx_fanout_grow(fanout, fanout->nr + 1);
		nth_midxed_pack_midx_entry(m,
					   &fanout->entries[fanout->nr],
					   cur_object);
		fanout->entries[fanout->nr].preferred = 0;
		fanout->nr++;
	}
}
static void midx_fanout_add_pack_fanout(struct midx_fanout *fanout,
					struct pack_info *info,
					uint32_t cur_pack,
					int preferred,
					uint32_t cur_fanout)
{
	struct packed_git *pack = info[cur_pack].p;
	uint32_t start = 0, end;
	uint32_t cur_object;

	if (cur_fanout)
		start = get_pack_fanout(pack, cur_fanout - 1);
	end = get_pack_fanout(pack, cur_fanout);

	for (cur_object = start; cur_object < end; cur_object++) {
		midx_fanout_grow(fanout, fanout->nr + 1);
		fill_pack_entry(cur_pack,
				pack,
				cur_object,
				&fanout->entries[fanout->nr],
				preferred);
		fanout->nr++;
	}
}
/*
 * It is possible to artificially get into a state where there are many
 * duplicate copies of objects. That can create high memory pressure if
 * we are to create a list of all objects before de-duplication. To reduce
 * this memory pressure without a significant performance drop, automatically
 * group objects by the first byte of their object id. Use the IDX fanout
 * tables to group the data, copy to a local array, then sort.
 *
 * Copy only the de-duplicated entries (selected by most-recent modified time
 * of a packfile containing the object).
 */
static void compute_sorted_entries(struct write_midx_context *ctx,
				   uint32_t start_pack)
{
	uint32_t cur_fanout, cur_pack, cur_object;
	size_t alloc_objects, total_objects = 0;
	struct midx_fanout fanout = { 0 };

	for (cur_pack = start_pack; cur_pack < ctx->nr; cur_pack++)
		total_objects = st_add(total_objects,
				       ctx->info[cur_pack].p->num_objects);

	/*
	 * As we de-duplicate by fanout value, we expect the fanout
	 * slices to be evenly distributed, with some noise. Hence,
	 * allocate slightly more than one 256th.
	 */
	alloc_objects = fanout.alloc = total_objects > 3200 ? total_objects / 200 : 16;
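	/*
	 * Rough arithmetic behind this heuristic: an even split would put
	 * total_objects / 256 entries into each fanout slice, so reserving
	 * total_objects / 200 (about 1.28x that) leaves roughly 28% headroom
	 * for unevenly distributed OIDs before ALLOC_GROW has to step in.
	 */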
	ALLOC_ARRAY(fanout.entries, fanout.alloc);
	ALLOC_ARRAY(ctx->entries, alloc_objects);
	ctx->entries_nr = 0;

	for (cur_fanout = 0; cur_fanout < 256; cur_fanout++) {
		fanout.nr = 0;

		if (ctx->m && !ctx->incremental)
			midx_fanout_add_midx_fanout(&fanout, ctx->m, cur_fanout,
						    ctx->preferred_pack_idx);

		for (cur_pack = start_pack; cur_pack < ctx->nr; cur_pack++) {
			int preferred = cur_pack == ctx->preferred_pack_idx;
			midx_fanout_add_pack_fanout(&fanout,
						    ctx->info, cur_pack,
						    preferred, cur_fanout);
		}

		if (-1 < ctx->preferred_pack_idx && ctx->preferred_pack_idx < start_pack)
			midx_fanout_add_pack_fanout(&fanout, ctx->info,
						    ctx->preferred_pack_idx, 1,
						    cur_fanout);

		midx_fanout_sort(&fanout);

		/*
		 * The batch is now sorted by OID and then mtime (descending).
		 * Take only the first duplicate.
		 */
		for (cur_object = 0; cur_object < fanout.nr; cur_object++) {
			if (cur_object && oideq(&fanout.entries[cur_object - 1].oid,
						&fanout.entries[cur_object].oid))
				continue;
			if (ctx->incremental && ctx->base_midx &&
			    midx_has_oid(ctx->base_midx,
					 &fanout.entries[cur_object].oid))
				continue;

			ALLOC_GROW(ctx->entries, st_add(ctx->entries_nr, 1),
				   alloc_objects);
			memcpy(&ctx->entries[ctx->entries_nr],
			       &fanout.entries[cur_object],
			       sizeof(struct pack_midx_entry));
			ctx->entries_nr++;
		}
	}

	free(fanout.entries);
}
static int write_midx_pack_names(struct hashfile *f, void *data)
{
	struct write_midx_context *ctx = data;
	uint32_t i;
	unsigned char padding[MIDX_CHUNK_ALIGNMENT];
	size_t written = 0;

	for (i = 0; i < ctx->nr; i++) {
		size_t writelen;

		if (ctx->info[i].expired)
			continue;

		if (i && strcmp(ctx->info[i].pack_name, ctx->info[i - 1].pack_name) <= 0)
			BUG("incorrect pack-file order: %s before %s",
			    ctx->info[i - 1].pack_name,
			    ctx->info[i].pack_name);

		writelen = strlen(ctx->info[i].pack_name) + 1;
		hashwrite(f, ctx->info[i].pack_name, writelen);
		written += writelen;
	}

	/* add padding to be aligned */
	i = MIDX_CHUNK_ALIGNMENT - (written % MIDX_CHUNK_ALIGNMENT);
	if (i < MIDX_CHUNK_ALIGNMENT) {
		memset(padding, 0, sizeof(padding));
		hashwrite(f, padding, i);
	}

	return 0;
}
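/*
 * Worked example of the padding above: with an alignment of 4 and pack
 * names totalling 10 bytes (including NUL terminators), 4 - (10 % 4) = 2
 * zero bytes are appended; when the total is already a multiple of the
 * alignment the remainder equals the full alignment, and the
 * "i < MIDX_CHUNK_ALIGNMENT" check skips the padding entirely.
 */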
static int write_midx_bitmapped_packs(struct hashfile *f, void *data)
{
	struct write_midx_context *ctx = data;
	uint32_t i;

	for (i = 0; i < ctx->nr; i++) {
		struct pack_info *pack = &ctx->info[i];
		if (pack->expired)
			continue;

		if (pack->bitmap_pos == BITMAP_POS_UNKNOWN && pack->bitmap_nr)
			BUG("pack '%s' has no bitmap position, but has %d bitmapped object(s)",
			    pack->pack_name, pack->bitmap_nr);

		hashwrite_be32(f, pack->bitmap_pos);
		hashwrite_be32(f, pack->bitmap_nr);
	}

	return 0;
}
static int write_midx_oid_fanout(struct hashfile *f,
				 void *data)
{
	struct write_midx_context *ctx = data;
	struct pack_midx_entry *list = ctx->entries;
	struct pack_midx_entry *last = ctx->entries + ctx->entries_nr;
	uint32_t count = 0;
	uint32_t i;

	/*
	 * Write the first-level table (the list is sorted,
	 * but we use a 256-entry lookup to be able to avoid
	 * having to do eight extra binary search iterations).
	 */
	for (i = 0; i < 256; i++) {
		struct pack_midx_entry *next = list;

		while (next < last && next->oid.hash[0] == i) {
			count++;
			next++;
		}

		hashwrite_be32(f, count);
		list = next;
	}

	return 0;
}
static int write_midx_oid_lookup(struct hashfile *f,
				 void *data)
{
	struct write_midx_context *ctx = data;
	unsigned char hash_len = ctx->repo->hash_algo->rawsz;
	struct pack_midx_entry *list = ctx->entries;
	uint32_t i;

	for (i = 0; i < ctx->entries_nr; i++) {
		struct pack_midx_entry *obj = list++;

		if (i < ctx->entries_nr - 1) {
			struct pack_midx_entry *next = list;
			if (oidcmp(&obj->oid, &next->oid) >= 0)
				BUG("OIDs not in order: %s >= %s",
				    oid_to_hex(&obj->oid),
				    oid_to_hex(&next->oid));
		}

		hashwrite(f, obj->oid.hash, (int)hash_len);
	}

	return 0;
}
static int write_midx_object_offsets(struct hashfile *f,
				     void *data)
{
	struct write_midx_context *ctx = data;
	struct pack_midx_entry *list = ctx->entries;
	uint32_t i, nr_large_offset = 0;

	for (i = 0; i < ctx->entries_nr; i++) {
		struct pack_midx_entry *obj = list++;

		if (ctx->pack_perm[obj->pack_int_id] == PACK_EXPIRED)
			BUG("object %s is in an expired pack with int-id %d",
			    oid_to_hex(&obj->oid),
			    obj->pack_int_id);

		hashwrite_be32(f, ctx->pack_perm[obj->pack_int_id]);

		if (ctx->large_offsets_needed && obj->offset >> 31)
			hashwrite_be32(f, MIDX_LARGE_OFFSET_NEEDED | nr_large_offset++);
		else if (!ctx->large_offsets_needed && obj->offset >> 32)
			BUG("object %s requires a large offset (%"PRIx64") but the MIDX is not writing large offsets!",
			    oid_to_hex(&obj->oid),
			    obj->offset);
		else
			hashwrite_be32(f, (uint32_t)obj->offset);
	}

	return 0;
}
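/*
 * A sketch of the offset encoding used above: offsets that fit in 31 bits
 * are stored directly in the 4-byte field; once any offset needs more than
 * that (and large offsets are being written), the field instead stores
 * MIDX_LARGE_OFFSET_NEEDED OR'd with an index into the large-offset chunk,
 * which holds the full 8-byte value.
 */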
static int write_midx_large_offsets(struct hashfile *f,
				    void *data)
{
	struct write_midx_context *ctx = data;
	struct pack_midx_entry *list = ctx->entries;
	struct pack_midx_entry *end = ctx->entries + ctx->entries_nr;
	uint32_t nr_large_offset = ctx->num_large_offsets;

	while (nr_large_offset) {
		struct pack_midx_entry *obj;
		uint64_t offset;

		if (list >= end)
			BUG("too many large-offset objects");

		obj = list++;
		offset = obj->offset;

		if (!(offset >> 31))
			continue;

		hashwrite_be64(f, offset);

		nr_large_offset--;
	}

	return 0;
}
static int write_midx_revindex(struct hashfile *f,
			       void *data)
{
	struct write_midx_context *ctx = data;
	uint32_t i, nr_base = 0;

	if (ctx->incremental && ctx->base_midx)
		nr_base = ctx->base_midx->num_objects +
			ctx->base_midx->num_objects_in_base;

	for (i = 0; i < ctx->entries_nr; i++)
		hashwrite_be32(f, ctx->pack_order[i] + nr_base);

	return 0;
}
struct midx_pack_order_data {
	uint32_t nr;
	uint32_t pack;
	uint64_t offset;
};

static int midx_pack_order_cmp(const void *va, const void *vb)
{
	const struct midx_pack_order_data *a = va, *b = vb;
	if (a->pack < b->pack)
		return -1;
	else if (a->pack > b->pack)
		return 1;
	else if (a->offset < b->offset)
		return -1;
	else if (a->offset > b->offset)
		return 1;
	else
		return 0;
}
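/*
 * A note on the pseudo pack-order computed below: midx_pack_order_cmp()
 * compares the pack field first, so flipping the high bit on entries that
 * do not come from the preferred pack sorts every object of the preferred
 * pack ahead of all others, with ties broken by pack offset.
 */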
static uint32_t *midx_pack_order(struct write_midx_context *ctx)
{
	struct midx_pack_order_data *data;
	uint32_t *pack_order, base_objects = 0;
	uint32_t i;

	trace2_region_enter("midx", "midx_pack_order", ctx->repo);

	if (ctx->incremental && ctx->base_midx)
		base_objects = ctx->base_midx->num_objects +
			ctx->base_midx->num_objects_in_base;

	ALLOC_ARRAY(pack_order, ctx->entries_nr);
	ALLOC_ARRAY(data, ctx->entries_nr);

	for (i = 0; i < ctx->entries_nr; i++) {
		struct pack_midx_entry *e = &ctx->entries[i];
		data[i].nr = i;
		data[i].pack = ctx->pack_perm[e->pack_int_id];
		if (!e->preferred)
			data[i].pack |= (1U << 31);
		data[i].offset = e->offset;
	}
	QSORT(data, ctx->entries_nr, midx_pack_order_cmp);

	for (i = 0; i < ctx->entries_nr; i++) {
		struct pack_midx_entry *e = &ctx->entries[data[i].nr];
		struct pack_info *pack = &ctx->info[ctx->pack_perm[e->pack_int_id]];
		if (pack->bitmap_pos == BITMAP_POS_UNKNOWN)
			pack->bitmap_pos = i + base_objects;
		pack->bitmap_nr++;
		pack_order[i] = data[i].nr;
	}

	for (i = 0; i < ctx->nr; i++) {
		struct pack_info *pack = &ctx->info[ctx->pack_perm[i]];
		if (pack->bitmap_pos == BITMAP_POS_UNKNOWN)
			pack->bitmap_pos = 0;
	}

	free(data);

	trace2_region_leave("midx", "midx_pack_order", ctx->repo);

	return pack_order;
}
static void write_midx_reverse_index(struct write_midx_context *ctx,
				     const char *object_dir,
				     unsigned char *midx_hash)
{
	struct strbuf buf = STRBUF_INIT;
	char *tmp_file;

	trace2_region_enter("midx", "write_midx_reverse_index", ctx->repo);

	if (ctx->incremental)
		get_split_midx_filename_ext(ctx->repo->hash_algo, &buf,
					    object_dir, midx_hash,
					    MIDX_EXT_REV);
	else
		get_midx_filename_ext(ctx->repo->hash_algo, &buf, object_dir,
				      midx_hash, MIDX_EXT_REV);

	tmp_file = write_rev_file_order(ctx->repo, NULL, ctx->pack_order,
					ctx->entries_nr, midx_hash, WRITE_REV);

	if (finalize_object_file(tmp_file, buf.buf))
		die(_("cannot store reverse index file"));

	strbuf_release(&buf);
	free(tmp_file);

	trace2_region_leave("midx", "write_midx_reverse_index", ctx->repo);
}
static void prepare_midx_packing_data(struct packing_data *pdata,
				      struct write_midx_context *ctx)
{
	uint32_t i;

	trace2_region_enter("midx", "prepare_midx_packing_data", ctx->repo);

	memset(pdata, 0, sizeof(struct packing_data));
	prepare_packing_data(ctx->repo, pdata);

	for (i = 0; i < ctx->entries_nr; i++) {
		uint32_t pos = ctx->pack_order[i];
		struct pack_midx_entry *from = &ctx->entries[pos];
		struct object_entry *to = packlist_alloc(pdata, &from->oid);

		oe_set_in_pack(pdata, to,
			       ctx->info[ctx->pack_perm[from->pack_int_id]].p);
	}

	trace2_region_leave("midx", "prepare_midx_packing_data", ctx->repo);
}
static int add_ref_to_pending(const char *refname, const char *referent UNUSED,
			      const struct object_id *oid,
			      int flag, void *cb_data)
{
	struct rev_info *revs = (struct rev_info *)cb_data;
	struct object_id peeled;
	struct object *object;

	if ((flag & REF_ISSYMREF) && (flag & REF_ISBROKEN)) {
		warning("symbolic ref is dangling: %s", refname);
		return 0;
	}

	if (!peel_iterated_oid(revs->repo, oid, &peeled))
		oid = &peeled;

	object = parse_object_or_die(revs->repo, oid, refname);
	if (object->type != OBJ_COMMIT)
		return 0;

	add_pending_object(revs, object, "");
	if (bitmap_is_preferred_refname(revs->repo, refname))
		object->flags |= NEEDS_BITMAP;

	return 0;
}
727 struct bitmap_commit_cb
{
728 struct commit
**commits
;
729 size_t commits_nr
, commits_alloc
;
731 struct write_midx_context
*ctx
;
734 static const struct object_id
*bitmap_oid_access(size_t index
,
735 const void *_entries
)
737 const struct pack_midx_entry
*entries
= _entries
;
738 return &entries
[index
].oid
;
741 static void bitmap_show_commit(struct commit
*commit
, void *_data
)
743 struct bitmap_commit_cb
*data
= _data
;
744 int pos
= oid_pos(&commit
->object
.oid
, data
->ctx
->entries
,
745 data
->ctx
->entries_nr
,
750 ALLOC_GROW(data
->commits
, data
->commits_nr
+ 1, data
->commits_alloc
);
751 data
->commits
[data
->commits_nr
++] = commit
;
static int read_refs_snapshot(const char *refs_snapshot,
			      struct rev_info *revs)
{
	struct strbuf buf = STRBUF_INIT;
	struct object_id oid;
	FILE *f = xfopen(refs_snapshot, "r");

	while (strbuf_getline(&buf, f) != EOF) {
		struct object *object;
		int preferred = 0;
		char *hex = buf.buf;
		const char *end = NULL;

		if (buf.len && *buf.buf == '+') {
			preferred = 1;
			hex = &buf.buf[1];
		}

		if (parse_oid_hex_algop(hex, &oid, &end, revs->repo->hash_algo) < 0)
			die(_("could not parse line: %s"), buf.buf);
		if (*end)
			die(_("malformed line: %s"), buf.buf);

		object = parse_object_or_die(revs->repo, &oid, NULL);
		if (preferred)
			object->flags |= NEEDS_BITMAP;

		add_pending_object(revs, object, "");
	}

	fclose(f);
	strbuf_release(&buf);
	return 0;
}
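/*
 * As parsed above, each line of the refs snapshot is expected to carry a
 * hex object ID, optionally prefixed with '+' to mark that object as a
 * preferred bitmap tip (NEEDS_BITMAP); any trailing garbage after the OID
 * makes the line malformed.
 */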
static struct commit **find_commits_for_midx_bitmap(uint32_t *indexed_commits_nr_p,
						    const char *refs_snapshot,
						    struct write_midx_context *ctx)
{
	struct rev_info revs;
	struct bitmap_commit_cb cb = {0};

	trace2_region_enter("midx", "find_commits_for_midx_bitmap", ctx->repo);

	cb.ctx = ctx;

	repo_init_revisions(ctx->repo, &revs, NULL);
	if (refs_snapshot) {
		read_refs_snapshot(refs_snapshot, &revs);
	} else {
		setup_revisions(0, NULL, &revs, NULL);
		refs_for_each_ref(get_main_ref_store(ctx->repo),
				  add_ref_to_pending, &revs);
	}

	/*
	 * Skipping promisor objects here is intentional, since it only excludes
	 * them from the list of reachable commits that we want to select from
	 * when computing the selection of MIDX'd commits to receive bitmaps.
	 *
	 * Reachability bitmaps do require that their objects be closed under
	 * reachability, but fetching any objects missing from promisors at this
	 * point is too late. But, if one of those objects can be reached from
	 * another object that is included in the bitmap, then we will
	 * complain later that we don't have reachability closure (and fail
	 * appropriately).
	 */
	fetch_if_missing = 0;
	revs.exclude_promisor_objects = 1;

	if (prepare_revision_walk(&revs))
		die(_("revision walk setup failed"));

	traverse_commit_list(&revs, bitmap_show_commit, NULL, &cb);
	if (indexed_commits_nr_p)
		*indexed_commits_nr_p = cb.commits_nr;

	release_revisions(&revs);

	trace2_region_leave("midx", "find_commits_for_midx_bitmap", ctx->repo);

	return cb.commits;
}
static int write_midx_bitmap(struct write_midx_context *ctx,
			     const char *object_dir,
			     const unsigned char *midx_hash,
			     struct packing_data *pdata,
			     struct commit **commits,
			     uint32_t commits_nr,
			     unsigned flags)
{
	int ret, i;
	uint16_t options = 0;
	struct bitmap_writer writer;
	struct pack_idx_entry **index;
	struct strbuf bitmap_name = STRBUF_INIT;

	trace2_region_enter("midx", "write_midx_bitmap", ctx->repo);

	if (ctx->incremental)
		get_split_midx_filename_ext(ctx->repo->hash_algo, &bitmap_name,
					    object_dir, midx_hash,
					    MIDX_EXT_BITMAP);
	else
		get_midx_filename_ext(ctx->repo->hash_algo, &bitmap_name,
				      object_dir, midx_hash, MIDX_EXT_BITMAP);

	if (flags & MIDX_WRITE_BITMAP_HASH_CACHE)
		options |= BITMAP_OPT_HASH_CACHE;

	if (flags & MIDX_WRITE_BITMAP_LOOKUP_TABLE)
		options |= BITMAP_OPT_LOOKUP_TABLE;

	/*
	 * Build the MIDX-order index based on pdata.objects (which is already
	 * in MIDX order; c.f., 'midx_pack_order_cmp()' for the definition of
	 * MIDX order).
	 */
	ALLOC_ARRAY(index, pdata->nr_objects);
	for (i = 0; i < pdata->nr_objects; i++)
		index[i] = &pdata->objects[i].idx;

	bitmap_writer_init(&writer, ctx->repo, pdata,
			   ctx->incremental ? ctx->base_midx : NULL);
	bitmap_writer_show_progress(&writer, flags & MIDX_PROGRESS);
	bitmap_writer_build_type_index(&writer, index);

	/*
	 * bitmap_writer_finish expects objects in lex order, but pack_order
	 * gives us exactly that. use it directly instead of re-sorting the
	 * array.
	 *
	 * This changes the order of objects in 'index' between
	 * bitmap_writer_build_type_index and bitmap_writer_finish.
	 *
	 * The same re-ordering takes place in the single-pack bitmap code via
	 * write_idx_file(), which is called by finish_tmp_packfile(), which
	 * happens between bitmap_writer_build_type_index() and
	 * bitmap_writer_finish().
	 */
	for (i = 0; i < pdata->nr_objects; i++)
		index[ctx->pack_order[i]] = &pdata->objects[i].idx;

	bitmap_writer_select_commits(&writer, commits, commits_nr);
	ret = bitmap_writer_build(&writer);
	if (ret < 0)
		goto cleanup;

	bitmap_writer_set_checksum(&writer, midx_hash);
	bitmap_writer_finish(&writer, index, bitmap_name.buf, options);

cleanup:
	free(index);
	strbuf_release(&bitmap_name);
	bitmap_writer_free(&writer);

	trace2_region_leave("midx", "write_midx_bitmap", ctx->repo);

	return ret;
}
static struct multi_pack_index *lookup_multi_pack_index(struct repository *r,
							const char *object_dir)
{
	struct multi_pack_index *result = NULL;
	struct multi_pack_index *cur;
	char *obj_dir_real = real_pathdup(object_dir, 1);
	struct strbuf cur_path_real = STRBUF_INIT;

	/* Ensure the given object_dir is local, or a known alternate. */
	odb_find_source(r->objects, obj_dir_real);

	for (cur = get_multi_pack_index(r); cur; cur = cur->next) {
		strbuf_realpath(&cur_path_real, cur->object_dir, 1);
		if (!strcmp(obj_dir_real, cur_path_real.buf)) {
			result = cur;
			break;
		}
	}

	free(obj_dir_real);
	strbuf_release(&cur_path_real);
	return result;
}
static int fill_packs_from_midx(struct write_midx_context *ctx,
				const char *preferred_pack_name, uint32_t flags)
{
	struct multi_pack_index *m;

	for (m = ctx->m; m; m = m->base_midx) {
		uint32_t i;

		for (i = 0; i < m->num_packs; i++) {
			ALLOC_GROW(ctx->info, ctx->nr + 1, ctx->alloc);

			/*
			 * If generating a reverse index, need to have
			 * packed_git's loaded to compare their
			 * mtimes and object count.
			 *
			 * If a preferred pack is specified, need to
			 * have packed_git's loaded to ensure the chosen
			 * preferred pack has a non-zero object count.
			 */
			if (flags & MIDX_WRITE_REV_INDEX ||
			    preferred_pack_name) {
				if (prepare_midx_pack(ctx->repo, m,
						      m->num_packs_in_base + i)) {
					error(_("could not load pack"));
					return 1;
				}

				if (open_pack_index(m->packs[i]))
					die(_("could not open index for %s"),
					    m->packs[i]->pack_name);
			}

			fill_pack_info(&ctx->info[ctx->nr++], m->packs[i],
				       m->pack_names[i],
				       m->num_packs_in_base + i);
		}
	}

	return 0;
}
static struct {
	const char *non_split;
	const char *split;
} midx_exts[] = {
	{NULL, MIDX_EXT_MIDX},
	{MIDX_EXT_BITMAP, MIDX_EXT_BITMAP},
	{MIDX_EXT_REV, MIDX_EXT_REV},
};
static int link_midx_to_chain(struct multi_pack_index *m)
{
	struct strbuf from = STRBUF_INIT;
	struct strbuf to = STRBUF_INIT;
	int ret = 0;
	size_t i;

	if (!m || m->has_chain) {
		/*
		 * Either no MIDX previously existed, or it was already
		 * part of a MIDX chain. In both cases, we have nothing
		 * to link, so return early.
		 */
		goto done;
	}

	for (i = 0; i < ARRAY_SIZE(midx_exts); i++) {
		const unsigned char *hash = get_midx_checksum(m);

		get_midx_filename_ext(m->repo->hash_algo, &from, m->object_dir,
				      hash, midx_exts[i].non_split);
		get_split_midx_filename_ext(m->repo->hash_algo, &to,
					    m->object_dir, hash,
					    midx_exts[i].split);

		if (link(from.buf, to.buf) < 0 && errno != ENOENT) {
			ret = error_errno(_("unable to link '%s' to '%s'"),
					  from.buf, to.buf);
			goto done;
		}

		strbuf_reset(&from);
		strbuf_reset(&to);
	}

done:
	strbuf_release(&from);
	strbuf_release(&to);
	return ret;
}
static void clear_midx_files(struct repository *r, const char *object_dir,
			     const char **hashes, uint32_t hashes_nr,
			     unsigned incremental)
{
	/*
	 * if incremental:
	 *   - remove all non-incremental MIDX files
	 *   - remove any incremental MIDX files not in the current one
	 *
	 * if non-incremental:
	 *   - remove all incremental MIDX files
	 *   - remove any non-incremental MIDX files not matching the current
	 *     hash
	 */
	struct strbuf buf = STRBUF_INIT;
	const char *exts[] = { MIDX_EXT_BITMAP, MIDX_EXT_REV, MIDX_EXT_MIDX };
	uint32_t i, j;

	for (i = 0; i < ARRAY_SIZE(exts); i++) {
		clear_incremental_midx_files_ext(object_dir, exts[i],
						 hashes, hashes_nr);
		for (j = 0; j < hashes_nr; j++)
			clear_midx_files_ext(object_dir, exts[i], hashes[j]);
	}

	if (incremental)
		get_midx_filename(r->hash_algo, &buf, object_dir);
	else
		get_midx_chain_filename(&buf, object_dir);

	if (unlink(buf.buf) && errno != ENOENT)
		die_errno(_("failed to clear multi-pack-index at %s"), buf.buf);

	strbuf_release(&buf);
}
static int write_midx_internal(struct repository *r, const char *object_dir,
			       struct string_list *packs_to_include,
			       struct string_list *packs_to_drop,
			       const char *preferred_pack_name,
			       const char *refs_snapshot,
			       unsigned flags)
{
	struct strbuf midx_name = STRBUF_INIT;
	unsigned char midx_hash[GIT_MAX_RAWSZ];
	uint32_t i, start_pack;
	struct hashfile *f = NULL;
	struct lock_file lk;
	struct tempfile *incr;
	struct write_midx_context ctx = { 0 };
	int bitmapped_packs_concat_len = 0;
	int pack_name_concat_len = 0;
	int dropped_packs = 0;
	int result = 0;
	const char **keep_hashes = NULL;
	struct chunkfile *cf;

	trace2_region_enter("midx", "write_midx_internal", r);

	ctx.repo = r;
	ctx.incremental = !!(flags & MIDX_WRITE_INCREMENTAL);

	if (ctx.incremental)
		strbuf_addf(&midx_name,
			    "%s/pack/multi-pack-index.d/tmp_midx_XXXXXX",
			    object_dir);
	else
		get_midx_filename(r->hash_algo, &midx_name, object_dir);
	if (safe_create_leading_directories(r, midx_name.buf))
		die_errno(_("unable to create leading directories of %s"),
			  midx_name.buf);

	if (!packs_to_include || ctx.incremental) {
		struct multi_pack_index *m = lookup_multi_pack_index(r, object_dir);
		if (m && !midx_checksum_valid(m)) {
			warning(_("ignoring existing multi-pack-index; checksum mismatch"));
			m = NULL;
		}

		if (m) {
			/*
			 * Only reference an existing MIDX when not filtering
			 * which packs to include, since all packs and objects
			 * are copied blindly from an existing MIDX if one is
			 * present.
			 */
			if (ctx.incremental)
				ctx.base_midx = m;
			else if (!packs_to_include)
				ctx.m = m;
		}
	}

	ctx.alloc = ctx.m ? ctx.m->num_packs + ctx.m->num_packs_in_base : 16;
	ALLOC_ARRAY(ctx.info, ctx.alloc);

	if (ctx.incremental) {
		struct multi_pack_index *m = ctx.base_midx;
		while (m) {
			if (flags & MIDX_WRITE_BITMAP && load_midx_revindex(m)) {
				error(_("could not load reverse index for MIDX %s"),
				      hash_to_hex_algop(get_midx_checksum(m),
							m->repo->hash_algo));
				result = 1;
				goto cleanup;
			}
			ctx.num_multi_pack_indexes_before++;
			m = m->base_midx;
		}
	} else if (ctx.m && fill_packs_from_midx(&ctx, preferred_pack_name,
						 flags)) {
		result = 1;
		goto cleanup;
	}

	start_pack = ctx.nr;

	ctx.pack_paths_checked = 0;
	if (flags & MIDX_PROGRESS)
		ctx.progress = start_delayed_progress(r,
			_("Adding packfiles to multi-pack-index"), 0);
	else
		ctx.progress = NULL;

	ctx.to_include = packs_to_include;

	for_each_file_in_pack_dir(object_dir, add_pack_to_midx, &ctx);
	stop_progress(&ctx.progress);

	if ((ctx.m && ctx.nr == ctx.m->num_packs + ctx.m->num_packs_in_base) &&
	    !(packs_to_include || packs_to_drop)) {
		struct bitmap_index *bitmap_git;
		int bitmap_exists;
		int want_bitmap = flags & MIDX_WRITE_BITMAP;

		bitmap_git = prepare_midx_bitmap_git(ctx.m);
		bitmap_exists = bitmap_git && bitmap_is_midx(bitmap_git);
		free_bitmap_index(bitmap_git);

		if (bitmap_exists || !want_bitmap) {
			/*
			 * The correct MIDX already exists, and so does a
			 * corresponding bitmap (or one wasn't requested).
			 */
			if (!want_bitmap)
				clear_midx_files_ext(object_dir, "bitmap", NULL);
			goto cleanup;
		}
	}

	if (ctx.incremental && !ctx.nr)
		goto cleanup; /* nothing to do */
	if (preferred_pack_name) {
		ctx.preferred_pack_idx = -1;

		for (i = 0; i < ctx.nr; i++) {
			if (!cmp_idx_or_pack_name(preferred_pack_name,
						  ctx.info[i].pack_name)) {
				ctx.preferred_pack_idx = i;
				break;
			}
		}

		if (ctx.preferred_pack_idx == -1)
			warning(_("unknown preferred pack: '%s'"),
				preferred_pack_name);
	} else if (ctx.nr &&
		   (flags & (MIDX_WRITE_REV_INDEX | MIDX_WRITE_BITMAP))) {
		struct packed_git *oldest = ctx.info[ctx.preferred_pack_idx].p;
		ctx.preferred_pack_idx = 0;

		if (packs_to_drop && packs_to_drop->nr)
			BUG("cannot write a MIDX bitmap during expiration");

		/*
		 * Set a preferred pack when writing a bitmap to ensure that
		 * the pack from which the first object is selected in pseudo
		 * pack-order has all of its objects selected from that pack
		 * (and not another pack containing a duplicate).
		 */
		for (i = 1; i < ctx.nr; i++) {
			struct packed_git *p = ctx.info[i].p;

			if (!oldest->num_objects || p->mtime < oldest->mtime) {
				oldest = p;
				ctx.preferred_pack_idx = i;
			}
		}

		if (!oldest->num_objects) {
			/*
			 * If all packs are empty, unset the preferred index.
			 * This is acceptable since there will be no duplicate
			 * objects to resolve, so the preferred value doesn't
			 * matter.
			 */
			ctx.preferred_pack_idx = -1;
		}
	} else {
		/*
		 * otherwise don't mark any pack as preferred to avoid
		 * interfering with expiration logic below
		 */
		ctx.preferred_pack_idx = -1;
	}

	if (ctx.preferred_pack_idx > -1) {
		struct packed_git *preferred = ctx.info[ctx.preferred_pack_idx].p;
		if (!preferred->num_objects) {
			error(_("cannot select preferred pack %s with no objects"),
			      preferred->pack_name);
			result = 1;
			goto cleanup;
		}
	}

	compute_sorted_entries(&ctx, start_pack);

	ctx.large_offsets_needed = 0;
	for (i = 0; i < ctx.entries_nr; i++) {
		if (ctx.entries[i].offset > 0x7fffffff)
			ctx.num_large_offsets++;
		if (ctx.entries[i].offset > 0xffffffff)
			ctx.large_offsets_needed = 1;
	}

	QSORT(ctx.info, ctx.nr, pack_info_compare);

	if (packs_to_drop && packs_to_drop->nr) {
		int drop_index = 0;
		int missing_drops = 0;

		for (i = 0; i < ctx.nr && drop_index < packs_to_drop->nr; i++) {
			int cmp = strcmp(ctx.info[i].pack_name,
					 packs_to_drop->items[drop_index].string);

			if (!cmp) {
				drop_index++;
				ctx.info[i].expired = 1;
			} else if (cmp > 0) {
				error(_("did not see pack-file %s to drop"),
				      packs_to_drop->items[drop_index].string);
				drop_index++;
				missing_drops++;
				i--;
			} else {
				ctx.info[i].expired = 0;
			}
		}

		if (missing_drops) {
			result = 1;
			goto cleanup;
		}
	}

	/*
	 * pack_perm stores a permutation between pack-int-ids from the
	 * previous multi-pack-index to the new one we are writing:
	 *
	 * pack_perm[old_id] = new_id
	 */
	ALLOC_ARRAY(ctx.pack_perm, ctx.nr);
	for (i = 0; i < ctx.nr; i++) {
		if (ctx.info[i].expired) {
			dropped_packs++;
			ctx.pack_perm[ctx.info[i].orig_pack_int_id] = PACK_EXPIRED;
		} else {
			ctx.pack_perm[ctx.info[i].orig_pack_int_id] = i - dropped_packs;
		}
	}
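	/*
	 * For example, with three packs sorted in their original int-id order
	 * and only the middle one expired, the loop above yields
	 * pack_perm = { 0, PACK_EXPIRED, 1 }: surviving packs receive a dense
	 * run of new ids while expired ones are marked unusable.
	 */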
	for (i = 0; i < ctx.nr; i++) {
		if (ctx.info[i].expired)
			continue;
		pack_name_concat_len += strlen(ctx.info[i].pack_name) + 1;
		bitmapped_packs_concat_len += 2 * sizeof(uint32_t);
	}

	/* Check that the preferred pack wasn't expired (if given). */
	if (preferred_pack_name) {
		struct pack_info *preferred = bsearch(preferred_pack_name,
						      ctx.info, ctx.nr,
						      sizeof(*ctx.info),
						      idx_or_pack_name_cmp);
		if (preferred) {
			uint32_t perm = ctx.pack_perm[preferred->orig_pack_int_id];
			if (perm == PACK_EXPIRED)
				warning(_("preferred pack '%s' is expired"),
					preferred_pack_name);
		}
	}

	if (pack_name_concat_len % MIDX_CHUNK_ALIGNMENT)
		pack_name_concat_len += MIDX_CHUNK_ALIGNMENT -
					(pack_name_concat_len % MIDX_CHUNK_ALIGNMENT);

	if (ctx.nr - dropped_packs == 0) {
		error(_("no pack files to index."));
		result = 1;
		goto cleanup;
	}

	if (!ctx.entries_nr) {
		if (flags & MIDX_WRITE_BITMAP)
			warning(_("refusing to write multi-pack .bitmap without any objects"));
		flags &= ~(MIDX_WRITE_REV_INDEX | MIDX_WRITE_BITMAP);
	}

	if (ctx.incremental) {
		struct strbuf lock_name = STRBUF_INIT;

		get_midx_chain_filename(&lock_name, object_dir);
		hold_lock_file_for_update(&lk, lock_name.buf, LOCK_DIE_ON_ERROR);
		strbuf_release(&lock_name);

		incr = mks_tempfile_m(midx_name.buf, 0444);
		if (!incr) {
			error(_("unable to create temporary MIDX layer"));
			result = 1;
			goto cleanup;
		}

		if (adjust_shared_perm(r, get_tempfile_path(incr))) {
			error(_("unable to adjust shared permissions for '%s'"),
			      get_tempfile_path(incr));
			result = 1;
			goto cleanup;
		}

		f = hashfd(r->hash_algo, get_tempfile_fd(incr),
			   get_tempfile_path(incr));
	} else {
		hold_lock_file_for_update(&lk, midx_name.buf, LOCK_DIE_ON_ERROR);
		f = hashfd(r->hash_algo, get_lock_file_fd(&lk),
			   get_lock_file_path(&lk));
	}

	cf = init_chunkfile(f);

	add_chunk(cf, MIDX_CHUNKID_PACKNAMES, pack_name_concat_len,
		  write_midx_pack_names);
	add_chunk(cf, MIDX_CHUNKID_OIDFANOUT, MIDX_CHUNK_FANOUT_SIZE,
		  write_midx_oid_fanout);
	add_chunk(cf, MIDX_CHUNKID_OIDLOOKUP,
		  st_mult(ctx.entries_nr, r->hash_algo->rawsz),
		  write_midx_oid_lookup);
	add_chunk(cf, MIDX_CHUNKID_OBJECTOFFSETS,
		  st_mult(ctx.entries_nr, MIDX_CHUNK_OFFSET_WIDTH),
		  write_midx_object_offsets);

	if (ctx.large_offsets_needed)
		add_chunk(cf, MIDX_CHUNKID_LARGEOFFSETS,
			  st_mult(ctx.num_large_offsets,
				  MIDX_CHUNK_LARGE_OFFSET_WIDTH),
			  write_midx_large_offsets);

	if (flags & (MIDX_WRITE_REV_INDEX | MIDX_WRITE_BITMAP)) {
		ctx.pack_order = midx_pack_order(&ctx);
		add_chunk(cf, MIDX_CHUNKID_REVINDEX,
			  st_mult(ctx.entries_nr, sizeof(uint32_t)),
			  write_midx_revindex);
		add_chunk(cf, MIDX_CHUNKID_BITMAPPEDPACKS,
			  bitmapped_packs_concat_len,
			  write_midx_bitmapped_packs);
	}

	write_midx_header(r->hash_algo, f, get_num_chunks(cf),
			  ctx.nr - dropped_packs);
	write_chunkfile(cf, &ctx);

	finalize_hashfile(f, midx_hash, FSYNC_COMPONENT_PACK_METADATA,
			  CSUM_FSYNC | CSUM_HASH_IN_STREAM);
	if (flags & MIDX_WRITE_REV_INDEX &&
	    git_env_bool("GIT_TEST_MIDX_WRITE_REV", 0))
		write_midx_reverse_index(&ctx, object_dir, midx_hash);

	if (flags & MIDX_WRITE_BITMAP) {
		struct packing_data pdata;
		struct commit **commits;
		uint32_t commits_nr;

		if (!ctx.entries_nr)
			BUG("cannot write a bitmap without any objects");

		prepare_midx_packing_data(&pdata, &ctx);

		commits = find_commits_for_midx_bitmap(&commits_nr, refs_snapshot, &ctx);

		/*
		 * The previous steps translated the information from
		 * 'entries' into information suitable for constructing
		 * bitmaps. We no longer need that array, so clear it to
		 * reduce memory pressure.
		 */
		FREE_AND_NULL(ctx.entries);
		ctx.entries_nr = 0;

		if (write_midx_bitmap(&ctx, object_dir,
				      midx_hash, &pdata, commits, commits_nr,
				      flags) < 0) {
			error(_("could not write multi-pack bitmap"));
			result = 1;
			clear_packing_data(&pdata);
			free(commits);
			goto cleanup;
		}

		clear_packing_data(&pdata);
		free(commits);
	}

	/*
	 * NOTE: Do not use ctx.entries beyond this point, since it might
	 * have been freed in the previous if block.
	 */

	CALLOC_ARRAY(keep_hashes, ctx.num_multi_pack_indexes_before + 1);

	if (ctx.incremental) {
		FILE *chainf = fdopen_lock_file(&lk, "w");
		struct strbuf final_midx_name = STRBUF_INIT;
		struct multi_pack_index *m = ctx.base_midx;

		if (!chainf) {
			error_errno(_("unable to open multi-pack-index chain file"));
			result = 1;
			goto cleanup;
		}

		if (link_midx_to_chain(ctx.base_midx) < 0) {
			result = 1;
			goto cleanup;
		}

		get_split_midx_filename_ext(r->hash_algo, &final_midx_name,
					    object_dir, midx_hash, MIDX_EXT_MIDX);

		if (rename_tempfile(&incr, final_midx_name.buf) < 0) {
			error_errno(_("unable to rename new multi-pack-index layer"));
			result = 1;
			goto cleanup;
		}

		strbuf_release(&final_midx_name);

		keep_hashes[ctx.num_multi_pack_indexes_before] =
			xstrdup(hash_to_hex_algop(midx_hash, r->hash_algo));

		for (i = 0; i < ctx.num_multi_pack_indexes_before; i++) {
			uint32_t j = ctx.num_multi_pack_indexes_before - i - 1;

			keep_hashes[j] = xstrdup(hash_to_hex_algop(get_midx_checksum(m),
								   r->hash_algo));
			m = m->base_midx;
		}

		for (i = 0; i < ctx.num_multi_pack_indexes_before + 1; i++)
			fprintf(get_lock_file_fp(&lk), "%s\n", keep_hashes[i]);
	} else {
		keep_hashes[ctx.num_multi_pack_indexes_before] =
			xstrdup(hash_to_hex_algop(midx_hash, r->hash_algo));
	}

	if (ctx.m || ctx.base_midx)
		close_object_store(ctx.repo->objects);

	if (commit_lock_file(&lk) < 0)
		die_errno(_("could not write multi-pack-index"));

	clear_midx_files(r, object_dir, keep_hashes,
			 ctx.num_multi_pack_indexes_before + 1,
			 ctx.incremental);

cleanup:
	for (i = 0; i < ctx.nr; i++) {
		if (ctx.info[i].p) {
			close_pack(ctx.info[i].p);
			free(ctx.info[i].p);
		}
		free(ctx.info[i].pack_name);
	}

	free(ctx.info);
	free(ctx.entries);
	free(ctx.pack_perm);
	free(ctx.pack_order);

	if (keep_hashes) {
		for (i = 0; i < ctx.num_multi_pack_indexes_before + 1; i++)
			free((char *)keep_hashes[i]);
		free(keep_hashes);
	}

	strbuf_release(&midx_name);

	trace2_region_leave("midx", "write_midx_internal", r);

	return result;
}
int write_midx_file(struct repository *r, const char *object_dir,
		    const char *preferred_pack_name,
		    const char *refs_snapshot, unsigned flags)
{
	return write_midx_internal(r, object_dir, NULL, NULL,
				   preferred_pack_name, refs_snapshot,
				   flags);
}

int write_midx_file_only(struct repository *r, const char *object_dir,
			 struct string_list *packs_to_include,
			 const char *preferred_pack_name,
			 const char *refs_snapshot, unsigned flags)
{
	return write_midx_internal(r, object_dir, packs_to_include, NULL,
				   preferred_pack_name, refs_snapshot, flags);
}
int expire_midx_packs(struct repository *r, const char *object_dir, unsigned flags)
{
	uint32_t i, *count, result = 0;
	struct string_list packs_to_drop = STRING_LIST_INIT_DUP;
	struct multi_pack_index *m = lookup_multi_pack_index(r, object_dir);
	struct progress *progress = NULL;

	if (!m)
		return 0;

	if (m->base_midx)
		die(_("cannot expire packs from an incremental multi-pack-index"));

	CALLOC_ARRAY(count, m->num_packs);

	if (flags & MIDX_PROGRESS)
		progress = start_delayed_progress(r,
						  _("Counting referenced objects"),
						  m->num_objects);
	for (i = 0; i < m->num_objects; i++) {
		uint32_t pack_int_id = nth_midxed_pack_int_id(m, i);
		count[pack_int_id]++;
		display_progress(progress, i + 1);
	}
	stop_progress(&progress);

	if (flags & MIDX_PROGRESS)
		progress = start_delayed_progress(r,
						  _("Finding and deleting unreferenced packfiles"),
						  m->num_packs);
	for (i = 0; i < m->num_packs; i++) {
		char *pack_name;
		display_progress(progress, i + 1);

		if (count[i])
			continue;

		if (prepare_midx_pack(r, m, i))
			continue;

		if (m->packs[i]->pack_keep || m->packs[i]->is_cruft)
			continue;

		pack_name = xstrdup(m->packs[i]->pack_name);
		close_pack(m->packs[i]);

		string_list_insert(&packs_to_drop, m->pack_names[i]);
		unlink_pack_path(pack_name, 0);
		free(pack_name);
	}

	stop_progress(&progress);

	free(count);

	if (packs_to_drop.nr)
		result = write_midx_internal(r, object_dir, NULL,
					     &packs_to_drop, NULL, NULL, flags);

	string_list_clear(&packs_to_drop, 0);

	return result;
}
{
1615 uint32_t referenced_objects
;
1616 uint32_t pack_int_id
;
1619 static int compare_by_mtime(const void *a_
, const void *b_
)
1621 const struct repack_info
*a
, *b
;
1623 a
= (const struct repack_info
*)a_
;
1624 b
= (const struct repack_info
*)b_
;
1626 if (a
->mtime
< b
->mtime
)
1628 if (a
->mtime
> b
->mtime
)
static int want_included_pack(struct repository *r,
			      struct multi_pack_index *m,
			      int pack_kept_objects,
			      uint32_t pack_int_id)
{
	struct packed_git *p;
	if (prepare_midx_pack(r, m, pack_int_id))
		return 0;
	p = m->packs[pack_int_id];
	if (!pack_kept_objects && p->pack_keep)
		return 0;
	if (p->is_cruft)
		return 0;
	if (open_pack_index(p) || !p->num_objects)
		return 0;
	return 1;
}
*r
,
1652 struct multi_pack_index
*m
,
1653 unsigned char *include_pack
)
1656 int pack_kept_objects
= 0;
1658 repo_config_get_bool(r
, "repack.packkeptobjects", &pack_kept_objects
);
1660 for (i
= 0; i
< m
->num_packs
; i
++) {
1661 if (!want_included_pack(r
, m
, pack_kept_objects
, i
))
1664 include_pack
[i
] = 1;
static void fill_included_packs_batch(struct repository *r,
				      struct multi_pack_index *m,
				      unsigned char *include_pack,
				      size_t batch_size)
{
	uint32_t i;
	size_t total_size = 0;
	struct repack_info *pack_info;
	int pack_kept_objects = 0;

	CALLOC_ARRAY(pack_info, m->num_packs);

	repo_config_get_bool(r, "repack.packkeptobjects", &pack_kept_objects);

	for (i = 0; i < m->num_packs; i++) {
		pack_info[i].pack_int_id = i;

		if (prepare_midx_pack(r, m, i))
			continue;

		pack_info[i].mtime = m->packs[i]->mtime;
	}

	for (i = 0; i < m->num_objects; i++) {
		uint32_t pack_int_id = nth_midxed_pack_int_id(m, i);
		pack_info[pack_int_id].referenced_objects++;
	}

	QSORT(pack_info, m->num_packs, compare_by_mtime);

	for (i = 0; total_size < batch_size && i < m->num_packs; i++) {
		uint32_t pack_int_id = pack_info[i].pack_int_id;
		struct packed_git *p = m->packs[pack_int_id];
		uint64_t expected_size;

		if (!want_included_pack(r, m, pack_kept_objects, pack_int_id))
			continue;

		/*
		 * Use shifted integer arithmetic to calculate the
		 * expected pack size to ~4 significant digits without
		 * overflow for packsizes less than 1PB.
		 */
		expected_size = (uint64_t)pack_info[i].referenced_objects << 14;
		expected_size /= p->num_objects;
		expected_size = u64_mult(expected_size, p->pack_size);
		expected_size = u64_add(expected_size, 1u << 13) >> 14;
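		/*
		 * Worked example of the fixed-point math above: a pack with
		 * 1000 objects and 2,000,000 bytes of which 250 objects are
		 * referenced gives (250 << 14) / 1000 = 4096, then
		 * 4096 * 2,000,000 + 8192 shifted right by 14 is roughly
		 * 500,000 bytes, i.e. about a quarter of the pack.
		 */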
		if (expected_size >= batch_size)
			continue;

		if (unsigned_add_overflows(total_size, (size_t)expected_size))
			total_size = SIZE_MAX;
		else
			total_size += expected_size;

		include_pack[pack_int_id] = 1;
	}

	free(pack_info);
}
int midx_repack(struct repository *r, const char *object_dir, size_t batch_size, unsigned flags)
{
	int result = 0;
	uint32_t i, packs_to_repack = 0;
	unsigned char *include_pack;
	struct child_process cmd = CHILD_PROCESS_INIT;
	FILE *cmd_in;
	struct multi_pack_index *m = lookup_multi_pack_index(r, object_dir);

	/*
	 * When updating the default for these configuration
	 * variables in builtin/repack.c, these must be adjusted
	 * to match.
	 */
	int delta_base_offset = 1;
	int use_delta_islands = 0;

	if (!m)
		return 0;

	if (m->base_midx)
		die(_("cannot repack an incremental multi-pack-index"));

	CALLOC_ARRAY(include_pack, m->num_packs);

	if (batch_size)
		fill_included_packs_batch(r, m, include_pack, batch_size);
	else
		fill_included_packs_all(r, m, include_pack);

	for (i = 0; i < m->num_packs; i++) {
		if (include_pack[i])
			packs_to_repack++;
	}
	if (packs_to_repack <= 1)
		goto cleanup;

	repo_config_get_bool(r, "repack.usedeltabaseoffset", &delta_base_offset);
	repo_config_get_bool(r, "repack.usedeltaislands", &use_delta_islands);

	strvec_push(&cmd.args, "pack-objects");

	strvec_pushf(&cmd.args, "%s/pack/pack", object_dir);

	if (delta_base_offset)
		strvec_push(&cmd.args, "--delta-base-offset");
	if (use_delta_islands)
		strvec_push(&cmd.args, "--delta-islands");

	if (flags & MIDX_PROGRESS)
		strvec_push(&cmd.args, "--progress");
	else
		strvec_push(&cmd.args, "-q");

	cmd.git_cmd = 1;
	cmd.in = cmd.out = -1;

	if (start_command(&cmd)) {
		error(_("could not start pack-objects"));
		result = 1;
		goto cleanup;
	}

	cmd_in = xfdopen(cmd.in, "w");

	for (i = 0; i < m->num_objects; i++) {
		struct object_id oid;
		uint32_t pack_int_id = nth_midxed_pack_int_id(m, i);

		if (!include_pack[pack_int_id])
			continue;

		nth_midxed_object_oid(&oid, m, i);
		fprintf(cmd_in, "%s\n", oid_to_hex(&oid));
	}
	fclose(cmd_in);

	if (finish_command(&cmd)) {
		error(_("could not finish pack-objects"));
		result = 1;
		goto cleanup;
	}

	result = write_midx_internal(r, object_dir, NULL, NULL, NULL, NULL,