diff --git a/allchblk.c b/allchblk.c
index 22e298e0a..8601018ae 100644
--- a/allchblk.c
+++ b/allchblk.c
@@ -1043,7 +1043,11 @@ GC_INNER void GC_freehblk(struct hblk *hbp)
 
     /* space at once. If we don't catch it here, strange things happen */
     /* later. */
+    // Clear all the mark bytes so that freed blocks cannot linger as
+    // uncollectable; the tag must be opted into again on each allocation.
+    BZERO(hhdr->hb_marks, sizeof(hhdr->hb_marks));
     GC_remove_counts(hbp, (size_t)size);
+
     hhdr -> hb_sz = size;
 # ifdef USE_MUNMAP
     hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
diff --git a/include/private/gc_pmark.h b/include/private/gc_pmark.h
index 7be3406f3..22bc05e95 100644
--- a/include/private/gc_pmark.h
+++ b/include/private/gc_pmark.h
@@ -153,7 +153,7 @@ GC_INLINE mse * GC_push_obj(ptr_t obj, const hdr * hhdr, mse * mark_stack_top,
     { /* cannot use do-while(0) here */ \
       char * mark_byte_addr = (char *)(hhdr)->hb_marks + (bit_no); \
       if (*mark_byte_addr != 0) break; /* go to the enclosing loop end */ \
-      *mark_byte_addr = 1; \
+      *mark_byte_addr |= MARK_TAG; \
     }
 # endif /* !PARALLEL_MARK */
 #else
diff --git a/malloc.c b/malloc.c
index 5b56742fc..c1196b2d6 100644
--- a/malloc.c
+++ b/malloc.c
@@ -315,7 +315,10 @@ GC_INNER void * GC_generic_malloc_aligned(size_t lb, int k, unsigned flags,
 
 GC_API GC_ATTR_MALLOC void * GC_CALL GC_generic_malloc(size_t lb, int k)
 {
-    return GC_generic_malloc_aligned(lb, k, 0 /* flags */, 0 /* align_m1 */);
+    void *ptr = GC_generic_malloc_aligned(lb, k, 0 /* flags */, 0 /* align_m1 */);
+
+    if (ptr != NULL) GC_clear_uncollectable(ptr);
+    return ptr;
 }
 
 GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_kind_global(size_t lb, int k)
@@ -451,7 +454,14 @@ GC_API GC_ATTR_MALLOC void * GC_CALL GC_generic_malloc_uncollectable(
 /* Allocate lb bytes of pointerful, traced, but not collectible data. */
 GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_uncollectable(size_t lb)
 {
+#ifdef FINALIZER_ELISION
+    void *ptr = GC_generic_malloc(lb, NORMAL);
+
+    if (ptr != NULL) GC_set_uncollectable(ptr);
+    return ptr;
+#else
     return GC_generic_malloc_uncollectable(lb, UNCOLLECTABLE);
+#endif
 }
 
 #ifdef GC_ATOMIC_UNCOLLECTABLE
@@ -667,8 +677,10 @@ GC_API void GC_CALL GC_free(void * p)
       if (EXPECT(NULL == hhdr, FALSE)) return;
 #   endif
     GC_ASSERT(GC_base(p) == p);
+    GC_ASSERT(GC_is_uncollectable(p));
     LOCK();
     free_internal(p, hhdr);
+    FREE_PROFILER_HOOK(p);
     UNLOCK();
 }
 
diff --git a/mallocx.c b/mallocx.c
index 0481d93af..491e5010b 100644
--- a/mallocx.c
+++ b/mallocx.c
@@ -165,6 +165,8 @@ GC_API void * GC_CALL GC_realloc(void * p, size_t lb)
     }
     result = GC_generic_or_special_malloc((word)lb, obj_kind);
     if (EXPECT(result != NULL, TRUE)) {
+      if (GC_is_uncollectable(p))
+        GC_set_uncollectable(result);
       /* In case of shrink, it could also return original object. */
      /* But this gives the client warning of imminent disaster. */
       BCOPY(p, result, sz);
diff --git a/mark.c b/mark.c
index 8bb8b70f1..63efe09c1 100644
--- a/mark.c
+++ b/mark.c
@@ -187,8 +187,14 @@ GC_INNER void GC_clear_hdr_marks(hdr *hhdr)
     /* No race as GC_realloc holds the allocator lock while updating hb_sz. */
     last_bit = FINAL_MARK_BIT((size_t)hhdr->hb_sz);
 # endif
 
-    BZERO(hhdr -> hb_marks, sizeof(hhdr->hb_marks));
+    unsigned i;
+    size_t sz = (size_t)hhdr->hb_sz;
+    unsigned n_marks = (unsigned)FINAL_MARK_BIT(sz);
+    /* Clear the mark tag on each mark byte, preserving other tags. */
+    for (i = 0; i <= n_marks; i += (unsigned)MARK_BIT_OFFSET(sz)) {
+      hhdr -> hb_marks[i] &= ~MARK_TAG;
+    }
     set_mark_bit_from_hdr(hhdr, last_bit);
     hhdr -> hb_n_marks = 0;
 }
diff --git a/reclaim.c b/reclaim.c
index 93c0f9123..acc167833 100644
--- a/reclaim.c
+++ b/reclaim.c
@@ -201,12 +201,13 @@ STATIC ptr_t GC_reclaim_clear(struct hblk *hbp, const hdr *hhdr, word sz,
     p = hbp -> hb_body;
     plim = p + HBLKSIZE - sz;
     for (bit_no = 0; ADDR_GE(plim, p); bit_no += MARK_BIT_OFFSET(sz)) {
-      if (mark_bit_from_hdr(hhdr, bit_no)) {
+      if (mark_bit_from_hdr(hhdr, bit_no) || uncollectable_bit_from_hdr(hhdr, bit_no)) {
         p += sz;
       } else {
         /* The object is available - put it on list. */
         obj_link(p) = list;
         list = p;
+        clear_uncollectable_bit_from_hdr(HDR(p), bit_no);
         FREE_PROFILER_HOOK(p);
         p = (ptr_t)GC_clear_block((word *)p, sz, pcount);
       }
@@ -231,11 +232,12 @@ STATIC ptr_t GC_reclaim_uninit(struct hblk *hbp, const hdr *hhdr, word sz,
 
     plim = (ptr_t)hbp + HBLKSIZE - sz;
     for (bit_no = 0; ADDR_GE(plim, p); bit_no += MARK_BIT_OFFSET(sz), p += sz) {
-      if (!mark_bit_from_hdr(hhdr, bit_no)) {
+      if (!mark_bit_from_hdr(hhdr, bit_no) && !uncollectable_bit_from_hdr(hhdr, bit_no)) {
         n_bytes_found += sz;
         /* The object is available - put it on list. */
         obj_link(p) = list;
         list = p;
+        clear_uncollectable_bit_from_hdr(HDR(p), bit_no);
         FREE_PROFILER_HOOK(p);
       }
     }
@@ -293,7 +295,7 @@ STATIC void GC_reclaim_check(struct hblk *hbp, const hdr *hhdr, word sz)
     plim = p + HBLKSIZE - sz;
     for (bit_no = 0; ADDR_GE(plim, p);
          bit_no += MARK_BIT_OFFSET(sz), p += sz) {
-      if (!mark_bit_from_hdr(hhdr, bit_no))
+      if (!mark_bit_from_hdr(hhdr, bit_no) && !uncollectable_bit_from_hdr(hhdr, bit_no))
         GC_add_leaked(p);
     }
 }
@@ -866,7 +868,7 @@ STATIC void GC_CALLBACK GC_do_enumerate_reachable_objects(struct hblk *hbp,
     }
     /* Go through all objects in the block. */
     for (bit_no = 0; ADDR_GE(plim, p); bit_no += MARK_BIT_OFFSET(sz), p += sz) {
-      if (mark_bit_from_hdr(hhdr, bit_no)) {
+      if (mark_bit_from_hdr(hhdr, bit_no) || uncollectable_bit_from_hdr(hhdr, bit_no)) {
         ((struct enumerate_reachable_s *)ped)->proc(p, sz,
                         ((struct enumerate_reachable_s *)ped)->client_data);
       }
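Note: the patch relies on several helpers that the diff itself does not define: MARK_TAG, uncollectable_bit_from_hdr(), clear_uncollectable_bit_from_hdr(), GC_set_uncollectable(), GC_clear_uncollectable() and GC_is_uncollectable(). The sketch below shows one way they might look; it is an assumption, not the actual implementation. It presumes byte-sized mark entries (USE_MARK_BYTES), with bit 0 of each mark byte serving as the mark tag and bit 1 as the uncollectable tag, and it presumes mark_bit_from_hdr() is redefined to test MARK_TAG alone (otherwise the explicit uncollectable_bit_from_hdr() checks added in reclaim.c would be redundant).

/* Hypothetical tag layout over the mark bytes (USE_MARK_BYTES).        */
#define MARK_TAG          0x1 /* set when the marker reaches the object */
#define UNCOLLECTABLE_TAG 0x2 /* set while the object is pinned         */

/* mark_bit_from_hdr() is presumed to test the mark tag only.           */
#define mark_bit_from_hdr(hhdr, n) \
                (((hhdr) -> hb_marks[n] & MARK_TAG) != 0)
#define uncollectable_bit_from_hdr(hhdr, n) \
                (((hhdr) -> hb_marks[n] & UNCOLLECTABLE_TAG) != 0)
#define set_uncollectable_bit_from_hdr(hhdr, n) \
                (void)((hhdr) -> hb_marks[n] |= UNCOLLECTABLE_TAG)
#define clear_uncollectable_bit_from_hdr(hhdr, n) \
                (void)((hhdr) -> hb_marks[n] &= ~UNCOLLECTABLE_TAG)

/* Object-level wrappers, modeled on GC_set_mark_bit() in mark.c; the   */
/* real versions would need the same locking discipline as that code.   */
GC_INNER void GC_set_uncollectable(void *p)
{
    struct hblk *h = HBLKPTR(p);
    hdr *hhdr = HDR(h);
    size_t bit_no = MARK_BIT_NO((size_t)((ptr_t)p - (ptr_t)h), hhdr -> hb_sz);

    set_uncollectable_bit_from_hdr(hhdr, bit_no);
}

GC_INNER void GC_clear_uncollectable(void *p)
{
    struct hblk *h = HBLKPTR(p);
    hdr *hhdr = HDR(h);
    size_t bit_no = MARK_BIT_NO((size_t)((ptr_t)p - (ptr_t)h), hhdr -> hb_sz);

    clear_uncollectable_bit_from_hdr(hhdr, bit_no);
}

GC_INNER int GC_is_uncollectable(void *p)
{
    struct hblk *h = HBLKPTR(p);
    const hdr *hhdr = HDR(h);
    size_t bit_no = MARK_BIT_NO((size_t)((ptr_t)p - (ptr_t)h), hhdr -> hb_sz);

    return uncollectable_bit_from_hdr(hhdr, bit_no);
}

Only the !PARALLEL_MARK branch of SET_MARK_BIT_EXIT_IF_SET is touched above; the parallel-marking variant would presumably need a matching atomic update (e.g. an OR via libatomic_ops) to set MARK_TAG without clobbering the uncollectable tag.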
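For illustration, here is a hypothetical client-side view of the new semantics in a FINALIZER_ELISION build: GC_malloc_uncollectable() now hands out an ordinary NORMAL object carrying a per-object uncollectable tag instead of an object of the separate UNCOLLECTABLE kind, and GC_free() (with GC_ASSERT enabled) is expected to be called only on tagged objects.

#include "gc.h"

int main(void)
{
    void *obj;

    GC_INIT();
    obj = GC_malloc_uncollectable(64);
                /* a NORMAL object with the uncollectable tag set       */
    GC_gcollect();
                /* obj survives the collection even though no pointer   */
                /* to it is reachable                                   */
    GC_free(obj);
                /* clears the tag and reclaims the object; under this   */
                /* patch the new GC_ASSERT(GC_is_uncollectable(p)) in   */
                /* GC_free() means explicit free is expected only for   */
                /* tagged objects                                       */
    return 0;
}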