[Backport] 8159440: Move marking of promoted objects during initial m… #696

Merged · 1 commit · Oct 26, 2023
2 changes: 0 additions & 2 deletions src/hotspot/share/gc/g1/g1Allocator.cpp
@@ -83,8 +83,6 @@ void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
// we allocate to in the region sets. We'll re-add it later, when
// it's retired again.
_g1h->old_set_remove(retained_region);
bool during_im = _g1h->collector_state()->in_initial_mark_gc();
retained_region->note_start_of_copying(during_im);
old->set(retained_region);
_g1h->hr_printer()->reuse(retained_region);
evacuation_info.set_alloc_regions_used_before(retained_region->used());
9 changes: 5 additions & 4 deletions src/hotspot/share/gc/g1/g1CollectedHeap.cpp
@@ -4930,8 +4930,6 @@ HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, InCSetState d
}
_g1_policy->remset_tracker()->update_at_allocate(new_alloc_region);
_hr_printer.alloc(new_alloc_region);
bool during_im = collector_state()->in_initial_mark_gc();
new_alloc_region->note_start_of_copying(during_im);
return new_alloc_region;
}
return NULL;
@@ -4940,12 +4938,15 @@ HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, InCSetState d
void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
size_t allocated_bytes,
InCSetState dest) {
bool during_im = collector_state()->in_initial_mark_gc();
alloc_region->note_end_of_copying(during_im);
g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
if (dest.is_old()) {
_old_set.add(alloc_region);
}

bool const during_im = collector_state()->in_initial_mark_gc();
if (during_im && allocated_bytes > 0) {
_cm->root_regions()->add(alloc_region);
}
_hr_printer.retire(alloc_region);
}

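The behavioral core of the hunk above: rather than adjusting nTAMS through note_start_of_copying()/note_end_of_copying(), retiring a GC allocation region during an initial-mark pause now simply registers the region as a marking root when it actually received copied objects. A minimal standalone sketch of that decision, using simplified stand-in types (Region, RootRegions) rather than the HotSpot classes:

#include <atomic>
#include <cstddef>

// Simplified model of the new retire path: during an initial-mark pause, a GC
// allocation region that received at least one promoted object becomes a root
// region for the following concurrent mark. Not HotSpot code.
struct Region {};

struct RootRegions {
    Region**            slots;     // assumed to be pre-sized to the maximum region count
    std::atomic<size_t> count{0};

    void add(Region* r) {
        // Parallel GC workers may retire regions concurrently, hence the atomic bump
        // (the real code uses Atomic::add on _num_root_regions).
        size_t idx = count.fetch_add(1, std::memory_order_relaxed);
        slots[idx] = r;
    }
};

void retire_gc_alloc_region(Region* r, size_t allocated_bytes,
                            bool during_initial_mark, RootRegions& roots) {
    // Only regions that received copied objects need root scanning later.
    if (during_initial_mark && allocated_bytes > 0) {
        roots.add(r);
    }
}

This path covers both survivor and old destination regions; the allocated_bytes > 0 guard keeps regions that received nothing out of the concurrent scan.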
65 changes: 39 additions & 26 deletions src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
@@ -257,21 +257,35 @@ void G1CMMarkStack::set_empty() {
_free_list = NULL;
}

G1CMRootRegions::G1CMRootRegions() :
_survivors(NULL), _cm(NULL), _scan_in_progress(false),
_should_abort(false), _claimed_survivor_index(0) { }
G1CMRootRegions::G1CMRootRegions(uint const max_regions) :
_root_regions(NEW_C_HEAP_ARRAY(HeapRegion*, max_regions, mtGC)),
_max_regions(max_regions),
_num_root_regions(0),
_claimed_root_regions(0),
_scan_in_progress(false),
_should_abort(false) { }

void G1CMRootRegions::init(const G1SurvivorRegions* survivors, G1ConcurrentMark* cm) {
_survivors = survivors;
_cm = cm;
G1CMRootRegions::~G1CMRootRegions() {
FREE_C_HEAP_ARRAY(HeapRegion*, _max_regions);
}

void G1CMRootRegions::reset() {
_num_root_regions = 0;
}

void G1CMRootRegions::add(HeapRegion* hr) {
assert_at_safepoint();
size_t idx = Atomic::add((size_t)1, &_num_root_regions) - 1;
assert(idx < _max_regions, "Trying to add more root regions than there is space " SIZE_FORMAT, _max_regions);
_root_regions[idx] = hr;
}

void G1CMRootRegions::prepare_for_scan() {
assert(!scan_in_progress(), "pre-condition");

// Currently, only survivors can be root regions.
_claimed_survivor_index = 0;
_scan_in_progress = _survivors->regions()->is_nonempty();
_scan_in_progress = _num_root_regions > 0;

_claimed_root_regions = 0;
_should_abort = false;
}

@@ -282,18 +296,19 @@ HeapRegion* G1CMRootRegions::claim_next() {
return NULL;
}

// Currently, only survivors can be root regions.
const GrowableArray<HeapRegion*>* survivor_regions = _survivors->regions();
if (_claimed_root_regions >= _num_root_regions) {
return NULL;
}

int claimed_index = Atomic::add(1, &_claimed_survivor_index) - 1;
if (claimed_index < survivor_regions->length()) {
return survivor_regions->at(claimed_index);
size_t claimed_index = Atomic::add((size_t)1, &_claimed_root_regions) - 1;
if (claimed_index < _num_root_regions) {
return _root_regions[claimed_index];
}
return NULL;
}

uint G1CMRootRegions::num_root_regions() const {
return (uint)_survivors->regions()->length();
return (uint)_num_root_regions;
}

void G1CMRootRegions::notify_scan_done() {
@@ -309,12 +324,10 @@ void G1CMRootRegions::cancel_scan() {
void G1CMRootRegions::scan_finished() {
assert(scan_in_progress(), "pre-condition");

// Currently, only survivors can be root regions.
if (!_should_abort) {
assert(_claimed_survivor_index >= 0, "otherwise comparison is invalid: %d", _claimed_survivor_index);
assert((uint)_claimed_survivor_index >= _survivors->length(),
"we should have claimed all survivors, claimed index = %u, length = %u",
(uint)_claimed_survivor_index, _survivors->length());
assert(_claimed_root_regions >= num_root_regions(),
"we should have claimed all root regions, claimed " SIZE_FORMAT ", length = %u",
_claimed_root_regions, num_root_regions());
}

notify_scan_done();
@@ -355,7 +368,7 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,

_heap(_g1h->reserved_region()),

_root_regions(),
_root_regions(_g1h->max_regions()),

_global_mark_stack(),

@@ -411,8 +424,6 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
SATBMarkQueueSet& satb_qs = G1BarrierSet::satb_mark_queue_set();
satb_qs.set_buffer_size(G1SATBBufferSize);

_root_regions.init(_g1h->survivor(), this);

if (FLAG_IS_DEFAULT(ConcGCThreads) || ConcGCThreads == 0) {
// Calculate the number of concurrent worker threads by scaling
// the number of parallel GC threads.
@@ -755,6 +766,8 @@ void G1ConcurrentMark::pre_initial_mark() {
// For each region note start of marking.
NoteStartOfMarkHRClosure startcl;
_g1h->heap_region_iterate(&startcl);

_root_regions.reset();
}


@@ -886,12 +899,12 @@ uint G1ConcurrentMark::calc_active_marking_workers() {
}

void G1ConcurrentMark::scan_root_region(HeapRegion* hr, uint worker_id) {
// Currently, only survivors can be root regions.
assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
assert(hr->is_old() || (hr->is_survivor() && hr->next_top_at_mark_start() == hr->bottom()),
"Root regions must be old or survivor but region %u is %s", hr->hrm_index(), hr->get_type_str());
G1RootRegionScanClosure cl(_g1h, this, worker_id);

const uintx interval = PrefetchScanIntervalInBytes;
HeapWord* curr = hr->bottom();
HeapWord* curr = hr->next_top_at_mark_start();
const HeapWord* end = hr->top();
while (curr < end) {
Prefetch::read(curr, interval);
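The change to scan_root_region() just above pins down the scan range: a root region's interesting interval is [nTAMS, top). Survivors have nTAMS == bottom, so the whole region is scanned; old regions expose only the objects promoted during the initial-mark pause. A compilable sketch of that walk, where HeapWord, obj_size_in_words and mark_references_of are simplified stand-ins for the real object and bitmap machinery:

#include <cstddef>

typedef size_t HeapWord;  // stand-in for HotSpot's HeapWord

// Assumed helpers, not HotSpot API: report an object's size in words and mark
// everything the object references on the next bitmap.
static size_t obj_size_in_words(const HeapWord* /*obj*/) { return 1; }
static void   mark_references_of(const HeapWord* /*obj*/) { /* model only */ }

// Mirrors the new scan_root_region(): walk [nTAMS, top), not [bottom, top).
static void scan_root_region(const HeapWord* ntams, const HeapWord* top) {
    // Objects in [nTAMS, top) are implicitly live for this marking cycle, so the
    // scan never marks them; it only keeps their referents alive (SATB invariant).
    for (const HeapWord* cur = ntams; cur < top; cur += obj_size_in_words(cur)) {
        mark_references_of(cur);
    }
}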
55 changes: 28 additions & 27 deletions src/hotspot/share/gc/g1/g1ConcurrentMark.hpp
@@ -225,34 +225,37 @@ class G1CMMarkStack {
template<typename Fn> void iterate(Fn fn) const PRODUCT_RETURN;
};

// Root Regions are regions that are not empty at the beginning of a
// marking cycle and which we might collect during an evacuation pause
// while the cycle is active. Given that, during evacuation pauses, we
// do not copy objects that are explicitly marked, what we have to do
// for the root regions is to scan them and mark all objects reachable
// from them. According to the SATB assumptions, we only need to visit
// each object once during marking. So, as long as we finish this scan
// before the next evacuation pause, we can copy the objects from the
// root regions without having to mark them or do anything else to them.
//
// Currently, we only support root region scanning once (at the start
// of the marking cycle) and the root regions are all the survivor
// regions populated during the initial-mark pause.
// Root Regions are regions that contain objects from nTAMS to top. These are roots
// for marking, i.e. their referenced objects must be kept alive to maintain the
// SATB invariant.
// We could scan and mark them through during the initial-mark pause, but for
// pause time reasons we move this work to the concurrent phase.
// We need to complete this procedure before the next GC because it might determine
// that some of these "root objects" are dead, potentially dropping some required
// references.
// Root regions comprise of the complete contents of survivor regions, and any
// objects copied into old gen during GC.
class G1CMRootRegions {
private:
const G1SurvivorRegions* _survivors;
G1ConcurrentMark* _cm;
HeapRegion** _root_regions;
size_t const _max_regions;

volatile size_t _num_root_regions; // Actual number of root regions.

volatile size_t _claimed_root_regions; // Number of root regions currently claimed.

volatile bool _scan_in_progress;
volatile bool _should_abort;
volatile int _claimed_survivor_index;
volatile bool _scan_in_progress;
volatile bool _should_abort;

void notify_scan_done();

public:
G1CMRootRegions();
// We actually do most of the initialization in this method.
void init(const G1SurvivorRegions* survivors, G1ConcurrentMark* cm);
G1CMRootRegions(uint const max_regions);
~G1CMRootRegions();

// Reset the data structure to allow addition of new root regions.
void reset();

void add(HeapRegion* hr);

// Reset the claiming / scanning of the root regions.
void prepare_for_scan();
@@ -558,7 +561,7 @@ class G1ConcurrentMark : public CHeapObj<mtGC> {
// them.
void scan_root_regions();

// Scan a single root region and mark everything reachable from it.
// Scan a single root region from nTAMS to top and mark everything reachable from it.
void scan_root_region(HeapRegion* hr, uint worker_id);

// Do concurrent phase of marking, to a tentative transitive closure.
@@ -598,10 +601,8 @@ class G1ConcurrentMark : public CHeapObj<mtGC> {
void print_on_error(outputStream* st) const;

// Mark the given object on the next bitmap if it is below nTAMS.
// If the passed obj_size is zero, it is recalculated from the given object if
// needed. This is to be as lazy as possible with accessing the object's size.
inline bool mark_in_next_bitmap(uint worker_id, HeapRegion* const hr, oop const obj, size_t const obj_size = 0);
inline bool mark_in_next_bitmap(uint worker_id, oop const obj, size_t const obj_size = 0);
inline bool mark_in_next_bitmap(uint worker_id, HeapRegion* const hr, oop const obj);
inline bool mark_in_next_bitmap(uint worker_id, oop const obj);

inline bool is_marked_in_next_bitmap(oop p) const;

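The new data members above (_num_root_regions, _claimed_root_regions) implement a simple lock-free cursor: add() bumps the count at a safepoint, prepare_for_scan() resets the claim index, and claim_next() (in the .cpp earlier in this diff) hands each worker the next unclaimed slot. A compilable model of that protocol, using std::atomic in place of HotSpot's Atomic:: wrappers; ClaimList and its names are illustrative, not HotSpot API:

#include <atomic>
#include <cstddef>

template <typename T>
class ClaimList {
public:
    ClaimList(T** items, size_t count) : _items(items), _count(count), _claimed(0) {}

    // Reset claiming before a new scan (prepare_for_scan() in the patch).
    void prepare_for_scan() { _claimed.store(0, std::memory_order_relaxed); }

    // Each worker atomically takes the next unclaimed index; an index at or past
    // the registered count means the list is drained.
    T* claim_next() {
        size_t idx = _claimed.fetch_add(1, std::memory_order_relaxed);
        return (idx < _count) ? _items[idx] : nullptr;
    }

private:
    T**                 _items;
    size_t              _count;
    std::atomic<size_t> _claimed;
};

The real claim_next() additionally returns early once _claimed_root_regions has reached _num_root_regions, so a drained list does not keep inflating the counter.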
8 changes: 4 additions & 4 deletions src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp
@@ -55,12 +55,12 @@ inline bool G1CMSubjectToDiscoveryClosure::do_object_b(oop obj) {
return _g1h->heap_region_containing(obj)->is_old_or_humongous();
}

inline bool G1ConcurrentMark::mark_in_next_bitmap(uint const worker_id, oop const obj, size_t const obj_size) {
inline bool G1ConcurrentMark::mark_in_next_bitmap(uint const worker_id, oop const obj) {
HeapRegion* const hr = _g1h->heap_region_containing(obj);
return mark_in_next_bitmap(worker_id, hr, obj, obj_size);
return mark_in_next_bitmap(worker_id, hr, obj);
}

inline bool G1ConcurrentMark::mark_in_next_bitmap(uint const worker_id, HeapRegion* const hr, oop const obj, size_t const obj_size) {
inline bool G1ConcurrentMark::mark_in_next_bitmap(uint const worker_id, HeapRegion* const hr, oop const obj) {
assert(hr != NULL, "just checking");
assert(hr->is_in_reserved(obj), "Attempting to mark object at " PTR_FORMAT " that is not contained in the given region %u", p2i(obj), hr->hrm_index());

@@ -76,7 +76,7 @@ inline bool G1ConcurrentMark::mark_in_next_bitmap(uint const worker_id, HeapRegi

bool success = _next_mark_bitmap->par_mark(obj_addr);
if (success) {
add_to_liveness(worker_id, obj, obj_size == 0 ? obj->size() : obj_size);
add_to_liveness(worker_id, obj, obj->size());
}
return success;
}
2 changes: 1 addition & 1 deletion src/hotspot/share/gc/g1/g1EvacFailure.cpp
@@ -126,7 +126,7 @@ class RemoveSelfForwardPtrObjClosure: public ObjectClosure {
// explicitly and all objects in the CSet are considered
// (implicitly) live. So, we won't mark them explicitly and
// we'll leave them over NTAMS.
_cm->mark_in_next_bitmap(_worker_id, obj);
_cm->mark_in_next_bitmap(_worker_id, _hr, obj);
}
size_t obj_size = obj->size();

5 changes: 0 additions & 5 deletions src/hotspot/share/gc/g1/g1OopClosures.hpp
@@ -117,11 +117,6 @@ class G1ParCopyHelper : public OopClosure {
// during the GC (i.e., non-CSet objects). It is MT-safe.
inline void mark_object(oop obj);

// Mark the object if it's not already marked. This is used to mark
// objects pointed to by roots that have been forwarded during a
// GC. It is MT-safe.
inline void mark_forwarded_object(oop from_obj, oop to_obj);

G1ParCopyHelper(G1CollectedHeap* g1h, G1ParScanThreadState* par_scan_state);
~G1ParCopyHelper() { }

20 changes: 0 additions & 20 deletions src/hotspot/share/gc/g1/g1OopClosures.inline.hpp
@@ -214,21 +214,6 @@ void G1ParCopyHelper::mark_object(oop obj) {
_cm->mark_in_next_bitmap(_worker_id, obj);
}

void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
assert(from_obj->is_forwarded(), "from obj should be forwarded");
assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
assert(from_obj != to_obj, "should not be self-forwarded");

assert(_g1h->heap_region_containing(from_obj)->in_collection_set(), "from obj should be in the CSet");
assert(!_g1h->heap_region_containing(to_obj)->in_collection_set(), "should not mark objects in the CSet");

// The object might be in the process of being copied by another
// worker so we cannot trust that its to-space image is
// well-formed. So we have to read its size from its from-space
// image which we know should not be changing.
_cm->mark_in_next_bitmap(_worker_id, to_obj, from_obj->size());
}

void G1ParCopyHelper::trim_queue_partially() {
_par_scan_state->trim_queue_partially();
}
@@ -257,11 +242,6 @@ void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
}
assert(forwardee != NULL, "forwardee should not be NULL");
RawAccess<IS_NOT_NULL>::oop_store(p, forwardee);
if (do_mark_object != G1MarkNone && forwardee != obj) {
// If the object is self-forwarded we don't need to explicitly
// mark it, the evacuation failure protocol will do so.
mark_forwarded_object(obj, forwardee);
}

if (barrier == G1BarrierCLD) {
do_cld_barrier(forwardee);
8 changes: 0 additions & 8 deletions src/hotspot/share/gc/g1/heapRegion.hpp
@@ -524,14 +524,6 @@ class HeapRegion: public G1ContiguousSpace {
// info fields.
inline void note_end_of_marking();

// Notify the region that it will be used as to-space during a GC
// and we are about to start copying objects into it.
inline void note_start_of_copying(bool during_initial_mark);

// Notify the region that it ceases being to-space during a GC and
// we will not copy objects into it any more.
inline void note_end_of_copying(bool during_initial_mark);

// Notify the region that we are about to start processing
// self-forwarded objects during evac failure handling.
void note_self_forwarding_removal_start(bool during_initial_mark,
43 changes: 0 additions & 43 deletions src/hotspot/share/gc/g1/heapRegion.inline.hpp
@@ -252,49 +252,6 @@ inline void HeapRegion::note_end_of_marking() {
_next_marked_bytes = 0;
}

inline void HeapRegion::note_start_of_copying(bool during_initial_mark) {
if (is_survivor()) {
// This is how we always allocate survivors.
assert(_next_top_at_mark_start == bottom(), "invariant");
} else {
if (during_initial_mark) {
// During initial-mark we'll explicitly mark any objects on old
// regions that are pointed to by roots. Given that explicit
// marks only make sense under NTAMS it'd be nice if we could
// check that condition if we wanted to. Given that we don't
// know where the top of this region will end up, we simply set
// NTAMS to the end of the region so all marks will be below
// NTAMS. We'll set it to the actual top when we retire this region.
_next_top_at_mark_start = end();
} else {
// We could have re-used this old region as to-space over a
// couple of GCs since the start of the concurrent marking
// cycle. This means that [bottom,NTAMS) will contain objects
// copied up to and including initial-mark and [NTAMS, top)
// will contain objects copied during the concurrent marking cycle.
assert(top() >= _next_top_at_mark_start, "invariant");
}
}
}

inline void HeapRegion::note_end_of_copying(bool during_initial_mark) {
if (is_survivor()) {
// This is how we always allocate survivors.
assert(_next_top_at_mark_start == bottom(), "invariant");
} else {
if (during_initial_mark) {
// See the comment for note_start_of_copying() for the details
// on this.
assert(_next_top_at_mark_start == end(), "pre-condition");
_next_top_at_mark_start = top();
} else {
// See the comment for note_start_of_copying() for the details
// on this.
assert(top() >= _next_top_at_mark_start, "invariant");
}
}
}

inline bool HeapRegion::in_collection_set() const {
return G1CollectedHeap::heap()->is_in_cset(this);
}
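The two removed methods above existed only to move nTAMS temporarily to end() and then back to top(), so that objects promoted into old regions during an initial-mark pause would stay below nTAMS and could be marked explicitly. With promoted objects handled through root-region scanning, the standard TAMS convention is enough. A one-function sketch of that convention, assuming simplified parameters rather than HotSpot types:

#include <cstdint>

// Simplified model of the TAMS liveness rule that makes note_start/end_of_copying()
// unnecessary (not HotSpot code): during marking, an object below nTAMS is live only
// if its bit is set on the next bitmap, while anything allocated at or above nTAMS is
// implicitly live. Promoted objects land above nTAMS, so instead of temporarily moving
// nTAMS to cover them, the patch scans them as root regions.
static bool is_live_during_marking(uintptr_t obj_addr, uintptr_t ntams,
                                   bool marked_on_next_bitmap) {
    return obj_addr >= ntams || marked_on_next_bitmap;
}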