diff -r 9766f73e770d src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp Fri May 31 14:32:44 2013 +0200
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp Wed Aug 14 23:33:14 2013 +0200
@@ -33,6 +33,7 @@
 #include "memory/allocation.inline.hpp"
 #include "memory/blockOffsetTable.inline.hpp"
 #include "memory/resourceArea.hpp"
+#include "memory/space.inline.hpp"
 #include "memory/universe.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/globals.hpp"
@@ -2097,8 +2098,18 @@
 
 // Support for compaction
 
+class CompactibleFreeListSpaceProxy {
+  CompactibleFreeListSpace* const _cfls;
+ public:
+  CompactibleFreeListSpaceProxy(CompactibleFreeListSpace* cfls) : _cfls(cfls) {}
+  HeapWord* scan_limit() { return _cfls->end(); }
+  bool block_is_obj(const HeapWord* addr) { return _cfls->block_is_obj(addr); }
+  size_t block_size(const HeapWord* addr) { return _cfls->block_size(addr); }
+};
+
 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
-  SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
+  CompactibleFreeListSpaceProxy proxy(this);
+  scan_and_forward(proxy, cp);
   // prepare_for_compaction() uses the space between live objects
   // so that later phase can skip dead space quickly.  So verification
   // of the free lists doesn't work after.
diff -r 9766f73e770d src/share/vm/memory/space.cpp
--- a/src/share/vm/memory/space.cpp Fri May 31 14:32:44 2013 +0200
+++ b/src/share/vm/memory/space.cpp Wed Aug 14 23:33:14 2013 +0200
@@ -443,13 +443,33 @@
 #define obj_size(q) oop(q)->size()
 #define adjust_obj_size(s) s
 
+class CompactibleSpaceProxy {
+  CompactibleSpace* const _cs;
+ public:
+  CompactibleSpaceProxy(CompactibleSpace* cs) : _cs(cs) {}
+  HeapWord* scan_limit() { return _cs->end(); }
+  bool block_is_obj(const HeapWord* addr) { return _cs->block_is_obj(addr); }
+  size_t block_size(const HeapWord* addr) { return _cs->block_size(addr); }
+};
+
 void CompactibleSpace::prepare_for_compaction(CompactPoint* cp) {
-  SCAN_AND_FORWARD(cp, end, block_is_obj, block_size);
+  CompactibleSpaceProxy proxy(this);
+  scan_and_forward(proxy, cp);
 }
 
+class ContiguousSpaceProxy {
+  ContiguousSpace* const _cs;
+ public:
+  ContiguousSpaceProxy(ContiguousSpace* cs) : _cs(cs) {}
+  HeapWord* scan_limit() { return _cs->top(); }
+  bool block_is_obj(const HeapWord* addr) { return true; }
+  size_t block_size(const HeapWord* addr) { return oop(addr)->size(); }
+};
+
 // Faster object search.
 void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
-  SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size);
+  ContiguousSpaceProxy proxy(this);
+  scan_and_forward(proxy, cp);
 }
 
 void Space::adjust_pointers() {
diff -r 9766f73e770d src/share/vm/memory/space.hpp
--- a/src/share/vm/memory/space.hpp Fri May 31 14:32:44 2013 +0200
+++ b/src/share/vm/memory/space.hpp Wed Aug 14 23:33:14 2013 +0200
@@ -507,129 +507,11 @@
   // words remaining after this operation.
   bool insert_deadspace(size_t& allowed_deadspace_words, HeapWord* q,
                         size_t word_len);
+
+  template <class SpaceProxy>
+  void scan_and_forward(SpaceProxy& proxy, CompactPoint* cp);
 };
 
-#define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size) { \
-  /* Compute the new addresses for the live objects and store it in the mark \
-   * Used by universe::mark_sweep_phase2() \
-   */ \
-  HeapWord* compact_top; /* This is where we are currently compacting to. */ \
-  \
-  /* We're sure to be here before any objects are compacted into this \
-   * space, so this is a good time to initialize this: \
-   */ \
-  set_compaction_top(bottom()); \
-  \
-  if (cp->space == NULL) { \
-    assert(cp->gen != NULL, "need a generation"); \
-    assert(cp->threshold == NULL, "just checking"); \
-    assert(cp->gen->first_compaction_space() == this, "just checking"); \
-    cp->space = cp->gen->first_compaction_space(); \
-    compact_top = cp->space->bottom(); \
-    cp->space->set_compaction_top(compact_top); \
-    cp->threshold = cp->space->initialize_threshold(); \
-  } else { \
-    compact_top = cp->space->compaction_top(); \
-  } \
-  \
-  /* We allow some amount of garbage towards the bottom of the space, so \
-   * we don't start compacting before there is a significant gain to be made.\
-   * Occasionally, we want to ensure a full compaction, which is determined \
-   * by the MarkSweepAlwaysCompactCount parameter. \
-   */ \
-  uint invocations = MarkSweep::total_invocations(); \
-  bool skip_dead = ((invocations % MarkSweepAlwaysCompactCount) != 0); \
-  \
-  size_t allowed_deadspace = 0; \
-  if (skip_dead) { \
-    const size_t ratio = allowed_dead_ratio(); \
-    allowed_deadspace = (capacity() * ratio / 100) / HeapWordSize; \
-  } \
-  \
-  HeapWord* q = bottom(); \
-  HeapWord* t = scan_limit(); \
-  \
-  HeapWord* end_of_live= q;    /* One byte beyond the last byte of the last \
-                                  live object. */ \
-  HeapWord* first_dead = end();/* The first dead object. */ \
-  LiveRange* liveRange = NULL; /* The current live range, recorded in the \
-                                  first header of preceding free area. */ \
-  _first_dead = first_dead; \
-  \
-  const intx interval = PrefetchScanIntervalInBytes; \
-  \
-  while (q < t) { \
-    assert(!block_is_obj(q) || \
-           oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() || \
-           oop(q)->mark()->has_bias_pattern(), \
-           "these are the only valid states during a mark sweep"); \
-    if (block_is_obj(q) && oop(q)->is_gc_marked()) { \
-      /* prefetch beyond q */ \
-      Prefetch::write(q, interval); \
-      size_t size = block_size(q); \
-      compact_top = cp->space->forward(oop(q), size, cp, compact_top); \
-      q += size; \
-      end_of_live = q; \
-    } else { \
-      /* run over all the contiguous dead objects */ \
-      HeapWord* end = q; \
-      do { \
-        /* prefetch beyond end */ \
-        Prefetch::write(end, interval); \
-        end += block_size(end); \
-      } while (end < t && (!block_is_obj(end) || !oop(end)->is_gc_marked()));\
-      \
-      /* see if we might want to pretend this object is alive so that \
-       * we don't have to compact quite as often. \
-       */ \
-      if (allowed_deadspace > 0 && q == compact_top) { \
-        size_t sz = pointer_delta(end, q); \
-        if (insert_deadspace(allowed_deadspace, q, sz)) { \
-          compact_top = cp->space->forward(oop(q), sz, cp, compact_top); \
-          q = end; \
-          end_of_live = end; \
-          continue; \
-        } \
-      } \
-      \
-      /* otherwise, it really is a free region. */ \
-      \
-      /* for the previous LiveRange, record the end of the live objects. */ \
-      if (liveRange) { \
-        liveRange->set_end(q); \
-      } \
-      \
-      /* record the current LiveRange object. \
-       * liveRange->start() is overlaid on the mark word. \
-       */ \
-      liveRange = (LiveRange*)q; \
-      liveRange->set_start(end); \
-      liveRange->set_end(end); \
-      \
-      /* see if this is the first dead region. */ \
-      if (q < first_dead) { \
-        first_dead = q; \
-      } \
-      \
-      /* move on to the next object */ \
-      q = end; \
-    } \
-  } \
-  \
-  assert(q == t, "just checking"); \
-  if (liveRange != NULL) { \
-    liveRange->set_end(q); \
-  } \
-  _end_of_live = end_of_live; \
-  if (end_of_live < first_dead) { \
-    first_dead = end_of_live; \
-  } \
-  _first_dead = first_dead; \
-  \
-  /* save the compaction_top of the compaction space. */ \
-  cp->space->set_compaction_top(compact_top); \
-}
-
 #define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) { \
   /* adjust all the interior pointers to point at the new locations of objects \
    * Used by MarkSweep::mark_sweep_phase3() */ \
diff -r 9766f73e770d src/share/vm/memory/space.inline.hpp
--- a/src/share/vm/memory/space.inline.hpp Fri May 31 14:32:44 2013 +0200
+++ b/src/share/vm/memory/space.inline.hpp Wed Aug 14 23:33:14 2013 +0200
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_MEMORY_SPACE_INLINE_HPP
 #define SHARE_VM_MEMORY_SPACE_INLINE_HPP
 
+#include "gc_implementation/shared/liveRange.hpp"
 #include "gc_interface/collectedHeap.hpp"
 #include "memory/space.hpp"
 #include "memory/universe.hpp"
@@ -67,4 +68,121 @@
   return _offsets.block_start(p);
 }
 
+template <class SpaceProxy>
+inline void CompactibleSpace::scan_and_forward(SpaceProxy& proxy, CompactPoint* cp) {
+  // Compute the new addresses for the live objects and store it in the mark
+  // Used by universe::mark_sweep_phase2()
+  HeapWord* compact_top; // This is where we are currently compacting to.
+
+  // We're sure to be here before any objects are compacted into this
+  // space, so this is a good time to initialize this:
+  set_compaction_top(bottom());
+
+  if (cp->space == NULL) {
+    assert(cp->gen != NULL, "need a generation");
+    assert(cp->threshold == NULL, "just checking");
+    assert(cp->gen->first_compaction_space() == this, "just checking");
+    cp->space = cp->gen->first_compaction_space();
+    compact_top = cp->space->bottom();
+    cp->space->set_compaction_top(compact_top);
+    cp->threshold = cp->space->initialize_threshold();
+  } else {
+    compact_top = cp->space->compaction_top();
+  }
+
+  // We allow some amount of garbage towards the bottom of the space, so
+  // we don't start compacting before there is a significant gain to be made.
+  // Occasionally, we want to ensure a full compaction, which is determined
+  // by the MarkSweepAlwaysCompactCount parameter.
+  uint invocations = MarkSweep::total_invocations();
+  bool skip_dead = ((invocations % MarkSweepAlwaysCompactCount) != 0);
+
+  size_t allowed_deadspace = 0;
+  if (skip_dead) {
+    const size_t ratio = allowed_dead_ratio();
+    allowed_deadspace = (capacity() * ratio / 100) / HeapWordSize;
+  }
+
+  HeapWord* q = bottom();
+  HeapWord* t = proxy.scan_limit();
+
+  HeapWord* end_of_live= q;     // One byte beyond the last byte of the last
+                                // live object.
+  HeapWord* first_dead = end(); // The first dead object.
+  LiveRange* liveRange = NULL;  // The current live range, recorded in the
+                                // first header of preceding free area.
+  _first_dead = first_dead;
+
+  const intx interval = PrefetchScanIntervalInBytes;
+
+  while (q < t) {
+    assert(!proxy.block_is_obj(q) ||
+           oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() ||
+           oop(q)->mark()->has_bias_pattern(),
+           "these are the only valid states during a mark sweep");
+    if (proxy.block_is_obj(q) && oop(q)->is_gc_marked()) {
+      // prefetch beyond q
+      Prefetch::write(q, interval);
+      size_t size = proxy.block_size(q);
+      compact_top = cp->space->forward(oop(q), size, cp, compact_top);
+      q += size;
+      end_of_live = q;
+    } else {
+      // run over all the contiguous dead objects
+      HeapWord* end = q;
+      do {
+        // prefetch beyond end
+        Prefetch::write(end, interval);
+        end += proxy.block_size(end);
+      } while (end < t && (!proxy.block_is_obj(end) || !oop(end)->is_gc_marked()));
+
+      // see if we might want to pretend this object is alive so that
+      // we don't have to compact quite as often.
+      if (allowed_deadspace > 0 && q == compact_top) {
+        size_t sz = pointer_delta(end, q);
+        if (insert_deadspace(allowed_deadspace, q, sz)) {
+          compact_top = cp->space->forward(oop(q), sz, cp, compact_top);
+          q = end;
+          end_of_live = end;
+          continue;
+        }
+      }
+
+      // otherwise, it really is a free region.
+
+      // for the previous LiveRange, record the end of the live objects.
+      if (liveRange) {
+        liveRange->set_end(q);
+      }
+
+      // record the current LiveRange object.
+      // liveRange->start() is overlaid on the mark word.
+      liveRange = (LiveRange*)q;
+      liveRange->set_start(end);
+      liveRange->set_end(end);
+
+      // see if this is the first dead region.
+      if (q < first_dead) {
+        first_dead = q;
+      }
+
+      // move on to the next object
+      q = end;
+    }
+  }
+
+  assert(q == t, "just checking");
+  if (liveRange != NULL) {
+    liveRange->set_end(q);
+  }
+  _end_of_live = end_of_live;
+  if (end_of_live < first_dead) {
+    first_dead = end_of_live;
+  }
+  _first_dead = first_dead;
+
+  // save the compaction_top of the compaction space.
+  cp->space->set_compaction_top(compact_top);
+}
+
 #endif // SHARE_VM_MEMORY_SPACE_INLINE_HPP
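
For reference, the pattern used by this patch can be exercised in isolation. Below is a minimal, self-contained C++ sketch, not part of the patch: ToySpace, ToySpaceProxy, count_live_blocks and the word-sized layout are made up for illustration. It shows the same idea as scan_and_forward: a template walker parameterized by a small proxy class that exposes scan_limit() / block_is_obj() / block_size(), replacing the old token-pasting macro arguments.

// Illustrative sketch only; these names do not exist in HotSpot.
#include <cstddef>
#include <cstdio>
#include <vector>

struct ToySpace {
  // Each block starts with a header word holding its size in words;
  // a header of 0 marks a dead block of size 1.
  std::vector<std::size_t> words;

  // Template walker: like scan_and_forward(), it only touches the space
  // through the three proxy operations, so any space flavour can plug in.
  template <class SpaceProxy>
  std::size_t count_live_blocks(SpaceProxy& proxy) {
    std::size_t live = 0;
    std::size_t q = 0;
    const std::size_t t = proxy.scan_limit();
    while (q < t) {
      if (proxy.block_is_obj(q)) {
        live++;
      }
      q += proxy.block_size(q);
    }
    return live;
  }
};

// One proxy per space flavour, mirroring CompactibleFreeListSpaceProxy,
// CompactibleSpaceProxy and ContiguousSpaceProxy in the patch.
class ToySpaceProxy {
  ToySpace* const _s;
 public:
  ToySpaceProxy(ToySpace* s) : _s(s) {}
  std::size_t scan_limit()                { return _s->words.size(); }
  bool        block_is_obj(std::size_t i) { return _s->words[i] != 0; }
  std::size_t block_size(std::size_t i)   { return _s->words[i] == 0 ? 1 : _s->words[i]; }
};

int main() {
  ToySpace space;
  // Layout: a 2-word live block, one dead word, a 3-word live block, a 1-word live block.
  std::size_t layout[] = {2, 0, 0, 3, 0, 0, 1};
  space.words.assign(layout, layout + sizeof(layout) / sizeof(layout[0]));
  ToySpaceProxy proxy(&space);
  std::printf("live blocks: %zu\n", space.count_live_blocks(proxy)); // prints 3
  return 0;
}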