diff -r 4e048537e103 src/share/vm/opto/c2compiler.cpp
--- a/src/share/vm/opto/c2compiler.cpp	Tue Jun 15 11:28:08 2021 +0530
+++ b/src/share/vm/opto/c2compiler.cpp	Wed Jun 16 13:43:40 2021 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -51,6 +51,9 @@
 const char* C2Compiler::retry_no_escape_analysis() {
   return "retry without escape analysis";
 }
+const char* C2Compiler::retry_no_locks_coarsening() {
+  return "retry without locks coarsening";
+}
 const char* C2Compiler::retry_class_loading_during_parsing() {
   return "retry class loading during parsing";
 }
@@ -113,9 +116,11 @@
   bool subsume_loads = SubsumeLoads;
   bool do_escape_analysis = DoEscapeAnalysis && !env->jvmti_can_access_local_variables();
   bool eliminate_boxing = EliminateAutoBox;
+  bool do_locks_coarsening = EliminateLocks;
+
   while (!env->failing()) {
     // Attempt to compile while subsuming loads into machine instructions.
-    Compile C(env, this, target, entry_bci, subsume_loads, do_escape_analysis, eliminate_boxing);
+    Compile C(env, this, target, entry_bci, subsume_loads, do_escape_analysis, eliminate_boxing, do_locks_coarsening);
 
     // Check result and retry if appropriate.
@@ -134,6 +139,11 @@
       do_escape_analysis = false;
       continue;  // retry
     }
+    if (C.failure_reason_is(retry_no_locks_coarsening())) {
+      assert(do_locks_coarsening, "must make progress");
+      do_locks_coarsening = false;
+      continue;  // retry
+    }
     if (C.has_boxed_value()) {
       // Recompile without boxing elimination regardless failure reason.
       assert(eliminate_boxing, "must make progress");
@@ -154,6 +164,10 @@
         do_escape_analysis = false;
         continue;  // retry
       }
+      if (do_locks_coarsening) {
+        do_locks_coarsening = false;
+        continue;  // retry
+      }
     }
 
     // No retry; just break the loop.
diff -r 4e048537e103 src/share/vm/opto/c2compiler.hpp
--- a/src/share/vm/opto/c2compiler.hpp	Tue Jun 15 11:28:08 2021 +0530
+++ b/src/share/vm/opto/c2compiler.hpp	Wed Jun 16 13:43:40 2021 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -49,6 +49,7 @@
   // sentinel value used to trigger backtracking in compile_method().
   static const char* retry_no_subsuming_loads();
   static const char* retry_no_escape_analysis();
+  static const char* retry_no_locks_coarsening();
   static const char* retry_class_loading_during_parsing();
 
   // Print compilation timers and statistics
diff -r 4e048537e103 src/share/vm/opto/callnode.cpp
--- a/src/share/vm/opto/callnode.cpp	Tue Jun 15 11:28:08 2021 +0530
+++ b/src/share/vm/opto/callnode.cpp	Wed Jun 16 13:43:40 2021 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1637,6 +1637,12 @@
 }
 
+const char* AbstractLockNode::_kind_names[] = {"Regular", "NonEscObj", "Coarsened", "Nested"};
+
+const char * AbstractLockNode::kind_as_string() const {
+  return _kind_names[_kind];
+}
+
 #ifndef PRODUCT
 //
 // Create a counter which counts the number of times this lock is acquired
@@ -1653,6 +1659,11 @@
     _counter->set_tag(NamedCounter::EliminatedLockCounter);
   }
 }
+
+void AbstractLockNode::dump_spec(outputStream* st) const {
+  st->print("%s ", _kind_names[_kind]);
+  CallNode::dump_spec(st);
+}
 #endif
 
 //=============================================================================
@@ -1686,6 +1697,9 @@
     return result;
   }
 
+  if (!phase->C->do_locks_coarsening()) {
+    return result; // Compiling without locks coarsening
+  }
   //
   // Try lock coarsening
   //
@@ -1723,6 +1737,9 @@
       if (PrintEliminateLocks) {
         int locks = 0;
         int unlocks = 0;
+        if (Verbose) {
+          tty->print_cr("=== Locks coarsening ===");
+        }
         for (int i = 0; i < lock_ops.length(); i++) {
           AbstractLockNode* lock = lock_ops.at(i);
           if (lock->Opcode() == Op_Lock)
@@ -1730,10 +1747,11 @@
           else
             unlocks++;
           if (Verbose) {
-            lock->dump(1);
+            tty->print(" %d: ", i);
+            lock->dump();
           }
         }
-        tty->print_cr("***Eliminated %d unlocks and %d locks", unlocks, locks);
+        tty->print_cr("=== Coarsened %d unlocks and %d locks", unlocks, locks);
       }
 #endif
 
@@ -1748,6 +1766,8 @@
 #endif
         lock->set_coarsened();
       }
+      // Record this coarsened group.
+      phase->C->add_coarsened_locks(lock_ops);
     } else if (ctrl->is_Region() &&
                iter->_worklist.member(ctrl)) {
       // We weren't able to find any opportunities but the region this
@@ -1781,15 +1801,34 @@
     // Ignore complex cases: merged locks or multiple locks.
     Node* obj = obj_node();
     LockNode* unique_lock = NULL;
-    if (!box->is_simple_lock_region(&unique_lock, obj)) {
+    Node* bad_lock = NULL;
+    if (!box->is_simple_lock_region(&unique_lock, obj, &bad_lock)) {
 #ifdef ASSERT
-      this->log_lock_optimization(c, "eliminate_lock_INLR_2a");
+      this->log_lock_optimization(c, "eliminate_lock_INLR_2a", bad_lock);
 #endif
       return false;
     }
     if (unique_lock != this) {
 #ifdef ASSERT
-      this->log_lock_optimization(c, "eliminate_lock_INLR_2b");
+      this->log_lock_optimization(c, "eliminate_lock_INLR_2b", (unique_lock != NULL ? unique_lock : bad_lock));
+      if (PrintEliminateLocks && Verbose) {
+        tty->print_cr("=============== unique_lock != this ============");
+        tty->print_cr(" this:");
+        this->dump();
+        tty->print_cr(" box:");
+        box->dump();
+        tty->print_cr(" obj:");
+        obj->dump();
+        if (unique_lock != NULL) {
+          tty->print_cr(" unique_lock:");
+          unique_lock->dump();
+        }
+        if (bad_lock != NULL) {
+          tty->print_cr(" bad_lock:");
+          bad_lock->dump();
+        }
+        tty->print_cr("===============");
+      }
 #endif
       return false;
     }
@@ -1853,23 +1892,21 @@
   return result;
 }
 
-const char * AbstractLockNode::kind_as_string() const {
-  return is_coarsened()   ? "coarsened" :
-         is_nested()      ? "nested" :
-         is_non_esc_obj() ? "non_escaping" :
-         "?";
-}
-
-void AbstractLockNode::log_lock_optimization(Compile *C, const char * tag) const {
+void AbstractLockNode::log_lock_optimization(Compile *C, const char * tag, Node* bad_lock) const {
   if (C == NULL) {
     return;
   }
   CompileLog* log = C->log();
   if (log != NULL) {
-    log->begin_head("%s lock='%d' compile_id='%d' class_id='%s' kind='%s'",
-          tag, is_Lock(), C->compile_id(),
+    Node* box = box_node();
+    Node* obj = obj_node();
+    int box_id = box != NULL ? box->_idx : -1;
+    int obj_id = obj != NULL ? obj->_idx : -1;
+
+    log->begin_head("%s compile_id='%d' lock_id='%d' class='%s' kind='%s' box_id='%d' obj_id='%d' bad_id='%d'",
+          tag, C->compile_id(), this->_idx,
           is_Unlock() ? "unlock" : is_Lock() ? "lock" : "?",
-          kind_as_string());
+          kind_as_string(), box_id, obj_id, (bad_lock != NULL ? bad_lock->_idx : -1));
     log->stamp();
     log->end_head();
     JVMState* p = is_Unlock() ? (as_Unlock()->dbg_jvms()) : jvms();
diff -r 4e048537e103 src/share/vm/opto/callnode.hpp
--- a/src/share/vm/opto/callnode.hpp	Tue Jun 15 11:28:08 2021 +0530
+++ b/src/share/vm/opto/callnode.hpp	Wed Jun 16 13:43:40 2021 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -958,6 +958,9 @@
     Coarsened,    // Lock was coarsened
     Nested        // Nested lock
   } _kind;
+
+  static const char* _kind_names[Nested+1];
+
 #ifndef PRODUCT
   NamedCounter* _counter;
 #endif
@@ -1002,7 +1005,7 @@
   bool is_nested()      const { return (_kind == Nested); }
 
   const char * kind_as_string() const;
-  void log_lock_optimization(Compile* c, const char * tag) const;
+  void log_lock_optimization(Compile* c, const char * tag, Node* bad_lock = NULL) const;
 
   void set_non_esc_obj() { _kind = NonEscObj; set_eliminated_lock_counter(); }
   void set_coarsened()   { _kind = Coarsened; set_eliminated_lock_counter(); }
@@ -1014,6 +1017,7 @@
 #ifndef PRODUCT
   void create_lock_counter(JVMState* s);
   NamedCounter* counter() const { return _counter; }
+  virtual void dump_spec(outputStream* st) const;
 #endif
 };
 
diff -r 4e048537e103 src/share/vm/opto/compile.cpp
--- a/src/share/vm/opto/compile.cpp	Tue Jun 15 11:28:08 2021 +0530
+++ b/src/share/vm/opto/compile.cpp	Wed Jun 16 13:43:40 2021 -0700
@@ -512,6 +512,12 @@
     tty->print_cr("** Bailout: Recompile without boxing elimination       **");
     tty->print_cr("*********************************************************");
   }
+  if ((_do_locks_coarsening != EliminateLocks) && PrintOpto) {
+    // Recompiling without locks coarsening
+    tty->print_cr("*********************************************************");
+    tty->print_cr("** Bailout: Recompile without locks coarsening         **");
+    tty->print_cr("*********************************************************");
+  }
   if (env()->break_at_compile()) {
     // Open the debugger when compiling this method.
tty->print("### Breaking when compiling: "); @@ -633,7 +639,8 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr_bci, - bool subsume_loads, bool do_escape_analysis, bool eliminate_boxing ) + bool subsume_loads, bool do_escape_analysis, bool eliminate_boxing, + bool do_locks_coarsening) : Phase(Compiler), _env(ci_env), _log(ci_env->log()), @@ -650,6 +657,7 @@ _subsume_loads(subsume_loads), _do_escape_analysis(do_escape_analysis), _eliminate_boxing(eliminate_boxing), + _do_locks_coarsening(do_locks_coarsening), _failure_reason(NULL), _code_buffer("Compile::Fill_buffer"), _orig_pc_slot(0), @@ -971,6 +979,7 @@ _subsume_loads(true), _do_escape_analysis(false), _eliminate_boxing(false), + _do_locks_coarsening(false), _failure_reason(NULL), _code_buffer("Compile::Fill_buffer"), _has_method_handle_invokes(false), @@ -1164,6 +1173,7 @@ _predicate_opaqs = new(comp_arena()) GrowableArray(comp_arena(), 8, 0, NULL); _expensive_nodes = new(comp_arena()) GrowableArray(comp_arena(), 8, 0, NULL); _range_check_casts = new(comp_arena()) GrowableArray(comp_arena(), 8, 0, NULL); + _coarsened_locks = new(comp_arena()) GrowableArray(comp_arena(), 8, 0, NULL); register_library_intrinsics(); } @@ -4065,6 +4075,87 @@ } /** + * Track coarsened Lock and Unlock nodes. + */ + +class Lock_List : public Node_List { + uint _origin_cnt; +public: + Lock_List(Arena *a, uint cnt) : Node_List(a), _origin_cnt(cnt) {} + uint origin_cnt() const { return _origin_cnt; } +}; + +void Compile::add_coarsened_locks(GrowableArray& locks) { + int length = locks.length(); + if (length > 0) { + // Have to keep this list until locks elimination during Macro nodes elimination. + Lock_List* locks_list = new (comp_arena()) Lock_List(comp_arena(), length); + for (int i = 0; i < length; i++) { + AbstractLockNode* lock = locks.at(i); + assert(lock->is_coarsened(), "expecting only coarsened AbstractLock nodes"); + locks_list->push(lock); + } + _coarsened_locks->append(locks_list); + } +} + +void Compile::remove_coarsened_lock(Node* n) { + if (n->is_AbstractLock()) { + int count = coarsened_count(); + for (int i = 0; i < count; i++) { + Node_List* locks_list = _coarsened_locks->at(i); + locks_list->yank(n); + } + } +} + +bool Compile::coarsened_locks_consistent() { + int count = coarsened_count(); + for (int i = 0; i < count; i++) { + bool unbalanced = false; + bool modified = false; // track locks kind modifications + Lock_List* locks_list = (Lock_List*)_coarsened_locks->at(i); + uint size = locks_list->size(); + if (size != locks_list->origin_cnt()) { + unbalanced = true; // Some locks were removed from list + } else { + for (uint j = 0; j < size; j++) { + Node* lock = locks_list->at(j); + // All nodes in group should have the same state (modified or not) + if (!lock->as_AbstractLock()->is_coarsened()) { + if (j == 0) { + // first on list was modified, the rest should be too for consistency + modified = true; + } else if (!modified) { + // this lock was modified but previous locks on the list were not + unbalanced = true; + break; + } + } else if (modified) { + // previous locks on list were modified but not this lock + unbalanced = true; + break; + } + } + } + if (unbalanced) { + // unbalanced monitor enter/exit - only some [un]lock nodes were removed or modified +#ifdef ASSERT + if (PrintEliminateLocks) { + tty->print_cr("=== unbalanced coarsened locks ==="); + for (uint l = 0; l < size; l++) { + locks_list->at(l)->dump(); + } + } +#endif + record_failure(C2Compiler::retry_no_locks_coarsening()); + return false; 
+    }
+  }
+  return true;
+}
+
+/**
  * Remove the speculative part of types and clean up the graph
  */
 void Compile::remove_speculative_types(PhaseIterGVN &igvn) {
diff -r 4e048537e103 src/share/vm/opto/compile.hpp
--- a/src/share/vm/opto/compile.hpp	Tue Jun 15 11:28:08 2021 +0530
+++ b/src/share/vm/opto/compile.hpp	Wed Jun 16 13:43:40 2021 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,6 +44,7 @@
 #include "trace/tracing.hpp"
 #include "utilities/ticks.hpp"
 
+class AbstractLockNode;
 class Block;
 class Bundle;
 class C2Compiler;
@@ -59,6 +60,7 @@
 class MachSafePointNode;
 class Node;
 class Node_Array;
+class Node_List;
 class Node_Notes;
 class OptoReg;
 class PhaseCFG;
@@ -279,6 +281,7 @@
   const bool            _subsume_loads;         // Load can be matched as part of a larger op.
   const bool            _do_escape_analysis;    // Do escape analysis.
   const bool            _eliminate_boxing;      // Do boxing elimination.
+  const bool            _do_locks_coarsening;   // Do locks coarsening
   ciMethod*             _method;                // The method being compiled.
   int                   _entry_bci;             // entry bci for osr methods.
   const TypeFunc*       _tf;                    // My kind of signature
@@ -338,6 +341,7 @@
   GrowableArray<Node*>* _predicate_opaqs;       // List of Opaque1 nodes for the loop predicates.
   GrowableArray<Node*>* _expensive_nodes;       // List of nodes that are expensive to compute and that we'd better not let the GVN freely common
   GrowableArray<Node*>* _range_check_casts;     // List of CastII nodes with a range check dependency
+  GrowableArray<Node_List*>* _coarsened_locks;  // List of coarsened Lock and Unlock nodes
   ConnectionGraph*      _congraph;
 #ifndef PRODUCT
   IdealGraphPrinter*    _printer;
@@ -529,7 +533,8 @@
   /** Do aggressive boxing elimination. */
   bool              aggressive_unboxing() const { return _eliminate_boxing && AggressiveUnboxing; }
   bool              save_argument_registers() const { return _save_argument_registers; }
-
+  /** Do locks coarsening. */
+  bool              do_locks_coarsening() const { return _do_locks_coarsening; }
 
   // Other fixed compilation parameters.
   ciMethod*         method() const              { return _method; }
@@ -666,9 +671,12 @@
   int           macro_count()             const { return _macro_nodes->length(); }
   int           predicate_count()         const { return _predicate_opaqs->length(); }
   int           expensive_count()         const { return _expensive_nodes->length(); }
+  int           coarsened_count()         const { return _coarsened_locks->length(); }
+
   Node*         macro_node(int idx)       const { return _macro_nodes->at(idx); }
   Node*         predicate_opaque1_node(int idx) const { return _predicate_opaqs->at(idx); }
   Node*         expensive_node(int idx)   const { return _expensive_nodes->at(idx); }
+
   ConnectionGraph* congraph()                   { return _congraph;}
   void set_congraph(ConnectionGraph* congraph)  { _congraph = congraph;}
   void add_macro_node(Node * n) {
@@ -685,6 +693,10 @@
     if (predicate_count() > 0 && _predicate_opaqs->contains(n)){
       _predicate_opaqs->remove(n);
     }
+    // Remove from coarsened locks list if present
+    if (coarsened_count() > 0) {
+      remove_coarsened_lock(n);
+    }
   }
   void add_expensive_node(Node * n);
   void remove_expensive_node(Node * n) {
@@ -697,6 +709,9 @@
     assert(_macro_nodes->contains(n), "should have already been in macro list");
     _predicate_opaqs->append(n);
   }
+  void add_coarsened_locks(GrowableArray<AbstractLockNode*>& locks);
+  void remove_coarsened_lock(Node* n);
+  bool coarsened_locks_consistent();
 
   // Range check dependent CastII nodes that can be removed after loop optimizations
   void add_range_check_cast(Node* n);
@@ -1041,7 +1056,7 @@
   // continuation.
   Compile(ciEnv* ci_env, C2Compiler* compiler, ciMethod* target,
           int entry_bci, bool subsume_loads, bool do_escape_analysis,
-          bool eliminate_boxing);
+          bool eliminate_boxing, bool do_locks_coarsening);
 
   // Second major entry point.  From the TypeFunc signature, generate code
   // to pass arguments from the Java calling convention to the C calling
diff -r 4e048537e103 src/share/vm/opto/locknode.cpp
--- a/src/share/vm/opto/locknode.cpp	Tue Jun 15 11:28:08 2021 +0530
+++ b/src/share/vm/opto/locknode.cpp	Wed Jun 16 13:43:40 2021 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -85,7 +85,7 @@
 }
 
 // Is BoxLock node used for one simple lock region (same box and obj)?
-bool BoxLockNode::is_simple_lock_region(LockNode** unique_lock, Node* obj) {
+bool BoxLockNode::is_simple_lock_region(LockNode** unique_lock, Node* obj, Node** bad_lock) {
   LockNode* lock = NULL;
   bool has_one_lock = false;
   for (uint i = 0; i < this->outcnt(); i++) {
@@ -102,9 +102,15 @@
             has_one_lock = true;
           } else if (lock != alock->as_Lock()) {
             has_one_lock = false;
+            if (bad_lock != NULL) {
+              *bad_lock = alock;
+            }
           }
         }
       } else {
+        if (bad_lock != NULL) {
+          *bad_lock = alock;
+        }
         return false; // Different objects
       }
     }
diff -r 4e048537e103 src/share/vm/opto/locknode.hpp
--- a/src/share/vm/opto/locknode.hpp	Tue Jun 15 11:28:08 2021 +0530
+++ b/src/share/vm/opto/locknode.hpp	Wed Jun 16 13:43:40 2021 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -75,7 +75,7 @@
   void set_eliminated()                       { _is_eliminated = true; }
 
   // Is BoxLock node used for one simple lock region?
-  bool is_simple_lock_region(LockNode** unique_lock, Node* obj);
+  bool is_simple_lock_region(LockNode** unique_lock, Node* obj, Node** bad_lock);
 
 #ifndef PRODUCT
   virtual void format( PhaseRegAlloc *, outputStream *st ) const;
diff -r 4e048537e103 src/share/vm/opto/macro.cpp
--- a/src/share/vm/opto/macro.cpp	Tue Jun 15 11:28:08 2021 +0530
+++ b/src/share/vm/opto/macro.cpp	Wed Jun 16 13:43:40 2021 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1887,15 +1887,15 @@
 // Mark all associated (same box and obj) lock and unlock nodes for
 // elimination if some of them marked already.
 void PhaseMacroExpand::mark_eliminated_box(Node* oldbox, Node* obj) {
-  if (oldbox->as_BoxLock()->is_eliminated())
+  if (oldbox->as_BoxLock()->is_eliminated()) {
     return; // This BoxLock node was processed already.
-
+  }
   // New implementation (EliminateNestedLocks) has separate BoxLock
   // node for each locked region so mark all associated locks/unlocks as
   // eliminated even if different objects are referenced in one locked region
   // (for example, OSR compilation of nested loop inside locked scope).
   if (EliminateNestedLocks ||
-      oldbox->as_BoxLock()->is_simple_lock_region(NULL, obj)) {
+      oldbox->as_BoxLock()->is_simple_lock_region(NULL, obj, NULL)) {
     // Box is used only in one lock region. Mark this box as eliminated.
     _igvn.hash_delete(oldbox);
     oldbox->as_BoxLock()->set_eliminated(); // This changes box's hash value
@@ -2067,11 +2067,7 @@
 
 #ifndef PRODUCT
   if (PrintEliminateLocks) {
-    if (alock->is_Lock()) {
-      tty->print_cr("++++ Eliminated: %d Lock", alock->_idx);
-    } else {
-      tty->print_cr("++++ Eliminated: %d Unlock", alock->_idx);
-    }
+    tty->print_cr("++++ Eliminated: %d %s '%s'", alock->_idx, (alock->is_Lock() ? "Lock" : "Unlock"), alock->kind_as_string());
   }
 #endif
 
@@ -2439,16 +2435,21 @@
   if (C->macro_count() == 0)
     return;
 
-  // First, attempt to eliminate locks
+  // Before elimination may re-mark (change to Nested or NonEscObj)
+  // all associated (same box and obj) lock and unlock nodes.
   int cnt = C->macro_count();
   for (int i=0; i < cnt; i++) {
     Node *n = C->macro_node(i);
     if (n->is_AbstractLock()) { // Lock and Unlock nodes
-      // Before elimination mark all associated (same box and obj)
-      // lock and unlock nodes.
      mark_eliminated_locking_nodes(n->as_AbstractLock());
    }
  }
+  // Re-marking may break consistency of Coarsened locks.
+  if (!C->coarsened_locks_consistent()) {
+    return; // bailout
+  }
+
+  // First, attempt to eliminate locks
   bool progress = true;
   while (progress) {
     progress = false;
@@ -2502,6 +2503,7 @@
 bool PhaseMacroExpand::expand_macro_nodes() {
   // Last attempt to eliminate macro nodes.
   eliminate_macro_nodes();
+  if (C->failing())  return true;
 
   // Make sure expansion will not cause node limit to be exceeded.
   // Worst case is a macro node gets expanded into about 50 nodes.
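
For context (illustration only, not part of the changeset): lock coarsening merges back-to-back locked regions on the same object so that C2 emits a single monitorenter/monitorexit pair instead of two. A minimal, hypothetical Java sketch of the shape that may be coarsened after inlining follows; the class and field names are invented for illustration.

    // Illustration only -- not part of this patch.
    // After inlining, C2 may coarsen the two adjacent synchronized
    // regions below into one lock/unlock pair on 'sink', provided
    // nothing between them requires the monitor to be released.
    class CoarseningExample {
        private final Object sink = new Object();
        private int a, b;

        void update(int x, int y) {
            synchronized (sink) {   // monitorenter #1
                a = x;
            }                       // monitorexit #1 -- candidate for removal by coarsening
            synchronized (sink) {   // monitorenter #2 -- candidate for removal by coarsening
                b = y;
            }                       // monitorexit #2
        }
    }

The bookkeeping added above (add_coarsened_locks() / coarsened_locks_consistent()) records each such coarsened group so that, if later lock elimination removes or re-marks only part of a group, the compilation bails out and is retried with coarsening disabled instead of emitting unbalanced monitor code. The regression test below exercises exactly that interaction.
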
diff -r 4e048537e103 test/TEST.groups
--- a/test/TEST.groups	Tue Jun 15 11:28:08 2021 +0530
+++ b/test/TEST.groups	Wed Jun 16 13:43:40 2021 -0700
@@ -503,6 +503,7 @@
   compiler/jsr292/ \
   compiler/7082949/ \
   compiler/6990212/ \
+  compiler/locks/ \
   compiler/loopopts/ \
   compiler/6659207/ \
   compiler/6855164/ \
diff -r 4e048537e103 test/compiler/locks/TestNestedLocksElimination.java
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/locks/TestNestedLocksElimination.java	Wed Jun 16 13:43:40 2021 -0700
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test
+ * @bug 8268347
+ * @summary Nested locks optimization may create unbalanced monitor enter/exit code
+ *
+ * @run main/othervm -XX:-BackgroundCompilation
+ *      -XX:CompileCommand=dontinline,TestNestedLocksElimination::foo
+ *      -XX:CompileCommand=dontinline,TestNestedLocksElimination::getNext
+ *      -XX:CompileCommand=dontinline,TestNestedLocksElimination::getHolder
+ *      TestNestedLocksElimination
+ */
+
+import java.util.LinkedList;
+
+public class TestNestedLocksElimination {
+
+    private LinkedList<char[]> buffers = new LinkedList<>();
+    private boolean complete = false;
+    private int bufferSize;
+
+    void foo(char[] ca) {
+        // Don't inline dummy method
+    }
+
+    // Don't inline
+    char[] getNext(int length, int count) {
+        if (this.buffers.isEmpty()) {
+            return new char[100];
+        }
+        char[] b = (char[]) this.buffers.getFirst();
+        if (count >= 100) {
+            this.complete = true;
+            this.buffers.clear(); // empty
+        }
+        return b;
+    }
+
+    synchronized boolean isComplete() {
+        return this.complete;
+    }
+
+    synchronized boolean availableSegment() {
+        return (buffers.isEmpty() == false);
+    }
+
+    // Don't inline
+    TestNestedLocksElimination getHolder(TestNestedLocksElimination s1, TestNestedLocksElimination s2, int count) {
+        return (count & 7) == 0 ? s2 : s1;
+    }
+
+    int test(TestNestedLocksElimination s1, TestNestedLocksElimination s2, int maxToSend) {
+        boolean isComplete = true;
+        boolean availableSegment = false;
+        int size = 0;
+        int count = 0;
+        do {
+            TestNestedLocksElimination s = getHolder(s1, s2, count++);
+
+            synchronized(s) {
+                isComplete = s.isComplete();
+                availableSegment = s.availableSegment();
+            }
+
+            synchronized (this) {
+                size = 0;
+                while (size < maxToSend) {
+                    char[] b = null;
+                    // This is the outer Lock region for object 's'.
+                    // Locks from the following inlined methods are "nested"
+                    // because they reference the same object.
+                    synchronized(s) {
+                        b = s.getNext(maxToSend - size, count);
+
+                        // The next call is bi-morphic with both targets inlined.
+                        // But one is synchronized and the other is not.
+                        // The class check for the bi-morphic call is loop invariant
+                        // and will trigger loop unswitching.
+                        // Loop unswitching will create two versions of the loop
+                        // with the following calls inlined in both versions.
+
+                        isComplete = s.isComplete();
+
+                        // The next synchronized method availableSegment() is
+                        // inlined and its Lock will be "coarsened" with the Unlock
+                        // in the version of the loop with the inlined synchronized
+                        // method isComplete().
+                        // Nested Lock Optimization will mark only this Unlock
+                        // as nested (as part of a "nested" lock/unlock pair).
+                        // Locks elimination will remove the "coarsened" Lock from
+                        // the availableSegment() method, leaving an unmatched unlock.
+
+                        availableSegment = s.availableSegment();
+                    }
+                    foo(b);
+                    size += b.length;
+                }
+            }
+        } while (availableSegment == true || isComplete == false);
+        return size;
+    }
+
+    public static void main(String[] args) {
+        int count = 0;
+        int n = 0;
+
+        TestNestedLocksElimination t = new TestNestedLocksElimination();
+        TestNestedLocksElimination s1 = new TestNestedLocksElimination();
+        TestNestedLocksElimination s2 = new TestNestedLocksEliminationSub();
+
+        char[] c = new char[100];
+        while (n++ < 20_000) {
+            s1.buffers.add(c);
+            s2.buffers.add(c);
+            count += t.test(s1, s2, 10000);
+        }
+
+        System.out.println(" count: " + count);
+    }
+}
+
+class TestNestedLocksEliminationSub extends TestNestedLocksElimination {
+    public boolean isComplete() {
+        return true;
+    }
+}
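
For reference, a hedged usage sketch (not part of the changeset): the test is normally driven by jtreg via the @run tag above, but it can also be exercised directly with the same options, for example:

    javac TestNestedLocksElimination.java
    java -XX:-BackgroundCompilation \
         -XX:CompileCommand=dontinline,TestNestedLocksElimination::foo \
         -XX:CompileCommand=dontinline,TestNestedLocksElimination::getNext \
         -XX:CompileCommand=dontinline,TestNestedLocksElimination::getHolder \
         TestNestedLocksElimination

The PrintEliminateLocks and Verbose output referenced in the C2 changes is guarded by #ifndef PRODUCT, so it is only available in a debug build. With the fix applied, a coarsened group whose locks are only partially eliminated or re-marked makes Compile::coarsened_locks_consistent() return false; C2 then records the retry_no_locks_coarsening() reason and recompiles the method with do_locks_coarsening disabled, rather than emitting the unmatched unlock described in the test comments above.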