-
Notifications
You must be signed in to change notification settings - Fork 161
Expand file tree
/
Copy pathcallGenerator.cpp
More file actions
1569 lines (1362 loc) · 59.9 KB
/
callGenerator.cpp
File metadata and controls
1569 lines (1362 loc) · 59.9 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
/*
* Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "ci/bcEscapeAnalyzer.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciMemberName.hpp"
#include "ci/ciMethodHandle.hpp"
#include "ci/ciObjArray.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compileLog.hpp"
#include "oops/accessDecorators.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/debug.hpp"
// Utility function.
// Build the TypeFunc (C2 call signature) for this generator's method.
const TypeFunc* CallGenerator::tf() const {
  const TypeFunc* signature = TypeFunc::make(method());
  return signature;
}
bool CallGenerator::is_inlined_method_handle_intrinsic(JVMState* jvms, ciMethod* m) {
  // Delegate to the (caller, bci) overload, using the call site recorded in jvms.
  ciMethod* caller_method = jvms->method();
  const int call_bci = jvms->bci();
  return is_inlined_method_handle_intrinsic(caller_method, call_bci, m);
}
bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* caller, int bci, ciMethod* m) {
  // Resolve the symbolic target at the given call site, then compare it
  // against the actual callee in the (symbolic_info, m) overload.
  return is_inlined_method_handle_intrinsic(caller->get_method_at_bci(bci), m);
}
bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* symbolic_info, ciMethod* m) {
  // True when the call site symbolically targets a MethodHandle intrinsic
  // but the resolved callee is an ordinary method.
  if (!symbolic_info->is_method_handle_intrinsic()) {
    return false;
  }
  return !m->is_method_handle_intrinsic();
}
//-----------------------------ParseGenerator---------------------------------
// Internal class which handles all direct bytecode traversal.
class ParseGenerator : public InlineCallGenerator {
 private:
  bool  _is_osr;         // true if this is an on-stack-replacement compilation
  float _expected_uses;  // estimated invocation count; drives inlining heuristics
 public:
  // expected_uses: profile-based estimate of how often this method will run.
  // is_osr: compile starting at a loop backedge instead of method entry.
  ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
    : InlineCallGenerator(method),
      _is_osr(is_osr),
      _expected_uses(expected_uses)
  {
    assert(InlineTree::check_can_parse(method) == nullptr, "parse must be possible");
  }
  virtual bool is_parse() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
  // Was declared 'int' although it holds a bool; bool is the accurate type
  // and remains implicitly convertible for any caller expecting int.
  bool is_osr() const { return _is_osr; }
};
// Parse the method's bytecodes directly into the current compile's IR.
// Returns the parser's exit JVMState, or null if the compile is bailing out.
JVMState* ParseGenerator::generate(JVMState* jvms) {
  Compile* const comp = Compile::current();

  if (is_osr()) {
    // The JVMS for a OSR has a single argument (see its TypeFunc).
    assert(jvms->depth() == 1, "no inline OSR");
  }

  if (comp->failing()) {
    // bailing out of the compile; do not try to parse
    return nullptr;
  }

  Parse parser(jvms, method(), _expected_uses);
  if (comp->failing()) {
    return nullptr;
  }

  // Grab signature for matching/allocation
  GraphKit& exit_kit = parser.exits();
  if (comp->failing()) {
    // Drain any pending exception states before giving up.
    while (exit_kit.pop_exception_state() != nullptr) {
      // empty
    }
    return nullptr;
  }

  assert(exit_kit.jvms()->same_calls_as(jvms), "sanity");

  // Simply return the exit state of the parser,
  // augmented by any exceptional states.
  return exit_kit.transfer_exceptions_into_jvms();
}
//---------------------------DirectCallGenerator------------------------------
// Internal class which handles all out-of-line calls w/o receiver type checks.
class DirectCallGenerator : public CallGenerator {
 private:
  // The CallStaticJavaNode emitted by generate(); kept so late inlining can
  // find and replace it later. Null until generate() runs.
  CallStaticJavaNode* _call_node;
  // Force separate memory and I/O projections for the exceptional
  // paths to facilitate late inlining.
  bool _separate_io_proj;
 protected:
  void set_call_node(CallStaticJavaNode* call) { _call_node = call; }
 public:
  DirectCallGenerator(ciMethod* method, bool separate_io_proj)
    : CallGenerator(method),
      _call_node(nullptr),
      _separate_io_proj(separate_io_proj)
  {
    if (InlineTypeReturnedAsFields && method->is_method_handle_intrinsic()) {
      // If that call has not been optimized by the time optimizations are over,
      // we'll need to add a call to create an inline type instance from the klass
      // returned by the call (see PhaseMacroExpand::expand_mh_intrinsic_return).
      // Separating memory and I/O projections for exceptions is required to
      // perform that graph transformation.
      _separate_io_proj = true;
    }
  }
  virtual JVMState* generate(JVMState* jvms);
  // May return null if generate() has not been called yet.
  virtual CallNode* call_node() const { return _call_node; }
  // Clone this generator, binding the clone to an existing call node.
  virtual CallGenerator* with_call_node(CallNode* call) {
    DirectCallGenerator* dcg = new DirectCallGenerator(method(), _separate_io_proj);
    dcg->set_call_node(call->as_CallStaticJava());
    return dcg;
  }
};
// Emit an out-of-line direct call (static, or optimized-virtual for instance
// methods) to this generator's method. Returns the caller's JVMState
// augmented with any exceptional states.
// Note: the original body created an unused local 'PhaseGVN& gvn = kit.gvn();'
// which has been removed (pure accessor, no side effects).
JVMState* DirectCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  bool is_static = method()->is_static();
  // Call targets are resolved lazily through a runtime stub chosen by call kind.
  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                             : SharedRuntime::get_resolve_opt_virtual_call_stub();
  if (kit.C->log() != nullptr) {
    kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
  }
  CallStaticJavaNode* call = new CallStaticJavaNode(kit.C, tf(), target, method());
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call and skip a call to MH.linkTo*/invokeBasic adapter,
    // additional information about the method being invoked should be attached
    // to the call site to make resolution logic work
    // (see SharedRuntime::resolve_static_call_C).
    call->set_override_symbolic_info(true);
  }
  _call_node = call; // Save the call node in case we need it later
  if (!is_static) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
    // Mark the call node as virtual, sort of:
    call->set_optimized_virtual(true);
  }
  kit.set_arguments_for_java_call(call, is_late_inline());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }
  kit.set_edges_for_java_call(call, false, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);
  return kit.transfer_exceptions_into_jvms();
}
//--------------------------VirtualCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class VirtualCallGenerator : public CallGenerator {
 private:
  // Vtable slot for a true vtable call, or Method::invalid_vtable_index
  // when inline caches are used instead.
  int _vtable_index;
  // Request separate memory/I-O projections for the exceptional paths.
  bool _separate_io_proj;
  // The CallDynamicJavaNode emitted by generate(); null until then.
  CallDynamicJavaNode* _call_node;
 protected:
  void set_call_node(CallDynamicJavaNode* call) { _call_node = call; }
 public:
  VirtualCallGenerator(ciMethod* method, int vtable_index, bool separate_io_proj)
    : CallGenerator(method), _vtable_index(vtable_index), _separate_io_proj(separate_io_proj), _call_node(nullptr)
  {
    assert(vtable_index == Method::invalid_vtable_index ||
           vtable_index >= 0, "either invalid or usable");
  }
  virtual bool is_virtual() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
  virtual CallNode* call_node() const { return _call_node; }
  int vtable_index() const { return _vtable_index; }
  // Clone this generator, binding the clone to an existing call node.
  virtual CallGenerator* with_call_node(CallNode* call) {
    VirtualCallGenerator* cg = new VirtualCallGenerator(method(), _vtable_index, _separate_io_proj);
    cg->set_call_node(call->as_CallDynamicJava());
    return cg;
  }
};
// Emit an out-of-line virtual call that dispatches on the receiver's type.
// Handles constant-null receivers (uncommon trap) and decides between an
// explicit and an implicit receiver null check based on profiling data.
JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  Node* receiver = kit.argument(0);
  if (kit.C->log() != nullptr) {
    kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
  }
  // If the receiver is a constant null, do not torture the system
  // by attempting to call through it. The compile will proceed
  // correctly, but may bail out in final_graph_reshaping, because
  // the call instruction will have a seemingly deficient out-count.
  // (The bailout says something misleading about an "infinite loop".)
  if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
    assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
    ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
    int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
    // Restore the arguments before trapping so the interpreter can re-execute.
    kit.inc_sp(arg_size); // restore arguments
    kit.uncommon_trap(Deoptimization::Reason_null_check,
                      Deoptimization::Action_none,
                      nullptr, "null receiver");
    return kit.transfer_exceptions_into_jvms();
  }
  // Ideally we would unconditionally do a null check here and let it
  // be converted to an implicit check based on profile information.
  // However currently the conversion to implicit null checks in
  // Block::implicit_null_check() only looks for loads and stores, not calls.
  ciMethod *caller = kit.method();
  ciMethodData *caller_md = (caller == nullptr) ? nullptr : caller->method_data();
  // Fall back to an explicit null check when implicit checks are unavailable
  // or this caller has already trapped on null checks too often.
  if (!UseInlineCaches || !ImplicitNullChecks || !os::zero_page_read_protected() ||
      ((ImplicitNullCheckThreshold > 0) && caller_md &&
       (caller_md->trap_count(Deoptimization::Reason_null_check)
        >= (uint)ImplicitNullCheckThreshold))) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
  }
  assert(!method()->is_static(), "virtual call must not be to static");
  assert(!method()->is_final(), "virtual call should not be to final");
  assert(!method()->is_private(), "virtual call should not be to private");
  assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
         "no vtable calls if +UseInlineCaches ");
  address target = SharedRuntime::get_resolve_virtual_call_stub();
  // Normal inline cache used for call
  CallDynamicJavaNode* call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index);
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call (optimized virtual or virtual)
    // and skip a call to MH.linkTo*/invokeBasic adapter, additional information
    // about the method being invoked should be attached to the call site to
    // make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
    call->set_override_symbolic_info(true);
  }
  _call_node = call; // Save the call node in case we need it later
  kit.set_arguments_for_java_call(call);
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }
  kit.set_edges_for_java_call(call, false /*must_throw*/, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);
  // Represent the effect of an implicit receiver null_check
  // as part of this call. Since we share a map with the caller,
  // his JVMS gets adjusted.
  kit.cast_not_null(receiver);
  return kit.transfer_exceptions_into_jvms();
}
// Create a generator that inlines by parsing bytecodes directly, or null if
// the method cannot be parsed (e.g. native or abstract).
CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
  if (InlineTree::check_can_parse(m) != nullptr) {
    return nullptr;
  }
  return new ParseGenerator(m, expected_uses);
}
// As a special case, the JVMS passed to this CallGenerator is
// for the method execution already in progress, not just the JVMS
// of the caller. Thus, this CallGenerator cannot be mixed with others!
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
  if (InlineTree::check_can_parse(m) != nullptr) {
    return nullptr;
  }
  // Seed the use estimate from how often the interpreter already ran this method.
  const float expected_uses = m->interpreter_invocation_count();
  return new ParseGenerator(m, expected_uses, /*is_osr=*/true);
}
// Create a generator for an out-of-line direct call (no receiver type check).
CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
  assert(!m->is_abstract(), "for_direct_call mismatch");
  DirectCallGenerator* cg = new DirectCallGenerator(m, separate_io_proj);
  return cg;
}
// Create a generator for an out-of-line virtual call.
CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
  VirtualCallGenerator* cg = new VirtualCallGenerator(m, vtable_index, /*separate_io_proj=*/false);
  return cg;
}
// Allow inlining decisions to be delayed
class LateInlineCallGenerator : public DirectCallGenerator {
 private:
  jlong _unique_id;   // unique id for log compilation
  bool _is_pure_call; // a hint that the call doesn't have important side effects to care about
 protected:
  // The generator used to actually perform the inlining once it is allowed.
  CallGenerator* _inline_cg;
  // Hook for subclasses to veto or prepare late inlining; default accepts.
  virtual bool do_late_inline_check(Compile* C, JVMState* jvms) { return true; }
  virtual CallGenerator* inline_cg() const { return _inline_cg; }
  virtual bool is_pure_call() const { return _is_pure_call; }
 public:
  LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg, bool is_pure_call = false) :
    DirectCallGenerator(method, true), _unique_id(0), _is_pure_call(is_pure_call), _inline_cg(inline_cg) {}
  virtual bool is_late_inline() const { return true; }
  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();
  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();
    C->log_inline_id(this);
    // Record that this call site should be revisited once the main
    // parse is finished.
    if (!is_mh_late_inline()) {
      C->add_late_inline(this);
    }
    // Emit the CallStaticJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    return DirectCallGenerator::generate(jvms);
  }
  virtual void set_unique_id(jlong id) {
    _unique_id = id;
  }
  virtual jlong unique_id() const {
    return _unique_id;
  }
  // Non-const accessor to the wrapped inline generator.
  virtual CallGenerator* inline_cg() {
    return _inline_cg;
  }
  // Clone this generator, binding the clone to an existing call node.
  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineCallGenerator* cg = new LateInlineCallGenerator(method(), _inline_cg, _is_pure_call);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};
// Create a generator that defers the inlining decision until after parsing.
CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  LateInlineCallGenerator* cg = new LateInlineCallGenerator(method, inline_cg);
  return cg;
}
// Late inlining of method handle invoker/linker intrinsics.
class LateInlineMHCallGenerator : public LateInlineCallGenerator {
  ciMethod* _caller;      // method containing the MH call site
  // True when the MH receiver/appendix is not a compile-time constant,
  // so inlining cannot be attempted until it becomes constant.
  bool _input_not_const;
  virtual bool do_late_inline_check(Compile* C, JVMState* jvms);
 public:
  LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
    LateInlineCallGenerator(callee, nullptr), _caller(caller), _input_not_const(input_not_const) {}
  virtual bool is_mh_late_inline() const { return true; }
  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();
  virtual JVMState* generate(JVMState* jvms) {
    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);
    Compile* C = Compile::current();
    if (_input_not_const) {
      // inlining won't be possible so no need to enqueue right now.
      call_node()->set_generator(this);
    } else {
      C->add_late_inline(this);
    }
    return new_jvms;
  }
  // Clone this generator, binding the clone to an existing call node.
  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineMHCallGenerator* cg = new LateInlineMHCallGenerator(_caller, method(), _input_not_const);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};
// Attempt to resolve the method handle call to a concrete generator.
// Returns true (and installs _inline_cg) when the call can be inlined or
// strength-reduced to a direct call; false when no progress is possible.
bool LateInlineMHCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) {
  // When inlining a virtual call, the null check at the call and the call itself can throw. These 2 paths have different
  // expression stacks which causes late inlining to break. The MH invoker is not expected to be called from a method with
  // exception handlers. When there is no exception handler, GraphKit::builtin_throw() pops the stack which solves the issue
  // of late inlining with exceptions.
  assert(!jvms->method()->has_exception_handlers() ||
         (method()->intrinsic_id() != vmIntrinsics::_linkToVirtual &&
          method()->intrinsic_id() != vmIntrinsics::_linkToInterface), "no exception handler expected");
  // Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call.
  bool allow_inline = C->inlining_incrementally();
  bool input_not_const = true;
  CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), allow_inline, input_not_const);
  assert(!input_not_const, "sanity"); // shouldn't have been scheduled for inlining in the first place
  if (cg != nullptr) {
    // AlwaysIncrementalInline causes for_method_handle_inline() to
    // return a LateInlineCallGenerator. Extract the
    // InlineCallGenerator from it.
    if (AlwaysIncrementalInline && cg->is_late_inline() && !cg->is_virtual_late_inline()) {
      cg = cg->inline_cg();
      assert(cg != nullptr, "inline call generator expected");
    }
    if (!allow_inline) {
      // Record why inlining itself did not happen (direct call only).
      C->inline_printer()->record(cg->method(), call_node()->jvms(), InliningResult::FAILURE,
                                  "late method handle call resolution");
    }
    assert(!cg->is_late_inline() || cg->is_mh_late_inline() || cg->is_virtual_late_inline() ||
           AlwaysIncrementalInline || StressIncrementalInlining, "we're doing late inlining");
    _inline_cg = cg;
    return true;
  } else {
    // Method handle call which has a constant appendix argument should be either inlined or replaced with a direct call
    // unless there's a signature mismatch between caller and callee. If the failure occurs, there's not much to be improved later,
    // so don't reinstall the generator to avoid pushing the generator between IGVN and incremental inlining indefinitely.
    return false;
  }
}
// Create a late-inline generator for a method handle invoker/linker call.
CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
  assert(IncrementalInlineMH, "required");
  // Remember that this compile has pending MH late inlines.
  Compile::current()->mark_has_mh_late_inlines();
  return new LateInlineMHCallGenerator(caller, callee, input_not_const);
}
// Allow inlining decisions to be delayed
// Allow inlining decisions to be delayed
class LateInlineVirtualCallGenerator : public VirtualCallGenerator {
 private:
  jlong _unique_id; // unique id for log compilation
  // Generator used once the call has been devirtualized; null until then.
  CallGenerator* _inline_cg;
  // Unique target discovered by CallDynamicJavaNode::Ideal (devirtualization).
  ciMethod* _callee;
  bool _is_pure_call;
  // Profile scaling factor handed to Compile::call_generator.
  float _prof_factor;
 protected:
  virtual bool do_late_inline_check(Compile* C, JVMState* jvms);
  virtual CallGenerator* inline_cg() const { return _inline_cg; }
  virtual bool is_pure_call() const { return _is_pure_call; }
 public:
  LateInlineVirtualCallGenerator(ciMethod* method, int vtable_index, float prof_factor)
  : VirtualCallGenerator(method, vtable_index, true /*separate_io_projs*/),
    _unique_id(0), _inline_cg(nullptr), _callee(nullptr), _is_pure_call(false), _prof_factor(prof_factor) {
    assert(IncrementalInlineVirtual, "required");
  }
  virtual bool is_late_inline() const { return true; }
  virtual bool is_virtual_late_inline() const { return true; }
  // Convert the CallDynamicJava into an inline
  virtual void do_late_inline();
  virtual ciMethod* callee_method() {
    return _callee;
  }
  virtual void set_callee_method(ciMethod* m) {
    assert(_callee == nullptr || _callee == m, "repeated inline attempt with different callee");
    _callee = m;
  }
  virtual JVMState* generate(JVMState* jvms) {
    // Emit the CallDynamicJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    JVMState* new_jvms = VirtualCallGenerator::generate(jvms);
    if (call_node() != nullptr) {
      call_node()->set_generator(this);
    }
    return new_jvms;
  }
  virtual void set_unique_id(jlong id) {
    _unique_id = id;
  }
  virtual jlong unique_id() const {
    return _unique_id;
  }
  // Clone this generator, binding the clone to an existing call node.
  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineVirtualCallGenerator* cg = new LateInlineVirtualCallGenerator(method(), vtable_index(), _prof_factor);
    cg->set_call_node(call->as_CallDynamicJava());
    return cg;
  }
};
// Attempt to devirtualize this call to the unique callee found by IGVN.
// Returns true (and installs _inline_cg) on success; false if the receiver
// may be null or an interface call cannot be safely converted.
bool LateInlineVirtualCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) {
  // Method handle linker case is handled in CallDynamicJavaNode::Ideal().
  // Unless inlining is performed, _override_symbolic_info bit will be set in DirectCallGenerator::generate().
  // Implicit receiver null checks introduce problems when exception states are combined.
  Node* receiver = jvms->map()->argument(jvms, 0);
  const Type* recv_type = C->initial_gvn()->type(receiver);
  if (recv_type->maybe_null()) {
    C->inline_printer()->record(method(), call_node()->jvms(), InliningResult::FAILURE,
                                "late call devirtualization failed (receiver may be null)");
    return false;
  }
  // Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call.
  bool allow_inline = C->inlining_incrementally();
  if (!allow_inline && _callee->holder()->is_interface()) {
    // Don't convert the interface call to a direct call guarded by an interface subtype check.
    C->inline_printer()->record(method(), call_node()->jvms(), InliningResult::FAILURE,
                                "late call devirtualization failed (interface call)");
    return false;
  }
  // Ask the compile for the best generator for the now-known unique callee.
  CallGenerator* cg = C->call_generator(_callee,
                                        vtable_index(),
                                        false /*call_does_dispatch*/,
                                        jvms,
                                        allow_inline,
                                        _prof_factor,
                                        nullptr /*speculative_receiver_type*/,
                                        true /*allow_intrinsics*/);
  if (cg != nullptr) {
    if (!allow_inline) {
      // Devirtualized to a direct call, but not inlined; record why.
      C->inline_printer()->record(cg->method(), call_node()->jvms(), InliningResult::FAILURE, "late call devirtualization");
    }
    assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline || StressIncrementalInlining, "we're doing late inlining");
    _inline_cg = cg;
    return true;
  } else {
    // Virtual call which provably doesn't dispatch should be either inlined or replaced with a direct call.
    assert(false, "no progress");
    return false;
  }
}
// Create a generator that defers devirtualization/inlining of a virtual call.
CallGenerator* CallGenerator::for_late_inline_virtual(ciMethod* m, int vtable_index, float prof_factor) {
  assert(IncrementalInlineVirtual, "required");
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
  LateInlineVirtualCallGenerator* cg = new LateInlineVirtualCallGenerator(m, vtable_index, prof_factor);
  return cg;
}
// Perform the deferred inlining for a plain late-inline call site.
void LateInlineCallGenerator::do_late_inline() {
  CallGenerator::do_late_inline_helper();
}
// Perform the deferred inlining for a method handle call site.
void LateInlineMHCallGenerator::do_late_inline() {
  CallGenerator::do_late_inline_helper();
}
// Perform the deferred inlining for a devirtualized virtual call site.
void LateInlineVirtualCallGenerator::do_late_inline() {
  assert(_callee != nullptr, "required"); // set up in CallDynamicJavaNode::Ideal
  CallGenerator::do_late_inline_helper();
}
void CallGenerator::do_late_inline_helper() {
assert(is_late_inline(), "only late inline allowed");
// Can't inline it
CallNode* call = call_node();
if (call == nullptr || call->outcnt() == 0 ||
call->in(0) == nullptr || call->in(0)->is_top()) {
return;
}
const TypeTuple* r = call->tf()->domain_cc();
for (uint i1 = TypeFunc::Parms; i1 < r->cnt(); i1++) {
if (call->in(i1)->is_top() && r->field_at(i1) != Type::HALF) {
assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
return;
}
}
if (call->in(TypeFunc::Memory)->is_top()) {
assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
return;
}
if (call->in(TypeFunc::Memory)->is_MergeMem()) {
MergeMemNode* merge_mem = call->in(TypeFunc::Memory)->as_MergeMem();
if (merge_mem->base_memory() == merge_mem->empty_memory()) {
return; // dead path
}
}
// check for unreachable loop
// Similar to incremental inlining, don't assert that all call
// projections are still there for post-parse call devirtualization.
bool do_asserts = !is_mh_late_inline() && !is_virtual_late_inline();
CallProjections* callprojs = call->extract_projections(true, do_asserts);
if ((callprojs->fallthrough_catchproj == call->in(0)) ||
(callprojs->catchall_catchproj == call->in(0)) ||
(callprojs->fallthrough_memproj == call->in(TypeFunc::Memory)) ||
(callprojs->catchall_memproj == call->in(TypeFunc::Memory)) ||
(callprojs->fallthrough_ioproj == call->in(TypeFunc::I_O)) ||
(callprojs->catchall_ioproj == call->in(TypeFunc::I_O)) ||
(callprojs->exobj != nullptr && call->find_edge(callprojs->exobj) != -1)) {
return;
}
Compile* C = Compile::current();
// Remove inlined methods from Compiler's lists.
if (call->is_macro()) {
C->remove_macro_node(call);
}
bool result_not_used = true;
for (uint i = 0; i < callprojs->nb_resproj; i++) {
if (callprojs->resproj[i] != nullptr) {
if (callprojs->resproj[i]->outcnt() != 0) {
result_not_used = false;
}
if (call->find_edge(callprojs->resproj[i]) != -1) {
return;
}
}
}
if (is_pure_call() && result_not_used) {
// The call is marked as pure (no important side effects), but result isn't used.
// It's safe to remove the call.
GraphKit kit(call->jvms());
kit.replace_call(call, C->top(), true, do_asserts);
} else {
// Make a clone of the JVMState that appropriate to use for driving a parse
JVMState* old_jvms = call->jvms();
JVMState* jvms = old_jvms->clone_shallow(C);
uint size = call->req();
SafePointNode* map = new SafePointNode(size, jvms);
for (uint i1 = 0; i1 < size; i1++) {
map->init_req(i1, call->in(i1));
}
PhaseGVN& gvn = *C->initial_gvn();
// Make sure the state is a MergeMem for parsing.
if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
gvn.set_type_bottom(mem);
map->set_req(TypeFunc::Memory, mem);
}
// blow away old call arguments
for (uint i1 = TypeFunc::Parms; i1 < r->cnt(); i1++) {
map->set_req(i1, C->top());
}
jvms->set_map(map);
// Make enough space in the expression stack to transfer
// the incoming arguments and return value.
map->ensure_stack(jvms, jvms->method()->max_stack());
const TypeTuple* domain_sig = call->_tf->domain_sig();
uint nargs = method()->arg_size();
assert(domain_sig->cnt() - TypeFunc::Parms == nargs, "inconsistent signature");
uint j = TypeFunc::Parms;
int arg_num = 0;
for (uint i1 = 0; i1 < nargs; i1++) {
const Type* t = domain_sig->field_at(TypeFunc::Parms + i1);
if (t->is_inlinetypeptr() && !method()->mismatch() && method()->is_scalarized_arg(arg_num)) {
// Inline type arguments are not passed by reference: we get an argument per
// field of the inline type. Build InlineTypeNodes from the inline type arguments.
GraphKit arg_kit(jvms, &gvn);
Node* vt = InlineTypeNode::make_from_multi(&arg_kit, call, t->inline_klass(), j, /* in= */ true, /* null_free= */ !t->maybe_null());
map = arg_kit.map();
map->set_control(arg_kit.control());
map->set_argument(jvms, i1, vt);
} else {
map->set_argument(jvms, i1, call->in(j++));
}
if (t != Type::HALF) {
arg_num++;
}
}
C->log_late_inline(this);
// JVMState is ready, so time to perform some checks and prepare for inlining attempt.
if (!do_late_inline_check(C, jvms)) {
map->disconnect_inputs(C);
return;
}
// Check if we are late inlining a method handle call that returns an inline type as fields.
Node* buffer_oop = nullptr;
ciMethod* inline_method = inline_cg()->method();
ciType* return_type = inline_method->return_type();
if (!call->tf()->returns_inline_type_as_fields() &&
return_type->is_inlinetype() && return_type->as_inline_klass()->can_be_returned_as_fields()) {
assert(is_mh_late_inline(), "Unexpected return type");
// Allocate a buffer for the inline type returned as fields because the caller expects an oop return.
// Do this before the method handle call in case the buffer allocation triggers deoptimization and
// we need to "re-execute" the call in the interpreter (to make sure the call is only executed once).
GraphKit arg_kit(jvms, &gvn);
{
PreserveReexecuteState preexecs(&arg_kit);
arg_kit.jvms()->set_should_reexecute(true);
arg_kit.inc_sp(nargs);
Node* klass_node = arg_kit.makecon(TypeKlassPtr::make(return_type->as_inline_klass()));
buffer_oop = arg_kit.new_instance(klass_node, nullptr, nullptr, /* deoptimize_on_exception */ true);
}
jvms = arg_kit.transfer_exceptions_into_jvms();
}
// Setup default node notes to be picked up by the inlining
Node_Notes* old_nn = C->node_notes_at(call->_idx);
if (old_nn != nullptr) {
Node_Notes* entry_nn = old_nn->clone(C);
entry_nn->set_jvms(jvms);
C->set_default_node_notes(entry_nn);
}
// Now perform the inlining using the synthesized JVMState
JVMState* new_jvms = inline_cg()->generate(jvms);
if (new_jvms == nullptr) return; // no change
if (C->failing()) return;
if (is_mh_late_inline()) {
C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (method handle)");
} else if (is_string_late_inline()) {
C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (string method)");
} else if (is_boxing_late_inline()) {
C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (boxing method)");
} else if (is_vector_reboxing_late_inline()) {
C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (vector reboxing method)");
} else {
C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded");
}
// Capture any exceptional control flow
GraphKit kit(new_jvms);
// Find the result object
Node* result = C->top();
int result_size = method()->return_type()->size();
if (result_size != 0 && !kit.stopped()) {
result = (result_size == 1) ? kit.pop() : kit.pop_pair();
}
if (call->is_CallStaticJava() && call->as_CallStaticJava()->is_boxing_method()) {
result = kit.must_be_not_null(result, false);
}
if (inline_cg()->is_inline()) {
C->set_has_loops(C->has_loops() || inline_method->has_loops());
C->env()->notice_inlined_method(inline_method);
}
C->set_inlining_progress(true);
C->set_do_cleanup(kit.stopped()); // path is dead; needs cleanup
// Handle inline type returns
InlineTypeNode* vt = result->isa_InlineType();
if (vt != nullptr) {
if (call->tf()->returns_inline_type_as_fields()) {
vt->replace_call_results(&kit, call, C);
} else {
// Result might still be allocated (for example, if it has been stored to a non-flat field)
if (!vt->is_allocated(&kit.gvn())) {
assert(buffer_oop != nullptr, "should have allocated a buffer");
RegionNode* region = new RegionNode(3);
// Check if result is null
Node* null_ctl = kit.top();
kit.null_check_common(vt->get_null_marker(), T_INT, false, &null_ctl);
region->init_req(1, null_ctl);
PhiNode* oop = PhiNode::make(region, kit.gvn().zerocon(T_OBJECT), TypeInstPtr::make(TypePtr::BotPTR, vt->type()->inline_klass()));
Node* init_mem = kit.reset_memory();
PhiNode* mem = PhiNode::make(region, init_mem, Type::MEMORY, TypePtr::BOTTOM);
// Not null, initialize the buffer
kit.set_all_memory(init_mem);
Node* payload_ptr = kit.basic_plus_adr(buffer_oop, kit.gvn().type(vt)->inline_klass()->payload_offset());
vt->store_flat(&kit, buffer_oop, payload_ptr, false, true, true, IN_HEAP | MO_UNORDERED);
// Do not let stores that initialize this buffer be reordered with a subsequent
// store that would make this buffer accessible by other threads.
AllocateNode* alloc = AllocateNode::Ideal_allocation(buffer_oop);
assert(alloc != nullptr, "must have an allocation node");
kit.insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
region->init_req(2, kit.control());
oop->init_req(2, buffer_oop);
mem->init_req(2, kit.merged_memory());
// Update oop input to buffer
kit.gvn().hash_delete(vt);
vt->set_oop(kit.gvn(), kit.gvn().transform(oop));
vt->set_is_buffered(kit.gvn());
vt = kit.gvn().transform(vt)->as_InlineType();
kit.set_control(kit.gvn().transform(region));
kit.set_all_memory(kit.gvn().transform(mem));
kit.record_for_igvn(region);
kit.record_for_igvn(oop);
kit.record_for_igvn(mem);
}
result = vt;
}
DEBUG_ONLY(buffer_oop = nullptr);
} else {
assert(result->is_top() || !call->tf()->returns_inline_type_as_fields() || !call->as_CallJava()->method()->return_type()->is_loaded(), "Unexpected return value");
}
assert(kit.stopped() || buffer_oop == nullptr, "unused buffer allocation");
kit.replace_call(call, result, true, do_asserts);
}
}
// Generator for string methods that are candidates for late inlining: the
// call is first emitted as a regular out-of-line direct call and registered
// with the compiler (add_string_late_inline) so the actual inlining can be
// attempted later.
class LateInlineStringCallGenerator : public LateInlineCallGenerator {
 public:
  LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg)
    : LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms) {
    // Queue this call on the string late-inline list, then emit it as a
    // plain direct call for now.
    Compile* const comp = Compile::current();
    comp->log_inline_id(this);
    comp->add_string_late_inline(this);
    return DirectCallGenerator::generate(jvms);
  }

  virtual bool is_string_late_inline() const { return true; }

  // Create a copy of this generator bound to the given call node.
  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineStringCallGenerator* clone = new LateInlineStringCallGenerator(method(), _inline_cg);
    clone->set_call_node(call->as_CallStaticJava());
    return clone;
  }
};
// Factory: wrap 'inline_cg' so that inlining of the string method is
// deferred to the string late-inlining pass.
CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  CallGenerator* cg = new LateInlineStringCallGenerator(method, inline_cg);
  return cg;
}
// Generator for boxing methods that are candidates for late inlining. The
// call is initially emitted out-of-line and queued on the compiler's boxing
// late-inline list; the generator is constructed with is_pure = true.
class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {
 public:
  LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg)
    : LateInlineCallGenerator(method, inline_cg, /*is_pure=*/true) {}

  virtual JVMState* generate(JVMState* jvms) {
    // Queue this call on the boxing late-inline list, then emit it as a
    // plain direct call for now.
    Compile* const comp = Compile::current();
    comp->log_inline_id(this);
    comp->add_boxing_late_inline(this);
    return DirectCallGenerator::generate(jvms);
  }

  virtual bool is_boxing_late_inline() const { return true; }

  // Create a copy of this generator bound to the given call node.
  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineBoxingCallGenerator* clone = new LateInlineBoxingCallGenerator(method(), _inline_cg);
    clone->set_call_node(call->as_CallStaticJava());
    return clone;
  }
};
// Factory: wrap 'inline_cg' so that inlining of the boxing method is
// deferred to the boxing late-inlining pass.
CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  CallGenerator* cg = new LateInlineBoxingCallGenerator(method, inline_cg);
  return cg;
}
// Generator for vector-reboxing methods that are candidates for late
// inlining. The call is initially emitted out-of-line and queued on the
// compiler's vector-reboxing late-inline list; the generator is constructed
// with is_pure = true.
class LateInlineVectorReboxingCallGenerator : public LateInlineCallGenerator {
 public:
  LateInlineVectorReboxingCallGenerator(ciMethod* method, CallGenerator* inline_cg)
    : LateInlineCallGenerator(method, inline_cg, /*is_pure=*/true) {}

  virtual JVMState* generate(JVMState* jvms) {
    // Queue this call on the vector-reboxing late-inline list, then emit it
    // as a plain direct call for now.
    Compile* const comp = Compile::current();
    comp->log_inline_id(this);
    comp->add_vector_reboxing_late_inline(this);
    return DirectCallGenerator::generate(jvms);
  }

  virtual bool is_vector_reboxing_late_inline() const { return true; }

  // Create a copy of this generator bound to the given call node.
  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineVectorReboxingCallGenerator* clone = new LateInlineVectorReboxingCallGenerator(method(), _inline_cg);
    clone->set_call_node(call->as_CallStaticJava());
    return clone;
  }
};
// Factory for vector-reboxing late-inline call generators.
// Factory: wrap 'inline_cg' so that inlining of the vector-reboxing method
// is deferred to the vector-reboxing late-inlining pass.
CallGenerator* CallGenerator::for_vector_reboxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  CallGenerator* cg = new LateInlineVectorReboxingCallGenerator(method, inline_cg);
  return cg;
}
//------------------------PredictedCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class PredictedCallGenerator : public CallGenerator {
  ciKlass*       _predicted_receiver;  // Receiver klass the guard tests against
  CallGenerator* _if_missed;           // Used when the receiver check fails
  CallGenerator* _if_hit;              // Used when the receiver check succeeds
  float          _hit_prob;            // Probability of the hit path, clamped to [PROB_MIN, PROB_MAX]
  bool           _exact_check;         // true for predicted calls, false for guarded calls (see factories below)

 public:
  PredictedCallGenerator(ciKlass* predicted_receiver,
                         CallGenerator* if_missed,
                         CallGenerator* if_hit, bool exact_check,
                         float hit_prob)
    : CallGenerator(if_missed->method()),
      _predicted_receiver(predicted_receiver),
      _if_missed(if_missed),
      _if_hit(if_hit),
      // The call profile data may predict hit_prob as extreme as 0 or 1;
      // clamp it into [PROB_MIN, PROB_MAX] to remove the extreme values.
      _hit_prob(hit_prob > PROB_MAX ? PROB_MAX
                                    : (hit_prob < PROB_MIN ? PROB_MIN : hit_prob)),
      _exact_check(exact_check) {}

  virtual bool is_virtual()  const { return true; }
  // Inline/deferred status is inherited from the hit-path generator.
  virtual bool is_inline()   const { return _if_hit->is_inline(); }
  virtual bool is_deferred() const { return _if_hit->is_deferred(); }

  virtual JVMState* generate(JVMState* jvms);
};
// Build a generator that tests the receiver against 'predicted_receiver',
// dispatching to 'if_hit' on a match and 'if_missed' otherwise.
CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
                                                 CallGenerator* if_missed,
                                                 CallGenerator* if_hit,
                                                 float hit_prob) {
  const bool exact = true;  // Predicted calls use an exact receiver check
  return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit, exact, hit_prob);
}
// Build a generator that guards the receiver with a non-exact check against
// 'guarded_receiver', assuming the guard always succeeds (PROB_ALWAYS).
CallGenerator* CallGenerator::for_guarded_call(ciKlass* guarded_receiver,
                                               CallGenerator* if_missed,
                                               CallGenerator* if_hit) {
  const bool exact = false;  // Guarded calls do not require an exact match
  return new PredictedCallGenerator(guarded_receiver, if_missed, if_hit, exact, PROB_ALWAYS);
}
JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
GraphKit kit(jvms);
PhaseGVN& gvn = kit.gvn();
// We need an explicit receiver null_check before checking its type.
// We share a map with the caller, so his JVMS gets adjusted.
Node* receiver = kit.argument(0);
CompileLog* log = kit.C->log();
if (log != nullptr) {
log->elem("predicted_call bci='%d' exact='%d' klass='%d'",
jvms->bci(), (_exact_check ? 1 : 0), log->identify(_predicted_receiver));
}