Test code:
```
/**
 * Baseline variant: unboxes the start value once, then loops entirely on a
 * primitive {@code double} — no boxing inside the loop. The result is boxed
 * exactly once by the {@code Object} return.
 *
 * @param start the starting value; must be a {@code Double}
 * @param count number of iterations to apply {@code nextPrim}
 * @return the final value, boxed as a {@code Double}
 */
static Object compute0(Object start, long count) {
    double current = (Double) start;
    // 0L, not 0l: a lowercase 'l' suffix is easily misread as the digit '1'.
    for (long i = 0L; i < count; i++) {
        current = nextPrim(current);
    }
    return current;
}
/**
 * Variant with a statically-typed {@code Double} accumulator: each call to
 * {@code nextDouble} unboxes the argument and boxes the result, so the JIT
 * must eliminate the boxing for this to match {@code compute0}.
 *
 * @param start the starting value; must be a {@code Double}
 * @param count number of iterations to apply {@code nextDouble}
 * @return the final value as a {@code Double}
 */
static Object compute1(Object start, long count) {
    Double current = (Double) start;
    // 0L, not 0l: a lowercase 'l' suffix is easily misread as the digit '1'.
    for (long i = 0L; i < count; i++) {
        current = nextDouble(current);
    }
    return current;
}
/**
 * Variant with an {@code Object}-typed accumulator: the cast to
 * {@code Double} happens on every iteration before calling
 * {@code nextDouble}, adding a checkcast (and potential boxing) per step.
 *
 * @param start the starting value; must be a {@code Double}
 * @param count number of iterations to apply {@code nextDouble}
 * @return the final value as a {@code Double}
 */
static Object compute2(Object start, long count) {
    Object current = start;
    // 0L, not 0l: a lowercase 'l' suffix is easily misread as the digit '1'.
    for (long i = 0L; i < count; i++) {
        current = nextDouble((Double) current);
    }
    return current;
}
/**
 * Fully type-erased variant: both the accumulator and the helper work on
 * {@code Object}, pushing the cast/unbox/rebox entirely into
 * {@code nextObject} on every iteration.
 *
 * @param start the starting value; must be a {@code Double}
 * @param count number of iterations to apply {@code nextObject}
 * @return the final value as a {@code Double}
 */
static Object compute3(Object start, long count) {
    Object current = start;
    // 0L, not 0l: a lowercase 'l' suffix is easily misread as the digit '1'.
    for (long i = 0L; i < count; i++) {
        current = nextObject(current);
    }
    return current;
}
/** Scales a primitive double by a small constant factor; no boxing involved. */
static double nextPrim(double x) {
    final double scaled = x * 1.0001d;
    return scaled;
}
/** Scales a boxed Double: unboxes the argument, multiplies, and re-boxes the result. */
static Double nextDouble(Double x) {
    double unboxed = x;
    return unboxed * 1.0001d;
}
/** Scales a value received as Object: casts to Double, unboxes, multiplies, re-boxes. */
static Object nextObject(Object x) {
    Double boxed = (Double) x;
    return boxed * 1.0001d;
}
```
With preview features off (and also in JDK 25), JIT eliminates all boxing in the `compute` methods, and they are equally fast.
With preview features on, `compute2` and `compute3` have about 2x execution time, presumably due to unnecessary heap allocations of `Double` value objects.
(Also observed, FWIW: using EA2, all of `compute1`, `compute2`, and `compute3` had a regression with preview features *off*; `compute2` is slow with preview features on, and `compute3` is hard to get a read on—lots of variance.)
```
/**
 * Baseline variant: unboxes the start value once, then loops entirely on a
 * primitive {@code double} — no boxing inside the loop. The result is boxed
 * exactly once by the {@code Object} return.
 *
 * @param start the starting value; must be a {@code Double}
 * @param count number of iterations to apply {@code nextPrim}
 * @return the final value, boxed as a {@code Double}
 */
static Object compute0(Object start, long count) {
    double current = (Double) start;
    // 0L, not 0l: a lowercase 'l' suffix is easily misread as the digit '1'.
    for (long i = 0L; i < count; i++) {
        current = nextPrim(current);
    }
    return current;
}
/**
 * Variant with a statically-typed {@code Double} accumulator: each call to
 * {@code nextDouble} unboxes the argument and boxes the result, so the JIT
 * must eliminate the boxing for this to match {@code compute0}.
 *
 * @param start the starting value; must be a {@code Double}
 * @param count number of iterations to apply {@code nextDouble}
 * @return the final value as a {@code Double}
 */
static Object compute1(Object start, long count) {
    Double current = (Double) start;
    // 0L, not 0l: a lowercase 'l' suffix is easily misread as the digit '1'.
    for (long i = 0L; i < count; i++) {
        current = nextDouble(current);
    }
    return current;
}
/**
 * Variant with an {@code Object}-typed accumulator: the cast to
 * {@code Double} happens on every iteration before calling
 * {@code nextDouble}, adding a checkcast (and potential boxing) per step.
 *
 * @param start the starting value; must be a {@code Double}
 * @param count number of iterations to apply {@code nextDouble}
 * @return the final value as a {@code Double}
 */
static Object compute2(Object start, long count) {
    Object current = start;
    // 0L, not 0l: a lowercase 'l' suffix is easily misread as the digit '1'.
    for (long i = 0L; i < count; i++) {
        current = nextDouble((Double) current);
    }
    return current;
}
/**
 * Fully type-erased variant: both the accumulator and the helper work on
 * {@code Object}, pushing the cast/unbox/rebox entirely into
 * {@code nextObject} on every iteration.
 *
 * @param start the starting value; must be a {@code Double}
 * @param count number of iterations to apply {@code nextObject}
 * @return the final value as a {@code Double}
 */
static Object compute3(Object start, long count) {
    Object current = start;
    // 0L, not 0l: a lowercase 'l' suffix is easily misread as the digit '1'.
    for (long i = 0L; i < count; i++) {
        current = nextObject(current);
    }
    return current;
}
/** Scales a primitive double by a small constant factor; no boxing involved. */
static double nextPrim(double x) {
    final double scaled = x * 1.0001d;
    return scaled;
}
/** Scales a boxed Double: unboxes the argument, multiplies, and re-boxes the result. */
static Double nextDouble(Double x) {
    double unboxed = x;
    return unboxed * 1.0001d;
}
/** Scales a value received as Object: casts to Double, unboxes, multiplies, re-boxes. */
static Object nextObject(Object x) {
    Double boxed = (Double) x;
    return boxed * 1.0001d;
}
```
With preview features off (and also in JDK 25), JIT eliminates all boxing in the `compute` methods, and they are equally fast.
With preview features on, `compute2` and `compute3` have about 2x execution time, presumably due to unnecessary heap allocations of `Double` value objects.
(Also observed, FWIW: using EA2, all of `compute1`, `compute2`, and `compute3` had a regression with preview features *off*; `compute2` is slow with preview features on, and `compute3` is hard to get a read on—lots of variance.)