PS4 Jailbreaking · PSXHAX · Dec 19, 2016
Following their PS4 kexec code to load the Linux kernel and the addition of KASLR support for 3.55, 4.00 and 4.01, today Marcan merged a pull request in Fail0verflow's kexec system call Git repository adding the PS4 4.05 offsets for developers! :love:

This comes just over a week before their Console Hacking 2016 CCC presentation, which will be streamed live at 23:00 on December 27, 2016.

From Fail0verflow's PS4 kexec magic.h via GitHub: Add 4.05 offsets
Code:
#ifdef PS4_3_55
#define kern_off_printf 0x1df550
#define kern_off_copyin 0x3b96e0
#define kern_off_copyout 0x3b9660
#define kern_off_copyinstr 0x3b9a50
#define kern_off_kmem_alloc_contig 0x337ea0
#define kern_off_kmem_free 0x33bca0
#define kern_off_pmap_extract 0x3afd70
#define kern_off_pmap_protect 0x3b1f50
#define kern_off_sched_pin 0x1ced60
#define kern_off_sched_unpin 0x1cedc0
#define kern_off_smp_rendezvous 0x1e7810
#define kern_off_smp_no_rendevous_barrier 0x1e75d0
#define kern_off_icc_query_nowait 0x3ed450
#define kern_off_kernel_map 0x196acc8
#define kern_off_sysent 0xeed880
#define kern_off_kernel_pmap_store 0x19bd628
#define kern_off_Starsha_UcodeInfo 0x1869fa0

#define kern_off_pml4pml4i 0x19bd618
#define kern_off_dmpml4i 0x19bd61c
#define kern_off_dmpdpi 0x19bd620

#elif defined(PS4_4_00) || defined(PS4_4_01)

#define kern_off_printf 0x347450
#define kern_off_copyin 0x286cc0
#define kern_off_copyout 0x286c40
#define kern_off_copyinstr 0x287030
#define kern_off_kmem_alloc_contig 0x275da0
#define kern_off_kmem_free 0x369580
#define kern_off_pmap_extract 0x3eeed0
#define kern_off_pmap_protect 0x3f1120
#define kern_off_sched_pin 0x1d1120
#define kern_off_sched_unpin 0x1d1180
#define kern_off_smp_rendezvous 0x34a020
#define kern_off_smp_no_rendevous_barrier 0x349de0
#define kern_off_icc_query_nowait 0x46c5a0
#define kern_off_kernel_map 0x1fe71b8
#define kern_off_sysent 0xf17790
#define kern_off_kernel_pmap_store 0x200c310
#define kern_off_Starsha_UcodeInfo 0x18dafb0

#define kern_off_pml4pml4i 0x200c300
#define kern_off_dmpml4i 0x200c304
#define kern_off_dmpdpi 0x200c308

#elif defined PS4_4_05

#define kern_off_printf 0x347580
#define kern_off_copyin 0x286df0
#define kern_off_copyout 0x286d70
#define kern_off_copyinstr 0x287160
#define kern_off_kmem_alloc_contig 0x275ed0
#define kern_off_kmem_free 0x3696b0
#define kern_off_pmap_extract 0x3ef000
#define kern_off_pmap_protect 0x3f1250
#define kern_off_sched_pin 0x1d1250
#define kern_off_sched_unpin 0x1d12b0
#define kern_off_smp_rendezvous 0x34a150
#define kern_off_smp_no_rendevous_barrier 0x349f10
#define kern_off_icc_query_nowait 0x46c6d0
#define kern_off_kernel_map 0x1fe71b8
#define kern_off_sysent 0xf17790
#define kern_off_kernel_pmap_store 0x200c310
#define kern_off_Starsha_UcodeInfo 0x18dafb0

#define kern_off_pml4pml4i 0x200c300
#define kern_off_dmpml4i 0x200c304
#define kern_off_dmpdpi 0x200c308

#endif
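
These offsets are only meaningful relative to the running kernel's base address. When built without a symbol table, the kexec code resolves each function through the RESOLVE macro in kernel.c (quoted further down): kernel base plus per-firmware offset. A minimal sketch of that idea, assuming the exploit chain has already leaked the address of the kernel's printf (the leak itself is not shown in this post):
Code:
/* Sketch only (not from the repo): turning the 4.05 offset table above into
 * callable pointers. With KASLR, the slid kernel base falls out of any one
 * leaked kernel symbol, here assumed to be printf; every other symbol is
 * then base + offset for the running firmware. */
#include <stdint.h>

#define kern_off_printf 0x347580   /* 4.05 values from magic.h above */
#define kern_off_copyin 0x286df0

typedef int (*kprintf_t)(const char *fmt, ...);
typedef int (*copyin_t)(const void *uaddr, void *kaddr, uint64_t len);

static void resolve_example(uint64_t leaked_printf_addr)
{
    /* slid base = leaked address - known offset of that symbol */
    uint64_t kern_base = leaked_printf_addr - kern_off_printf;

    kprintf_t kprintf = (kprintf_t)(kern_base + kern_off_printf);
    copyin_t  copyin  = (copyin_t)(kern_base + kern_off_copyin);

    kprintf("copyin lives at %p\n", (void *)copyin);
}
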
Since this thread was posted there have been further updates and edits to the KASLR support from Marcan, with the additional changes listed below:
  • No RedZone
  • Support 64bit hook displacements
  • Cleanup more hardware and fix interrupt issues
  • Bump VRAM to 1GB from 512MB
If you are looking for the additional updated KASLR code, it has been added below.
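
On the "Support 64bit hook displacements" item: the 5-byte jmp rel32 thunk used for hooks can only reach targets within a signed 32-bit displacement of the hook site, so far-away hooks now fall back to a 14-byte absolute jump. A minimal sketch of the range check, assuming the same displacement convention as kernel_hook_install() in the kernel.c excerpt further down:
Code:
/* Sketch of the displacement test behind the "64bit hook displacements"
 * change; the real logic lives in kernel_hook_install() below. The rel32 of
 * an E9 jmp is measured from the first byte after the 5-byte instruction. */
#include <stdint.h>

static int needs_absolute_jump(uintptr_t hook_site, uintptr_t target)
{
    int64_t disp = (int64_t)target - (int64_t)(hook_site + 5);
    /* Out of rel32 range -> use the 14-byte FF 25 jmp [rip+0] form with the
     * 8-byte absolute target stored right after the instruction. */
    return disp < -0x80000000LL || disp > 0x7fffffffLL;
}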

From Fail0verflow's PS4 kexec linux_boot.c via GitHub: Cleanup more hardware and fix interrupt issues
Code:
/*
 * ps4-kexec - a kexec() implementation for Orbis OS / FreeBSD
 *
 * Copyright (C) 2015-2016 shuffle2 <[email protected]>
 * Copyright (C) 2015-2016 Hector Martin "marcan" <[email protected]>
 *
 * This code is licensed to you under the 2-clause BSD license. See the LICENSE
 * file for more information.
 */

#include "linux_boot.h"
#include "types.h"
#include "string.h"
#include "x86.h"
#include "kernel.h"
#include "uart.h"

void uart_write_byte(u8 b);

static u64 vram_base = 0x100000000;
// Current code assumes it's a power of two.
static u64 vram_size = 512 * 1024 * 1024;

#define DM_PML4_BASE ((kern.dmap_base >> PML4SHIFT) & 0x1ff)

struct desc_ptr {
    u16 limit;
    u64 address;
} __attribute__((packed));

struct desc_struct {
    u16 limit0;
    u16 base0;
    u16 base1: 8, type: 4, s: 1, dpl: 2, p: 1;
    u16 limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
} __attribute__((packed));

typedef void (*jmp_to_linux_t)(uintptr_t linux_startup, uintptr_t bootargs,
                               uintptr_t new_cr3, uintptr_t gdt_ptr);
extern uint8_t *jmp_to_linux;
extern size_t jmp_to_linux_size;

// FreeBSD DMAP addresses
struct linux_boot_info {
    void *linux_image;
    void *initramfs;
    size_t initramfs_size;
    struct boot_params *bp;
    char *cmd_line;
};
static struct linux_boot_info nix_info;

void set_nix_info(void *linux_image, struct boot_params *bp, void *initramfs,
                  size_t initramfs_size, char *cmd_line)
{
    nix_info.linux_image = linux_image;
    nix_info.bp = bp;
    nix_info.initramfs = initramfs;
    nix_info.initramfs_size = initramfs_size;
    nix_info.cmd_line = cmd_line;
}

static volatile int halted_cpus = 0;

static int curcpu(void)
{
    int cpuid;
    // TODO ensure offsetof(struct pcpu, pc_cpuid) == 0x34 on all fw
    asm volatile("mov %0, gs:0x34;" : "=r" (cpuid));
    return cpuid;
}

static void bp_add_smap_entry(struct boot_params *bp, u64 addr, u64 size,
                              u32 type)
{
    uint8_t idx = bp->e820_entries;
    bp->e820_map[idx].addr = addr;
    bp->e820_map[idx].size = size;
    bp->e820_map[idx].type = type;
    bp->e820_entries++;
}

void prepare_boot_params(struct boot_params *bp, u8 *linux_image)
{
    memset(bp, 0, sizeof(struct boot_params));
    struct boot_params *bp_src = (struct boot_params *)linux_image;
    memcpy(&bp->hdr, &bp_src->hdr, offsetof(struct setup_header, header) +
                                   ((u8 *)&bp_src->hdr.jump)[1]);

    // These values are from fw 1.01
    bp_add_smap_entry(bp, 0x0000000000, 0x0000008000, SMAP_TYPE_MEMORY);
    bp_add_smap_entry(bp, 0x0000008000, 0x0000078000, SMAP_TYPE_RESERVED);
    bp_add_smap_entry(bp, 0x0000080000, 0x000001a000, SMAP_TYPE_MEMORY);
    bp_add_smap_entry(bp, 0x000009a000, 0x0000006000, SMAP_TYPE_RESERVED);
    bp_add_smap_entry(bp, 0x00000a0000, 0x0000020000, SMAP_TYPE_RESERVED);
    bp_add_smap_entry(bp, 0x00000e0000, 0x0000010000, SMAP_TYPE_ACPI_RECLAIM);
    bp_add_smap_entry(bp, 0x0000100000, 0x0000300000, SMAP_TYPE_MEMORY);
    bp_add_smap_entry(bp, 0x0000400000, 0x0000080000, SMAP_TYPE_RESERVED);
    bp_add_smap_entry(bp, 0x0000480000, 0x0000200000, SMAP_TYPE_MEMORY);
    bp_add_smap_entry(bp, 0x0000680000, 0x0000080000, SMAP_TYPE_RESERVED);
    bp_add_smap_entry(bp, 0x0000700000, 0x007e8e8000, SMAP_TYPE_MEMORY);
    bp_add_smap_entry(bp, 0x007efe8000, 0x0000008000, SMAP_TYPE_ACPI_NVS);
    bp_add_smap_entry(bp, 0x007eff0000, 0x0000010000, SMAP_TYPE_ACPI_RECLAIM);
    // This used to be VRAM, but we reclaim it as RAM
    bp_add_smap_entry(bp, 0x007f000000, 0x0001000000, SMAP_TYPE_MEMORY);
    bp_add_smap_entry(bp, 0x0080000000, 0x0060000000, SMAP_TYPE_RESERVED);
    bp_add_smap_entry(bp, 0x00e0000000, 0x0018000000, SMAP_TYPE_RESERVED);
    // Instead, carve out VRAM from the beginning of high memory
    bp_add_smap_entry(bp, vram_base, vram_size, SMAP_TYPE_RESERVED);
    bp_add_smap_entry(bp, vram_base + vram_size, 0x017f000000 - vram_size,
                      SMAP_TYPE_MEMORY);
}

#define WR32(a, v) *(volatile u32 *)PA_TO_DM(a) = (v)

#define MC_VM_FB_LOCATION 0x2024
#define MC_VM_FB_OFFSET 0x2068
#define HDP_NONSURFACE_BASE 0x2c04
#define CONFIG_MEMSIZE 0x5428

static void configure_vram(void)
{
    u64 mmio_base = 0xe4800000;
    u64 fb_base = 0x0f00000000;
    u64 fb_top = fb_base + vram_size - 1;

    WR32(mmio_base + MC_VM_FB_LOCATION, 0);
    WR32(mmio_base + HDP_NONSURFACE_BASE, 0);

    WR32(mmio_base + MC_VM_FB_LOCATION,
         ((fb_top >> 24) << 16) | (fb_base >> 24));
    WR32(mmio_base + MC_VM_FB_OFFSET, vram_base >> 22);
    WR32(mmio_base + HDP_NONSURFACE_BASE, fb_base >> 8);
    WR32(mmio_base + CONFIG_MEMSIZE, vram_size >> 20);
}

#define IA32_MTRR_DEF_TYPE 0x2ff
#define MTRR_BASE(i) (0x200 + 2*i)
#define MTRR_MASK(i) (0x201 + 2*i)

static void setup_mtrr(void)
{
    disable_interrupts();
    u64 cr0 = cr0_read();
    cr0_write((cr0 | CR0_CD) & (~(u64)CR0_NW));
    wbinvd();
    cr3_write(cr3_read()); // TLB flush

    wrmsr(IA32_MTRR_DEF_TYPE, 0); // MTRRs disabled

    // Low memory (0GB-2GB) = WB
    wrmsr(MTRR_BASE(0), 0x0000000006);
    wrmsr(MTRR_MASK(0), 0xff80000800);
    // High memory (4GB-8GB) = WB
    wrmsr(MTRR_BASE(1), 0x0100000006);
    wrmsr(MTRR_MASK(1), 0xff00000800);
    // High memory (8GB-10GB) = WB
    wrmsr(MTRR_BASE(2), 0x0200000006);
    wrmsr(MTRR_MASK(2), 0xff80000800);
    // VRAM (4GB-4GB+vram_size) = UC
    wrmsr(MTRR_BASE(3), 0x0100000000);
    wrmsr(MTRR_MASK(3), (0xffffffffff - vram_size + 1) | 0x800);

    wbinvd();
    cr3_write(cr3_read()); // TLB flush
    wrmsr(IA32_MTRR_DEF_TYPE, (3<<10)); // MTRRs enabled, default uncachable
    cr0_write(cr0);
    enable_interrupts();
}

static void cleanup_interrupts(void)
{
    int i;
    disable_interrupts();

    // Reset APIC stuff (per-CPU)
    *(volatile u32 *)PA_TO_DM(0xfee00410) = 1;
    for (i = 0x320; i < 0x380; i += 0x10)
        *(volatile u32 *)PA_TO_DM(0xfee00000 + i) = 0x10000;
    for (i = 0x480; i < 0x500; i += 0x10)
        *(volatile u32 *)PA_TO_DM(0xfee00000 + i) = 0xffffffff;
    for (i = 0x500; i < 0x540; i += 0x10)
        *(volatile u32 *)PA_TO_DM(0xfee00000 + i) = 0x10000;
    *(volatile u32 *)PA_TO_DM(0xfee00410) = 0;

    // Fix the LVT offset for thresholding
    wrmsr(0x413, (1L<<24) | (1L<<52));
    wrmsr(0xc0000408, (1L<<24) | (1L<<52));
}

static void cpu_quiesce_gate(void *arg)
{
    int i;

    // Ensure we can write anywhere
    cr0_write(cr0_read() & ~CR0_WP);

    // Interrupt stuff local to each CPU
    cleanup_interrupts();

    // We want to set up MTRRs on all CPUs
    setup_mtrr();

    if (curcpu() != 0) {
        // We're not on BSP. Try to halt.
        __sync_fetch_and_add(&halted_cpus, 1);
        cpu_stop();
    }

    uart_write_str("kexec: Waiting for secondary CPUs...\n");

    // wait for all cpus to halt
    while (!__sync_bool_compare_and_swap(&halted_cpus, 7, 7));

    uart_write_str("kexec: Secondary CPUs quiesced\n");

    //* Put ident mappings in current page tables
    // Should not be needed, but maybe helps for debugging?
    cr4_pge_disable();
    u64 *pml4_base = (u64 *)PA_TO_DM(cr3_read() & 0x000ffffffffff000ull);
    u64 *pdp_base = (u64 *)PA_TO_DM(*pml4_base & 0x000ffffffffff000ull);
    for (u64 i = 0; i < 4; i++) {
            pdp_base[i] = (i << 30) | PG_RW | PG_V | PG_U | PG_PS;
    }

    // Clear (really) low mem.
    // Linux reads from here to try and access EBDA...
    // get_bios_ebda reads u16 from 0x40e
    // reserve_ebda_region reads u16 from 0x413
    // Writing zeros causes linux to default to marking
    // LOWMEM_CAP(0x9f000)-1MB(0x100000) as reserved.
    // It doesn't match the ps4 e820 map, but that seems OK.
    memset((void *)0, 0, 0x1000);

    // Create a new page table hierarchy out of the way of linux
    // Accessed via freebsd direct map
    pml4_base = (u64 *)PA_TO_DM(0x1000); // "boot loader" as per linux boot.txt
    // We only use 1Gbyte mappings. So we need 2 * 0x200 * 8 = 0x2000 bytes :|
    memset(pml4_base, 0, 512 * sizeof(u64) * 2);
    pdp_base = pml4_base + 512;
    u64 pdpe = DM_TO_ID(pdp_base) | PG_RW | PG_V | PG_U;
    pml4_base[0] = pdpe;
    // Maintain the freebsd direct map
    pml4_base[DM_PML4_BASE] = pdpe;
    for (u64 i = 0; i < 4; i++) {
            pdp_base[i] = (i << 30) | PG_RW | PG_V | PG_U | PG_PS;
    }

    uart_write_str("kexec: Setting up GDT...\n");

    struct desc_ptr gdt_ptr;
    struct desc_struct *desc = (struct desc_struct *)(pdp_base + 512);
    gdt_ptr.limit = sizeof(struct desc_struct) * 0x100 - 1;
    gdt_ptr.address = DM_TO_ID(desc);

    // clear
    memset(desc, 0, gdt_ptr.limit + 1);
    // Most things are ignored in 64bit mode, and we will never be in
    // 32bit/compat modes, so just setup another pure-64bit environment...
    // Linux inits it's own GDT in secondary_startup_64
    // 0x10
    desc[2].limit0 = 0xffff;
    desc[2].base0 = 0x0000;
    desc[2].base1 = 0x0000;
    desc[2].type = SEG_TYPE_CODE | SEG_TYPE_EXEC_READ;
    desc[2].s = 1;
    desc[2].dpl = 0;
    desc[2].p = 1;
    desc[2].limit = 0xf;
    desc[2].avl = 0;
    desc[2].l = 1;
    desc[2].d = 0;
    desc[2].g = 0;
    desc[2].base2 = 0x00;
    // 0x18
    desc[3].limit0 = 0xffff;
    desc[3].base0 = 0x0000;
    desc[3].base1 = 0x0000;
    desc[3].type = SEG_TYPE_DATA | SEG_TYPE_READ_WRITE;
    desc[3].s = 1;
    desc[3].dpl = 0;
    desc[3].p = 1;
    desc[3].limit = 0xf;
    desc[3].avl = 0;
    desc[3].l = 0;
    desc[3].d = 0;
    desc[3].g = 0;
    desc[3].base2 = 0x00;
    // Task segment value
    // 0x20
    desc[4].limit0 = 0x0000;
    desc[4].base0 = 0x0000;
    desc[4].base1 = 0x0000;
    desc[4].type = SEG_TYPE_TSS;
    desc[4].s = 1;
    desc[4].dpl = 0;
    desc[4].p = 1;
    desc[4].limit = 0x0;
    desc[4].avl = 0;
    desc[4].l = 0;
    desc[4].d = 0;
    desc[4].g = 0;
    desc[4].base2 = 0x00;

    uart_write_str("kexec: Relocating stub...\n");

    // Relocate the stub and jump to it
    // TODO should thunk_copy be DMAP here?
    void *thunk_copy = (void *)(gdt_ptr.address + gdt_ptr.limit + 1);
    memcpy(thunk_copy, &jmp_to_linux, jmp_to_linux_size);
    // XXX The +0x200 is for the iret stack in linux_thunk.S
    uintptr_t lowmem_pos = DM_TO_ID(thunk_copy) + jmp_to_linux_size + 0x200;

    uart_write_str("kexec: Setting up boot params...\n");

    // XXX we write into this bootargs and pass it to the kernel, but in
    // jmp_to_linux we use the bootargs from the image as input. So they
    // MUST MATCH!
    struct boot_params *bp_lo = (struct boot_params *)lowmem_pos;
    *bp_lo = *nix_info.bp;
    lowmem_pos += sizeof(struct boot_params);

    struct setup_header *shdr = &bp_lo->hdr;
    shdr->cmd_line_ptr = lowmem_pos;
    shdr->ramdisk_image = DM_TO_ID(nix_info.initramfs) & 0xffffffff;
    shdr->ramdisk_size = nix_info.initramfs_size & 0xffffffff;
    bp_lo->ext_ramdisk_image = DM_TO_ID(nix_info.initramfs) >> 32;
    bp_lo->ext_ramdisk_size = nix_info.initramfs_size >> 32;
    shdr->hardware_subarch = X86_SUBARCH_PS4;
    // This needs to be nonzero for the initramfs to work
    shdr->type_of_loader = 0xd0; // kexec

    strlcpy((char *)DM_TO_ID(shdr->cmd_line_ptr), nix_info.cmd_line,
        nix_info.bp->hdr.cmdline_size);
    lowmem_pos += strlen(nix_info.cmd_line) + 1;

    uart_write_str("kexec: Cleaning up hardware...\n");

    // Disable IOMMU
    *(volatile u64 *)PA_TO_DM(0xfc000018) &= ~1;

    // Disable all MSIs on Aeolia
    for (i = 0; i < 8; i++)
        *(volatile u32 *)PA_TO_DM(0xd03c844c + i*4) = 0;

    // Stop HPET timers
    *(volatile u64 *)PA_TO_DM(0xd0382010) = 0;
    *(volatile u64 *)PA_TO_DM(0xd0382100) = 0;
    *(volatile u64 *)PA_TO_DM(0xd0382120) = 0;
    *(volatile u64 *)PA_TO_DM(0xd0382140) = 0;
    *(volatile u64 *)PA_TO_DM(0xd0382160) = 0;

    uart_write_str("kexec: Reconfiguring VRAM...\n");

    configure_vram();

    uart_write_str("kexec: About to relocate and jump to kernel\n");

    ((jmp_to_linux_t)thunk_copy)(
            DM_TO_ID(nix_info.linux_image),
            DM_TO_ID(bp_lo),
            DM_TO_ID(pml4_base),
            (uintptr_t)&gdt_ptr
            );

    // should never reach here
    uart_write_str("kexec: unreachable (?)\n");
}

// Hook for int icc_query_nowait(u8 icc_msg[0x7f0])
int hook_icc_query_nowait(u8 *icc_msg)
{
    kern.printf("hook_icc_query_nowait called\n");

    // Transition to BSP and halt other cpus
    // smp_no_rendevous_barrier is just nullsub, but it is treated specially by
    // smp_rendezvous. This is the easiest way to do this, since we can't assume
    // we're already running on BSP. Since smp_rendezvous normally waits on all
    // cpus to finish the callbacks, we just never return...
    kern.smp_rendezvous(kern.smp_no_rendevous_barrier,
                        cpu_quiesce_gate,
                        kern.smp_no_rendevous_barrier, NULL);

    // should never reach here
    kern.printf("hook_icc_query_nowait: unreachable (?)\n");
    return 0;
}
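
For context, the hook above only runs once icc_query_nowait's prologue has been redirected to it. The exact call site is not quoted in this post, but wiring it up would look roughly like the following sketch, using the kernel_hook_install() helper from the kernel.c excerpt that follows (argument order: where to redirect to, then the function whose prologue gets overwritten):
Code:
/* Assumed wiring, not quoted from the repo: redirect the kernel's
 * icc_query_nowait (resolved by kernel_init() in the excerpt below) into the
 * hook_icc_query_nowait handler defined above. */
static void install_kexec_trigger(void)
{
    if (!kernel_hook_install((void *)hook_icc_query_nowait,
                             (void *)kern.icc_query_nowait))
        kern.printf("kexec: failed to hook icc_query_nowait\n");
}
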
From Fail0verflow's PS4 kexec kernel.c via GitHub: Support 64bit hook displacements
Code:
/*
 * ps4-kexec - a kexec() implementation for Orbis OS / FreeBSD
 *
 * Copyright (C) 2015-2016 shuffle2 <[email protected]>
 * Copyright (C) 2015-2016 Hector Martin "marcan" <[email protected]>
 *
 * This code is licensed to you under the 2-clause BSD license. See the LICENSE
 * file for more information.
 */

#include "kernel.h"
#include "string.h"
#include "elf.h"
#include "x86.h"
#include "magic.h"

struct ksym_t kern;
int (*early_printf)(const char *fmt, ...) = NULL;

#define eprintf(...) do { if (early_printf) early_printf(__VA_ARGS__); } while(0)

#ifdef NO_SYMTAB

#define RESOLVE(name) do {\
    kern.name = (void *)(kern.kern_base + kern_off_ ## name); \
} while (0);

#else

#define KERNSIZE    0x2000000

static const u8 ELF_IDENT[9] = "\x7f" "ELF\x02\x01\x01\x09\x00";
static Elf64_Sym *symtab;
static char *strtab;
static size_t strtab_size;

static Elf64_Ehdr *find_kern_ehdr(void)
{
    // Search for the kernel copy embedded in ubios, then follow it to see
    // where it was relocated to
    for (uintptr_t p = kern.kern_base; p < kern.kern_base + KERNSIZE; p += PAGE_SIZE) {
        Elf64_Ehdr *ehdr = (Elf64_Ehdr *)p;
        if (!memcmp(ehdr->e_ident, ELF_IDENT, sizeof(ELF_IDENT))) {
            for (size_t i = 0; i < ehdr->e_phnum; i++) {
                Elf64_Phdr *phdr = (Elf64_Phdr *)(p + ehdr->e_phoff) + i;
                if (phdr->p_type == PT_PHDR) {
                    return (Elf64_Ehdr *)(phdr->p_vaddr - ehdr->e_phoff);
                }
            }
        }
    }
    return NULL;
}

static Elf64_Dyn *elf_get_dyn(Elf64_Ehdr *ehdr)
{
    Elf64_Phdr *phdr = (Elf64_Phdr *)((uintptr_t)ehdr + ehdr->e_phoff);
    for (size_t i = 0; i < ehdr->e_phnum; i++, phdr++) {
        if (phdr->p_type == PT_DYNAMIC) {
            return (Elf64_Dyn *)phdr->p_vaddr;
        }
    }
    return NULL;
}

static int elf_parse_dyn(Elf64_Dyn *dyn)
{
    for (Elf64_Dyn *dp = dyn; dp->d_tag != DT_NULL; dp++) {
        switch (dp->d_tag) {
            case DT_SYMTAB:
                symtab = (Elf64_Sym *)dp->d_un.d_ptr;
                break;
            case DT_STRTAB:
                strtab = (char *)dp->d_un.d_ptr;
                break;
            case DT_STRSZ:
                strtab_size = dp->d_un.d_val;
                break;
        }
    }
    return symtab && strtab && strtab_size;
}

void *kernel_resolve(const char *name)
{
    for (Elf64_Sym *sym = symtab; (uintptr_t)(sym + 1) < (uintptr_t)strtab; sym++) {
        if (!strcmp(name, &strtab[sym->st_name])) {
            eprintf("kern.%s = %p\n", name, (void*)sym->st_value);
            return (void *)sym->st_value;
        }
    }
    eprintf("Failed to resolve symbol '%s'\n", name);
    return NULL;
}

#define RESOLVE(name) if (!(kern.name = kernel_resolve(#name))) return 0;

#endif

static int resolve_symbols(void)
{
    RESOLVE(printf);
    early_printf = kern.printf;
    RESOLVE(copyin);
    RESOLVE(copyout);
    RESOLVE(copyinstr);
    RESOLVE(kernel_map);
    RESOLVE(kernel_pmap_store);
    RESOLVE(kmem_alloc_contig);
    RESOLVE(kmem_free);
    RESOLVE(pmap_extract);
    RESOLVE(pmap_protect);
    RESOLVE(sysent);
    RESOLVE(sched_pin);
    RESOLVE(sched_unpin);
    RESOLVE(smp_rendezvous);
    RESOLVE(smp_no_rendevous_barrier);
    RESOLVE(Starsha_UcodeInfo);
    RESOLVE(icc_query_nowait);
    return 1;
}

#define    M_WAITOK 0x0002
#define    M_ZERO   0x0100

#define    VM_MEMATTR_DEFAULT        0x06

void *kernel_alloc_contig(size_t size)
{
    // use kmem_alloc_contig instead of contigalloc to avoid messing with a malloc_type...
    vm_offset_t ret = kern.kmem_alloc_contig(
        *kern.kernel_map, size, M_ZERO | M_WAITOK, (vm_paddr_t)0,
        ~(vm_paddr_t)0, 1, 0, VM_MEMATTR_DEFAULT);

    if (!ret) {
        kern.printf("Failed to allocate %d bytes\n", size);
        return NULL;
    }
    return (void *)PA_TO_DM(kern.pmap_extract(kern.kernel_pmap_store, ret));
}

void kernel_free_contig(void *addr, size_t size)
{
    if (!addr)
        return;
    kern.kmem_free(*kern.kernel_map, (vm_offset_t)addr, size);
}

int kernel_hook_install(void *target, void *hook)
{
    uintptr_t t = (uintptr_t)target; // addr to redirect to
    uintptr_t h = (uintptr_t)hook; // place to write the thunk

    if (!hook || !target) {
        return 0;
    }

    kern.printf("kernel_hook_install(%p, %p)\n", target, hook);

    if (!(t & (1L << 63))) {
        kern.printf("\n===================== WARNING =====================\n");
        kern.printf("hook target function address: %p\n", t);
        kern.printf("It looks like we're running from userland memory.\n");
        kern.printf("Please run this code from a kernel memory mapping.\n\n");
        return 0;
    }
    s64 displacement = t - (h + 5);

    kern.sched_pin();
    u64 wp = write_protect_disable();
    if (displacement < -0x80000000 || displacement > 0x7fffffff) {
        kern.printf("  Using 64bit absolute jump\n");
        struct __attribute__((packed)) jmp_t{
            u8 op[2];
            s32 zero;
            void *target;
        } jmp = {
            .op = { 0xff, 0x25 },
            .zero = 0,
            .target = target,
        };
        ASSERT_STRSIZE(struct jmp_t, 14);
        memcpy(hook, &jmp, sizeof(jmp));
    } else {
        kern.printf("  Using 32bit relative jump\n");
        struct __attribute__((packed)) jmp_t{
            u8 op[1];
            s32 imm;
        } jmp = {
            .op = { 0xe9 },
            .imm = displacement,
        };
        ASSERT_STRSIZE(struct jmp_t, 5);
        memcpy(hook, &jmp, sizeof(jmp));
    }
    wbinvd();
    write_protect_restore(wp);
    kern.sched_unpin();

    return 1;
}

void kernel_syscall_install(int num, void *call, int narg)
{
    struct sysent_t *sy = &kern.sysent[num];

    kern.sched_pin();
    u64 wp = write_protect_disable();

    memset(sy, 0, sizeof(*sy));
    sy->sy_narg = narg;
    sy->sy_call = call;
    sy->sy_thrcnt = 1;

    write_protect_restore(wp);
    kern.sched_unpin();
}

void kernel_remap(void *start, void *end, int perm)
{
    u64 s = ((u64)start) & ~(u64)(PAGE_SIZE-1);
    u64 e = ((u64)end + PAGE_SIZE - 1) & ~(u64)(PAGE_SIZE-1);

    kern.printf("pmap_protect(pmap, %p, %p, %d)\n", (void*)s, (void*)e, perm);
    kern.pmap_protect(kern.kernel_pmap_store, s, e, perm);
}

static volatile int _global_test = 0;

#ifndef DO_NOT_REMAP_RWX
extern u8 _start[], _end[];

static int patch_pmap_check(void)
{
    u8 *p;

    for (p = (u8*)kern.pmap_protect;
         p < ((u8*)kern.pmap_protect + 0x500); p++) {
        if (!memcmp(p, "\x83\xe0\x06\x83\xf8\x06", 6)) {
            p[2] = 0;
            kern.printf("pmap_protect patch successful (found at %p)\n", p);
            return 1;
        }
    }
    kern.printf("pmap_protect patch failed!\n");
    return 0;
}
#endif

int kernel_init(void)
{
    int rv = -1;
    eprintf("kernel_init()\n");

#ifdef KASLR
    // use `early_printf` to calculate kernel base
    if (early_printf == NULL)
        return 0;

    kern.kern_base = (u64)(early_printf - kern_off_printf);
    if ((kern.kern_base & PAGE_MASK) != 0) {
        eprintf("Kernel base is not aligned\n");
        return 0;
    } else {
        eprintf("Kernel base = %p\n", kern.kern_base);
    }

    u64 DMPML4I = *(u32 *)(kern.kern_base + kern_off_dmpml4i);
    u64 DMPDPI = *(u32 *)(kern.kern_base + kern_off_dmpdpi);

#else
    kern.kern_base = KVADDR(0x1ff, 0x1fe, 0, 0); // 0xffffffff80000000

    u64 DMPML4I = 0x1fc;
    u64 DMPDPI = 0;
#endif

    kern.dmap_base = KVADDR(DMPML4I, DMPDPI, 0, 0);
    eprintf("Direct map base = %p\n", kern.dmap_base);

    // We may not be mapped writable yet, so to be able to write to globals
    // we need WP disabled.
    u64 flags = intr_disable();
    u64 wp = write_protect_disable();

#ifndef NO_SYMTAB
    Elf64_Ehdr *ehdr = find_kern_ehdr();
    if (!ehdr) {
        eprintf("Could not find kernel ELF header\n");
        goto err;
    }
    eprintf("ELF header at %p\n", ehdr);

    Elf64_Dyn *dyn = elf_get_dyn(ehdr);
    if (!dyn) {
        eprintf("Could not find kernel dynamic header\n");
        goto err;
    }
    eprintf("ELF dynamic section at %p\n", dyn);

    if (!elf_parse_dyn(dyn)) {
        eprintf("Failed to parse ELF dynamic section\n");
        goto err;
    }
#endif

    if (!resolve_symbols()) {
        eprintf("Failed to resolve all symbols\n");
        goto err;
    }

    // Pin ourselves as soon as possible. This is expected to be released by the caller.
    kern.sched_pin();

#ifndef DO_NOT_REMAP_RWX
    if (!patch_pmap_check())
        goto err;
#endif

#ifndef DO_NOT_REMAP_RWX
    // kernel_remap may need interrupts, but may not write to globals!
    enable_interrupts();
    kernel_remap(_start, _end, 7);
    disable_interrupts();
#endif

    // Writing to globals is now safe.

    kern.printf("Testing global variable access (write protection)...\n");
    _global_test = 1;
    kern.printf("OK.\n");

    kern.printf("Kernel interface initialized\n");
    rv = 0;

err:
    write_protect_restore(wp);
    intr_restore(flags);
    return rv;
}
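
Besides the hook path, the excerpt also includes kernel_syscall_install(), which overwrites a sysent slot so payload code can be reached through a plain system call. A hypothetical usage sketch (the slot number 200 and the handler name are placeholders, not values from the repo; FreeBSD sysent handlers receive a thread pointer and an argument block, and narg counts the user-supplied arguments):
Code:
/* Illustration only: exposing a handler through an unused sysent slot via
 * kernel_syscall_install() from the excerpt above. Slot 200 and the handler
 * name are placeholders. */
static int sys_example(void *td, void *uap)
{
    (void)td; (void)uap;
    kern.printf("custom syscall reached\n");
    return 0;
}

static void install_example_syscall(void)
{
    /* num = sysent slot, call = handler, narg = user argument count */
    kernel_syscall_install(200, (void *)sys_example, 0);
}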

From Fail0verflow's PS4 kexec linux_boot.c via GitHub: Bump VRAM to 1GB
Code:
Old 512MB VRAM:
static u64 vram_size = 512 * 1024 * 1024;

Updated 1GB VRAM:
static u64 vram_size = 1024 * 1024 * 1024;
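
For reference, here is where the new 1GB value flows in the linux_boot.c excerpt above, with the quoted expressions evaluated (illustration only, not code from the repo):
Code:
#include <stdint.h>

#define VRAM_BASE 0x100000000ull
#define VRAM_SIZE (1024ull * 1024 * 1024)   /* the new 1GB value = 0x40000000 */

/* prepare_boot_params(): e820 reserves [VRAM_BASE, VRAM_BASE + VRAM_SIZE),
 * then marks RAM up to VRAM_BASE + 0x017f000000. */
static const uint64_t vram_end      = VRAM_BASE + VRAM_SIZE;          /* 0x140000000 */
static const uint64_t high_ram_size = 0x017f000000ull - VRAM_SIZE;

/* configure_vram(): CONFIG_MEMSIZE is programmed in megabytes. */
static const uint64_t config_memsize_mb = VRAM_SIZE >> 20;            /* 1024 */

/* setup_mtrr(): mask that keeps the VRAM window uncacheable. */
static const uint64_t mtrr_vram_mask =
    (0xffffffffffull - VRAM_SIZE + 1) | 0x800;                        /* 0xffc0000800 */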

Thanks to @VultraAID for the heads-up in the PSXHAX Shoutbox earlier today and @JarJarBlinx for the reminder! (y)
 

Comments

Remember that fail0verflow doesn't give a full kernel exploit.
Just like with 1.76, they say: here are the patches, find your own.

Let's see what they bring on December 27.
 
Before anyone asks in the Shoutbox "you need a ksploit to enable the kernel exploit": you already have the browser exploit, you just need to find the rest.
 